//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

#include "AMDGPUGenCallingConv.inc"

static cl::opt<bool> AMDGPUBypassSlowDiv(
  "amdgpu-bypass-slow-div",
  cl::desc("Skip 64-bit divide for dynamic 32-bit values"),
  cl::init(true));

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  KnownBits Known = DAG.computeKnownBits(Op);
  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
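  // Illustrative sketch (added commentary, not upstream text): a Promote entry
  // combined with AddPromotedToType makes the legalizer rewrite the FP memory
  // operation in terms of the same-sized integer type, roughly
  //   (f32 (load addr))  ->  (f32 (bitcast (i32 (load addr))))
  // so only the integer load/store patterns need to exist in tablegen.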
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v3f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v5f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::v32f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v3i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v3i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v3i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }
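  // Illustrative sketch (added commentary, not upstream text): a Legal
  // sextload entry above means one extending load can be selected, e.g. the
  // IR pair
  //   %b = load i8, i8 addrspace(1)* %p
  //   %e = sext i8 %b to i32
  // maps to a single byte load such as buffer_load_sbyte, while Expand forces
  // a plain load followed by a separate extension.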
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v3f32, MVT::v3f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v16f32, MVT::v16f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v32f32, MVT::v32f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v3f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v5f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v5f32, MVT::v5i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::v32f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v32f32, MVT::v32i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v3f32, MVT::v3f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);
  setTruncStoreAction(MVT::v16f32, MVT::v16f16, Expand);
  setTruncStoreAction(MVT::v32f32, MVT::v32f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);
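  // Illustrative sketch (added commentary, not upstream text): an expanded FP
  // truncating store becomes an explicit rounding conversion plus an ordinary
  // store, roughly
  //   (truncstore f64:%v to f32, addr) -> (store (f32 (fp_round %v)), addr)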
  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);
  setOperationAction(ISD::FEXP, MVT::f32, Custom);

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);
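  // Illustrative sketch (added commentary, not upstream text): the Custom
  // FREM lowering (LowerFREM) uses the standard fmod recipe built from nodes
  // that already exist,
  //   frem(x, y) = x - y * trunc(x / y)
  // rather than calling a runtime library function.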
  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v3f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v5f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v3i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v5i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v16i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v32i32, Custom);

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // The GPU has no divrem instruction for signed or unsigned division.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // The GPU has no single-instruction [S|U]MUL_LOHI.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE.
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  // The hardware supports 32-bit FSHR, but not FSHL.
  setOperationAction(ISD::FSHR, MVT::i32, Legal);
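  // Illustrative sketch (added commentary, not upstream text): with only a
  // right-rotate instruction, the generic Expand rewrites left rotates as
  //   rotl(x, n) = rotr(x, bitwidth - n)
  // which is why ROTL is marked Expand below.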
  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v3i32, MVT::v4i32, MVT::v5i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }
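  // Illustrative sketch (added commentary, not upstream text): for vector
  // types, Expand generally scalarizes the operation, e.g.
  //   (add v2i32:%a, %b)
  // unrolls into two i32 adds whose results are recombined with a
  // build_vector.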
  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v3f32, MVT::v4f32, MVT::v5f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FCANONICALIZE, VT, Expand);
  }

  // This causes us to use an unrolled select operation rather than expansion
  // with bit operations. This is generally better, but the alternative
  // expansion using BFI instructions may be preferable if the select sources
  // are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v3f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v3f32, MVT::v3i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::SELECT, MVT::v5f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v5f32, MVT::v5i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For
  // now, we don't have a way of knowing during instruction selection whether a
  // condition will be uniform, so we always use vector compares. Assume vector
  // compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  setMinCmpXchgSizeInBits(32);
  setSupportsUnalignedAtomics(false);

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because
  // x8/x16 vectors are a legal type, even though we usually have to split the
  // loads. When we can more precisely specify load legality per address space,
  // we should be able to make FindBetterChain/MergeConsecutiveStores smarter
  // so that they can figure out what to do in 2 iterations without all N > 4
  // stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to
  // worry about these during lowering.
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  // The expansion for 64-bit division is enormous.
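  // Illustrative sketch (added commentary, not upstream text):
  // addBypassSlowDiv(64, 32) asks CodeGenPrepare to guard each 64-bit integer
  // division with a runtime check so a cheaper 32-bit division can be used
  // when both operands happen to be small, roughly:
  //   if (((a | b) >> 32) == 0) { 32-bit div } else { full 64-bit expansion }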
  if (AMDGPUBypassSlowDiv)
    addBypassSlowDiv(64, 32);

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMED3:
    return true;
  default:
    return false;
  }
}

/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case ISD::INLINEASM_BR:
  case AMDGPUISD::DIV_SCALE:
  case ISD::INTRINSIC_W_CHAIN:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  case ISD::INTRINSIC_WO_CHAIN: {
    switch (cast<ConstantSDNode>(N->getOperand(0))->getZExtValue()) {
    case Intrinsic::amdgcn_interp_p1:
    case Intrinsic::amdgcn_interp_p2:
    case Intrinsic::amdgcn_interp_mov:
    case Intrinsic::amdgcn_interp_p1_f16:
    case Intrinsic::amdgcn_interp_p2_f16:
      return false;
    default:
      return true;
    }
  }
  default:
    return true;
  }
}

bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding anyway, so
  // a source modifier is truly free for them. If there are multiple users and
  // each use would force a switch to the VOP3 encoding, there will be a code
  // size increase. Try to avoid increasing code size unless we know it will
  // save on the instruction count.
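  // Illustrative sketch (added commentary, not upstream text): a source
  // modifier is the neg/abs bit on a VALU operand, so (fadd x, (fneg y)) can
  // select to a single
  //   v_add_f32 v0, v1, -v2
  // The fold is free when the instruction already needs the 64-bit VOP3
  // encoding, but it forces a 32-bit VOP2 form to grow into VOP3.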
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit the number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}

EVT AMDGPUTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
                                              ISD::NodeType ExtendKind) const {
  assert(!VT.isVector() && "only scalar expected");

  // Round to the next multiple of 32 bits.
  unsigned Size = VT.getSizeInBits();
  if (Size <= 32)
    return MVT::i32;
  return EVT::getIntegerVT(Context, 32 * ((Size + 31) / 32));
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32- and 64-bit floating-point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                        bool ForCodeSize) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType ExtTy,
                                                 EVT NewVT) const {
  // TODO: This may be worth removing. Check regression tests for diffs.
  if (!TargetLoweringBase::shouldReduceLoadWidth(N, ExtTy, NewVT))
    return false;

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load or a smaller multi-dword load,
  // this is always better.
  if (NewSize >= 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  MemSDNode *MN = cast<MemSDNode>(N);
  unsigned AS = MN->getAddressSpace();
  // Do not shrink an aligned scalar load to sub-dword.
  // The scalar engine cannot do sub-dword loads.
  if (OldSize >= 32 && NewSize < 32 && MN->getAlignment() >= 4 &&
      (AS == AMDGPUAS::CONSTANT_ADDRESS ||
       AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT ||
       (isa<LoadSDNode>(N) &&
        AS == AMDGPUAS::GLOBAL_ADDRESS && MN->isInvariant())) &&
      AMDGPUInstrInfo::isUniformMMO(MN->getMemOperand()))
    return false;
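  // Illustrative sketch (added commentary, not upstream text): the scalar
  // (SMEM) unit loads whole dwords only, e.g. s_load_dword / s_load_dwordx2;
  // with no scalar byte load available, a narrowed uniform load would have to
  // move to the slower vector memory path.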
  // Don't produce extloads from sub-32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in
  // continuing to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
                                                   const SelectionDAG &DAG,
                                                   const MachineMemOperand &MMO) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  if ((LScalarSize >= CastScalarSize) && (CastScalarSize < 32))
    return false;

  bool Fast = false;
  return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                        CastTy, MMO, &Fast) &&
         Fast;
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably
// also profitable for 64-bit, even with the expansion, since it's generally
// good to speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD:
    return cast<LoadSDNode>(N)->getMemOperand()->getAddrSpace() ==
           AMDGPUAS::CONSTANT_ADDRESS_32BIT;
  }
}

TargetLowering::NegatibleCost
AMDGPUTargetLowering::getNegatibleCost(SDValue Op, SelectionDAG &DAG,
                                       bool LegalOperations, bool ForCodeSize,
                                       unsigned Depth) const {
  switch (Op.getOpcode()) {
  case ISD::FMA:
  case ISD::FMAD: {
    // Negating a fma is not free if it has users without source mods.
    if (!allUsesHaveSourceMods(Op.getNode()))
      return NegatibleCost::Expensive;
    break;
  }
  default:
    break;
  }

  return TargetLowering::getNegatibleCost(Op, DAG, LegalOperations, ForCodeSize,
                                          Depth);
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}
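// Illustrative sketch (added commentary, not upstream text): fneg and fabs
// are free in the hooks above because they never need a separate real
// instruction; they either fold into the consumer's source modifiers or
// reduce to a single bit operation, e.g. for f32:
//   fneg(x) = xor(x, 0x80000000)
//   fabs(x) = and(x, 0x7fffffff)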
bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any
  // vector operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into
  // a super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For
  // all practical purposes, the extra mov 0 to materialize a 64-bit value is
  // free. As used, this will enable reducing 64-bit operations to 32-bit,
  // which is always good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}
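// Illustrative sketch (added commentary, not upstream text): a 64-bit value
// occupies a pair of 32-bit registers, so zext i32 -> i64 only has to
// materialize the high half, roughly
//   v_mov_b32 v1, 0    ; high 32 bits; the low register is reused as-is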
bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, only pairs of 32-bit ones, and only
  // a limited number of native 64-bit operations. Shrinking an operation to
  // fit in a single 32-bit register should always be helpful. As currently
  // used, this is much less general than the name suggests, and is only used
  // in places trying to reduce the sizes of loads. Shrinking loads to < 32
  // bits is not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  default:
    report_fatal_error("Unsupported calling convention for call");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    llvm_unreachable("kernels should not be handled here");
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments().
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(
    CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {
  const MachineFunction &MF = State.getMachineFunction();
  const Function &Fn = MF.getFunction();
  LLVMContext &Ctx = Fn.getParent()->getContext();
  const AMDGPUSubtarget &ST = AMDGPUSubtarget::get(MF);
  const unsigned ExplicitOffset = ST.getExplicitKernelArgOffset(Fn);
  CallingConv::ID CC = Fn.getCallingConv();

  unsigned MaxAlign = 1;
  uint64_t ExplicitArgOffset = 0;
  const DataLayout &DL = Fn.getParent()->getDataLayout();

  unsigned InIndex = 0;

  for (const Argument &Arg : Fn.args()) {
    Type *BaseArgTy = Arg.getType();
    unsigned Align = DL.getABITypeAlignment(BaseArgTy);
    MaxAlign = std::max(Align, MaxAlign);
    unsigned AllocSize = DL.getTypeAllocSize(BaseArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, Align) + ExplicitOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, Align) + AllocSize;

    // We're basically throwing away everything passed into us and starting
    // over to get accurate in-memory offsets. The "PartOffset" is completely
    // useless to us as computed in Ins.
    //
    // We also need to figure out what type legalization is trying to do to
    // get the correct memory offsets.

    SmallVector<EVT, 16> ValueVTs;
    SmallVector<uint64_t, 16> Offsets;
    ComputeValueVTs(*this, DL, BaseArgTy, ValueVTs, &Offsets, ArgOffset);

    for (unsigned Value = 0, NumValues = ValueVTs.size();
         Value != NumValues; ++Value) {
      uint64_t BasePartOffset = Offsets[Value];

      EVT ArgVT = ValueVTs[Value];
      EVT MemVT = ArgVT;
      MVT RegisterVT = getRegisterTypeForCallingConv(Ctx, CC, ArgVT);
      unsigned NumRegs = getNumRegistersForCallingConv(Ctx, CC, ArgVT);

      if (NumRegs == 1) {
        // This argument is not split, so the IR type is the memory type.
        if (ArgVT.isExtended()) {
          // We have an extended type, like i24, so we should just use the
          // register type.
          MemVT = RegisterVT;
        } else {
          MemVT = ArgVT;
        }
      } else if (ArgVT.isVector() && RegisterVT.isVector() &&
                 ArgVT.getScalarType() == RegisterVT.getScalarType()) {
        assert(ArgVT.getVectorNumElements() > RegisterVT.getVectorNumElements());
        // We have a vector value which has been split into a vector with
        // the same scalar type, but fewer elements. This should handle
        // all the floating-point vector types.
        MemVT = RegisterVT;
      } else if (ArgVT.isVector() &&
                 ArgVT.getVectorNumElements() == NumRegs) {
        // This arg has been split so that each element is stored in a
        // separate register.
        MemVT = ArgVT.getScalarType();
      } else if (ArgVT.isExtended()) {
        // We have an extended type, like i65.
        MemVT = RegisterVT;
      } else {
        unsigned MemoryBits = ArgVT.getStoreSizeInBits() / NumRegs;
        assert(ArgVT.getStoreSizeInBits() % NumRegs == 0);
        if (RegisterVT.isInteger()) {
          MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
        } else if (RegisterVT.isVector()) {
          assert(!RegisterVT.getScalarType().isFloatingPoint());
          unsigned NumElements = RegisterVT.getVectorNumElements();
          assert(MemoryBits % NumElements == 0);
          // This vector type has been split into another vector type with
          // a different element size.
          EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                           MemoryBits / NumElements);
          MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
        } else {
          llvm_unreachable("cannot deduce memory type.");
        }
      }

      // Convert one-element vectors to scalar.
      if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
        MemVT = MemVT.getScalarType();

      // Round up vec3/vec5 arguments.
      if (MemVT.isVector() && !MemVT.isPow2VectorType()) {
        assert(MemVT.getVectorNumElements() == 3 ||
               MemVT.getVectorNumElements() == 5);
        MemVT = MemVT.getPow2VectorType(State.getContext());
      } else if (!MemVT.isSimple() && !MemVT.isVector()) {
        MemVT = MemVT.getRoundIntegerType(State.getContext());
      }

      unsigned PartOffset = 0;
      for (unsigned i = 0; i != NumRegs; ++i) {
        State.addLoc(CCValAssign::getCustomMem(InIndex++, RegisterVT,
                                               BasePartOffset + PartOffset,
                                               MemVT.getSimpleVT(),
                                               CCValAssign::Full));
        PartOffset += MemVT.getStoreSize();
      }
    }
  }
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  //       "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument load that overlaps the
  // clobbered stack object.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }
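  // Illustrative sketch (added commentary, not upstream text): TokenFactor is
  // the DAG's join of independent chains, so the result below behaves like
  //   chain' = TokenFactor(Chain, load1.chain, load2.chain, ...)
  // which prevents the argument loads found above from being reordered past
  // the store that overwrites their stack slot.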
  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, 1.0F / numbers::log2ef);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, numbers::ln2f / numbers::ln10f);
  case ISD::FEXP:
    return lowerFEXP(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}
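// Illustrative sketch (added commentary, not upstream text): the FLOG and
// FLOG10 cases above lower through the hardware log2 via the change-of-base
// identities
//   ln(x)    = log2(x) * (1 / log2(e))
//   log10(x) = log2(x) * (ln(2) / ln(10))
// which is where the numbers::log2ef, ln2f and ln10f constants come from.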
void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

bool AMDGPUTargetLowering::hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUAS::REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      SDLoc DL(Op);
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          DL.getDebugLoc(), DS_Warning);
      DAG.getContext()->diagnose(BadLDSDecl);

      // We currently don't have a way to correctly allocate LDS objects that
      // aren't directly associated with a kernel. We do force inlining of
      // functions that use local objects. However, if these dead functions
      // are not eliminated, we don't want a compile-time error. Just emit a
      // warning and a trap, since there should be no callable path here.
      SDValue Trap = DAG.getNode(ISD::TRAP, DL, MVT::Other, DAG.getEntryNode());
      SDValue OutputChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
                                        Trap, DAG.getRoot());
      DAG.setRoot(OutputChain);
      return DAG.getUNDEF(Op.getValueType());
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");
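    // Illustrative sketch (added commentary, not upstream text): an LDS
    // global needs no relocation; its "address" is just a byte offset into
    // the workgroup's shared memory, handed out by allocateLDSGlobal() below,
    // so the whole global address lowers to a plain 32-bit constant.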
    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();
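    // Illustrative sketch (added commentary, not upstream text): the *_LEGACY
    // min/max follow the pre-IEEE rule that any compare involving NaN fails,
    // so the second operand is the one selected whenever NaN is involved:
    //   fmin_legacy(NaN, x) -> x,   fmin_legacy(x, NaN) -> NaN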
    // We need to permute the operands to get the correct NaN behavior. When a
    // compare with NaN fails, the hardware selects the second operand, so
    // permute based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

// Split a vector type into two parts. The first part is a power-of-two vector.
// The second part is whatever is left over, and is a scalar if it would
// otherwise be a 1-vector.
std::pair<EVT, EVT>
AMDGPUTargetLowering::getSplitDestVTs(const EVT &VT, SelectionDAG &DAG) const {
  EVT LoVT, HiVT;
  EVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  unsigned LoNumElts = PowerOf2Ceil((NumElts + 1) / 2);
  LoVT = EVT::getVectorVT(*DAG.getContext(), EltVT, LoNumElts);
  HiVT = NumElts - LoNumElts == 1
             ? EltVT
             : EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts - LoNumElts);
  return std::make_pair(LoVT, HiVT);
}
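// Illustrative sketch (added commentary, not upstream text): sample splits
// produced by getSplitDestVTs() above:
//   v3i32 -> (v2i32, i32)
//   v5f32 -> (v4f32, f32)
//   v8i32 -> (v4i32, v4i32)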
ISD::EXTRACT_SUBVECTOR : ISD::EXTRACT_VECTOR_ELT, DL, 1448 HiVT, N, DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), DL)); 1449 return std::make_pair(Lo, Hi); 1450 } 1451 1452 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op, 1453 SelectionDAG &DAG) const { 1454 LoadSDNode *Load = cast<LoadSDNode>(Op); 1455 EVT VT = Op.getValueType(); 1456 SDLoc SL(Op); 1457 1458 1459 // If this is a 2 element vector, we really want to scalarize and not create 1460 // weird 1 element vectors. 1461 if (VT.getVectorNumElements() == 2) { 1462 SDValue Ops[2]; 1463 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(Load, DAG); 1464 return DAG.getMergeValues(Ops, SL); 1465 } 1466 1467 SDValue BasePtr = Load->getBasePtr(); 1468 EVT MemVT = Load->getMemoryVT(); 1469 1470 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); 1471 1472 EVT LoVT, HiVT; 1473 EVT LoMemVT, HiMemVT; 1474 SDValue Lo, Hi; 1475 1476 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG); 1477 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG); 1478 std::tie(Lo, Hi) = splitVector(Op, SL, LoVT, HiVT, DAG); 1479 1480 unsigned Size = LoMemVT.getStoreSize(); 1481 unsigned BaseAlign = Load->getAlignment(); 1482 unsigned HiAlign = MinAlign(BaseAlign, Size); 1483 1484 SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT, 1485 Load->getChain(), BasePtr, SrcValue, LoMemVT, 1486 BaseAlign, Load->getMemOperand()->getFlags()); 1487 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size); 1488 SDValue HiLoad = 1489 DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(), 1490 HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()), 1491 HiMemVT, HiAlign, Load->getMemOperand()->getFlags()); 1492 1493 SDValue Join; 1494 if (LoVT == HiVT) { 1495 // This is the case that the vector is power of two so was evenly split. 1496 Join = DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad); 1497 } else { 1498 Join = DAG.getNode(ISD::INSERT_SUBVECTOR, SL, VT, DAG.getUNDEF(VT), LoLoad, 1499 DAG.getVectorIdxConstant(0, SL)); 1500 Join = DAG.getNode( 1501 HiVT.isVector() ? ISD::INSERT_SUBVECTOR : ISD::INSERT_VECTOR_ELT, SL, 1502 VT, Join, HiLoad, 1503 DAG.getVectorIdxConstant(LoVT.getVectorNumElements(), SL)); 1504 } 1505 1506 SDValue Ops[] = {Join, DAG.getNode(ISD::TokenFactor, SL, MVT::Other, 1507 LoLoad.getValue(1), HiLoad.getValue(1))}; 1508 1509 return DAG.getMergeValues(Ops, SL); 1510 } 1511 1512 // Widen a vector load from vec3 to vec4. 
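// The widened load reads one extra element, which is dropped again via
// EXTRACT_SUBVECTOR below. This presumes the caller only requests the
// widening when touching the fourth element is known to be safe (e.g.
// sufficient alignment and dereferenceable bytes past the vec3).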
1513 SDValue AMDGPUTargetLowering::WidenVectorLoad(SDValue Op, 1514 SelectionDAG &DAG) const { 1515 LoadSDNode *Load = cast<LoadSDNode>(Op); 1516 EVT VT = Op.getValueType(); 1517 assert(VT.getVectorNumElements() == 3); 1518 SDValue BasePtr = Load->getBasePtr(); 1519 EVT MemVT = Load->getMemoryVT(); 1520 SDLoc SL(Op); 1521 const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo(); 1522 unsigned BaseAlign = Load->getAlignment(); 1523 1524 EVT WideVT = 1525 EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 4); 1526 EVT WideMemVT = 1527 EVT::getVectorVT(*DAG.getContext(), MemVT.getVectorElementType(), 4); 1528 SDValue WideLoad = DAG.getExtLoad( 1529 Load->getExtensionType(), SL, WideVT, Load->getChain(), BasePtr, SrcValue, 1530 WideMemVT, BaseAlign, Load->getMemOperand()->getFlags()); 1531 return DAG.getMergeValues( 1532 {DAG.getNode(ISD::EXTRACT_SUBVECTOR, SL, VT, WideLoad, 1533 DAG.getVectorIdxConstant(0, SL)), 1534 WideLoad.getValue(1)}, 1535 SL); 1536 } 1537 1538 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op, 1539 SelectionDAG &DAG) const { 1540 StoreSDNode *Store = cast<StoreSDNode>(Op); 1541 SDValue Val = Store->getValue(); 1542 EVT VT = Val.getValueType(); 1543 1544 // If this is a 2 element vector, we really want to scalarize and not create 1545 // weird 1 element vectors. 1546 if (VT.getVectorNumElements() == 2) 1547 return scalarizeVectorStore(Store, DAG); 1548 1549 EVT MemVT = Store->getMemoryVT(); 1550 SDValue Chain = Store->getChain(); 1551 SDValue BasePtr = Store->getBasePtr(); 1552 SDLoc SL(Op); 1553 1554 EVT LoVT, HiVT; 1555 EVT LoMemVT, HiMemVT; 1556 SDValue Lo, Hi; 1557 1558 std::tie(LoVT, HiVT) = getSplitDestVTs(VT, DAG); 1559 std::tie(LoMemVT, HiMemVT) = getSplitDestVTs(MemVT, DAG); 1560 std::tie(Lo, Hi) = splitVector(Val, SL, LoVT, HiVT, DAG); 1561 1562 SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize()); 1563 1564 const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo(); 1565 unsigned BaseAlign = Store->getAlignment(); 1566 unsigned Size = LoMemVT.getStoreSize(); 1567 unsigned HiAlign = MinAlign(BaseAlign, Size); 1568 1569 SDValue LoStore = 1570 DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign, 1571 Store->getMemOperand()->getFlags()); 1572 SDValue HiStore = 1573 DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size), 1574 HiMemVT, HiAlign, Store->getMemOperand()->getFlags()); 1575 1576 return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore); 1577 } 1578 1579 // This is a shortcut for integer division because we have fast i32<->f32 1580 // conversions, and fast f32 reciprocal instructions. The fractional part of a 1581 // float is enough to accurately represent up to a 24-bit signed integer. 1582 SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, 1583 bool Sign) const { 1584 SDLoc DL(Op); 1585 EVT VT = Op.getValueType(); 1586 SDValue LHS = Op.getOperand(0); 1587 SDValue RHS = Op.getOperand(1); 1588 MVT IntVT = MVT::i32; 1589 MVT FltVT = MVT::f32; 1590 1591 unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS); 1592 if (LHSSignBits < 9) 1593 return SDValue(); 1594 1595 unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS); 1596 if (RHSSignBits < 9) 1597 return SDValue(); 1598 1599 unsigned BitSize = VT.getSizeInBits(); 1600 unsigned SignBits = std::min(LHSSignBits, RHSSignBits); 1601 unsigned DivBits = BitSize - SignBits; 1602 if (Sign) 1603 ++DivBits; 1604 1605 ISD::NodeType ToFp = Sign ? 
ISD::SINT_TO_FP : ISD::UINT_TO_FP; 1606 ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT; 1607 1608 SDValue jq = DAG.getConstant(1, DL, IntVT); 1609 1610 if (Sign) { 1611 // char|short jq = ia ^ ib; 1612 jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS); 1613 1614 // jq = jq >> (bitsize - 2) 1615 jq = DAG.getNode(ISD::SRA, DL, VT, jq, 1616 DAG.getConstant(BitSize - 2, DL, VT)); 1617 1618 // jq = jq | 0x1 1619 jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT)); 1620 } 1621 1622 // int ia = (int)LHS; 1623 SDValue ia = LHS; 1624 1625 // int ib = (int)RHS; 1626 SDValue ib = RHS; 1627 1628 // float fa = (float)ia; 1629 SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia); 1630 1631 // float fb = (float)ib; 1632 SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib); 1633 1634 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT, 1635 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb)); 1636 1637 // fq = trunc(fq); 1638 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq); 1639 1640 // float fqneg = -fq; 1641 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq); 1642 1643 MachineFunction &MF = DAG.getMachineFunction(); 1644 const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>(); 1645 1646 // float fr = mad(fqneg, fb, fa); 1647 unsigned OpCode = !MFI->getMode().allFP32Denormals() ? 1648 (unsigned)ISD::FMAD : 1649 (unsigned)AMDGPUISD::FMAD_FTZ; 1650 1651 SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa); 1652 1653 // int iq = (int)fq; 1654 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq); 1655 1656 // fr = fabs(fr); 1657 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr); 1658 1659 // fb = fabs(fb); 1660 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb); 1661 1662 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 1663 1664 // int cv = fr >= fb; 1665 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE); 1666 1667 // jq = (cv ? jq : 0); 1668 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT)); 1669 1670 // dst = iq + jq; 1671 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq); 1672 1673 // Rem needs compensation; it's easier to recompute it. 1674 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS); 1675 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem); 1676 1677 // Truncate to the number of bits this divide really is.
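// For example, if both i32 operands have at least 9 sign bits (SignBits ==
// 9), the unsigned case uses DivBits = 32 - 9 = 23 and masks Div and Rem
// with (1 << 23) - 1; the signed case keeps one extra bit (DivBits = 24)
// and sign-extends in-register from 24 bits instead.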
1678 if (Sign) { 1679 SDValue InRegSize 1680 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits)); 1681 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize); 1682 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize); 1683 } else { 1684 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT); 1685 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask); 1686 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask); 1687 } 1688 1689 return DAG.getMergeValues({ Div, Rem }, DL); 1690 } 1691 1692 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op, 1693 SelectionDAG &DAG, 1694 SmallVectorImpl<SDValue> &Results) const { 1695 SDLoc DL(Op); 1696 EVT VT = Op.getValueType(); 1697 1698 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64"); 1699 1700 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1701 1702 SDValue One = DAG.getConstant(1, DL, HalfVT); 1703 SDValue Zero = DAG.getConstant(0, DL, HalfVT); 1704 1705 //HiLo split 1706 SDValue LHS = Op.getOperand(0); 1707 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1708 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One); 1709 1710 SDValue RHS = Op.getOperand(1); 1711 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1712 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One); 1713 1714 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) && 1715 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) { 1716 1717 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1718 LHS_Lo, RHS_Lo); 1719 1720 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero}); 1721 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero}); 1722 1723 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV)); 1724 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM)); 1725 return; 1726 } 1727 1728 if (isTypeLegal(MVT::i64)) { 1729 MachineFunction &MF = DAG.getMachineFunction(); 1730 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>(); 1731 1732 // Compute denominator reciprocal. 1733 unsigned FMAD = !MFI->getMode().allFP32Denormals() ? 
1734 (unsigned)ISD::FMAD : 1735 (unsigned)AMDGPUISD::FMAD_FTZ; 1736 1737 1738 SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo); 1739 SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi); 1740 SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi, 1741 DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32), 1742 Cvt_Lo); 1743 SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1); 1744 SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp, 1745 DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32)); 1746 SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1, 1747 DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32)); 1748 SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2); 1749 SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc, 1750 DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32), 1751 Mul1); 1752 SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2); 1753 SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc); 1754 SDValue Rcp64 = DAG.getBitcast(VT, 1755 DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi})); 1756 1757 SDValue Zero64 = DAG.getConstant(0, DL, VT); 1758 SDValue One64 = DAG.getConstant(1, DL, VT); 1759 SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1); 1760 SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1); 1761 1762 SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS); 1763 SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64); 1764 SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1); 1765 SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, 1766 Zero); 1767 SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1, 1768 One); 1769 1770 SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo, 1771 Mulhi1_Lo, Zero1); 1772 SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi, 1773 Mulhi1_Hi, Add1_Lo.getValue(1)); 1774 SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi); 1775 SDValue Add1 = DAG.getBitcast(VT, 1776 DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi})); 1777 1778 SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1); 1779 SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2); 1780 SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, 1781 Zero); 1782 SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2, 1783 One); 1784 1785 SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo, 1786 Mulhi2_Lo, Zero1); 1787 SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc, 1788 Mulhi2_Hi, Add1_Lo.getValue(1)); 1789 SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC, 1790 Zero, Add2_Lo.getValue(1)); 1791 SDValue Add2 = DAG.getBitcast(VT, 1792 DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi})); 1793 SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2); 1794 1795 SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3); 1796 1797 SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero); 1798 SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One); 1799 SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo, 1800 Mul3_Lo, Zero1); 1801 SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi, 1802 Mul3_Hi, Sub1_Lo.getValue(1)); 1803 SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi); 1804 SDValue Sub1 = 
DAG.getBitcast(VT, 1805 DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi})); 1806 1807 SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT); 1808 SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero, 1809 ISD::SETUGE); 1810 SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero, 1811 ISD::SETUGE); 1812 SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ); 1813 1814 // TODO: Here and below portions of the code can be enclosed in if/endif. 1815 // Currently control flow is unconditional and we have 4 selects after the 1816 // potential endif to substitute PHIs. 1817 1818 // if C3 != 0 ... 1819 SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo, 1820 RHS_Lo, Zero1); 1821 SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi, 1822 RHS_Hi, Sub1_Lo.getValue(1)); 1823 SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi, 1824 Zero, Sub2_Lo.getValue(1)); 1825 SDValue Sub2 = DAG.getBitcast(VT, 1826 DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi})); 1827 1828 SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64); 1829 1830 SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero, 1831 ISD::SETUGE); 1832 SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero, 1833 ISD::SETUGE); 1834 SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ); 1835 1836 // if (C6 != 0) 1837 SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64); 1838 1839 SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo, 1840 RHS_Lo, Zero1); 1841 SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi, 1842 RHS_Hi, Sub2_Lo.getValue(1)); 1843 SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi, 1844 Zero, Sub3_Lo.getValue(1)); 1845 SDValue Sub3 = DAG.getBitcast(VT, 1846 DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi})); 1847 1848 // endif C6 1849 // endif C3 1850 1851 SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE); 1852 SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE); 1853 1854 SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE); 1855 SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE); 1856 1857 Results.push_back(Div); 1858 Results.push_back(Rem); 1859 1860 return; 1861 } 1862 1863 // r600 expansion.
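// The loop below is classic restoring long division on the low half: each
// iteration shifts the next bit of LHS_Lo into the running remainder, and
// whenever REM >= RHS it subtracts RHS and sets the corresponding bit of
// DIV_Lo.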
1864 // Get speculative values 1865 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo); 1866 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo); 1867 1868 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ); 1869 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero}); 1870 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM); 1871 1872 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ); 1873 SDValue DIV_Lo = Zero; 1874 1875 const unsigned halfBitWidth = HalfVT.getSizeInBits(); 1876 1877 for (unsigned i = 0; i < halfBitWidth; ++i) { 1878 const unsigned bitPos = halfBitWidth - i - 1; 1879 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT); 1880 // Get value of high bit 1881 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS); 1882 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One); 1883 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit); 1884 1885 // Shift 1886 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT)); 1887 // Add LHS high bit 1888 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit); 1889 1890 SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT); 1891 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE); 1892 1893 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT); 1894 1895 // Update REM 1896 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS); 1897 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE); 1898 } 1899 1900 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi}); 1901 DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV); 1902 Results.push_back(DIV); 1903 Results.push_back(REM); 1904 } 1905 1906 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op, 1907 SelectionDAG &DAG) const { 1908 SDLoc DL(Op); 1909 EVT VT = Op.getValueType(); 1910 1911 if (VT == MVT::i64) { 1912 SmallVector<SDValue, 2> Results; 1913 LowerUDIVREM64(Op, DAG, Results); 1914 return DAG.getMergeValues(Results, DL); 1915 } 1916 1917 if (VT == MVT::i32) { 1918 if (SDValue Res = LowerDIVREM24(Op, DAG, false)) 1919 return Res; 1920 } 1921 1922 SDValue Num = Op.getOperand(0); 1923 SDValue Den = Op.getOperand(1); 1924 1925 // RCP = URECIP(Den) = 2^32 / Den + e 1926 // e is rounding error. 1927 SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den); 1928 1929 // RCP_LO = mul(RCP, Den) 1930 SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den); 1931 1932 // RCP_HI = mulhu(RCP, Den) 1933 SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den); 1934 1935 // NEG_RCP_LO = -RCP_LO 1936 SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), 1937 RCP_LO); 1938 1939 // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO) 1940 SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT), 1941 NEG_RCP_LO, RCP_LO, 1942 ISD::SETEQ); 1943 // Calculate the rounding error from the URECIP instruction 1944 // E = mulhu(ABS_RCP_LO, RCP) 1945 SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP); 1946 1947 // RCP_A_E = RCP + E 1948 SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E); 1949 1950 // RCP_S_E = RCP - E 1951 SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E); 1952 1953 // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
1954 SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT), 1955 RCP_A_E, RCP_S_E, 1956 ISD::SETEQ); 1957 // Quotient = mulhu(Tmp0, Num) 1958 SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num); 1959 1960 // Num_S_Remainder = Quotient * Den 1961 SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den); 1962 1963 // Remainder = Num - Num_S_Remainder 1964 SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder); 1965 1966 // Remainder_GE_Den = (Remainder >= Den ? -1 : 0) 1967 SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den, 1968 DAG.getConstant(-1, DL, VT), 1969 DAG.getConstant(0, DL, VT), 1970 ISD::SETUGE); 1971 // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0) 1972 SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num, 1973 Num_S_Remainder, 1974 DAG.getConstant(-1, DL, VT), 1975 DAG.getConstant(0, DL, VT), 1976 ISD::SETUGE); 1977 // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero 1978 SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den, 1979 Remainder_GE_Zero); 1980 1981 // Calculate Division result: 1982 1983 // Quotient_A_One = Quotient + 1 1984 SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient, 1985 DAG.getConstant(1, DL, VT)); 1986 1987 // Quotient_S_One = Quotient - 1 1988 SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient, 1989 DAG.getConstant(1, DL, VT)); 1990 1991 // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One) 1992 SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT), 1993 Quotient, Quotient_A_One, ISD::SETEQ); 1994 1995 // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div) 1996 Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1997 Quotient_S_One, Div, ISD::SETEQ); 1998 1999 // Calculate Rem result: 2000 2001 // Remainder_S_Den = Remainder - Den 2002 SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den); 2003 2004 // Remainder_A_Den = Remainder + Den 2005 SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den); 2006 2007 // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den) 2008 SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT), 2009 Remainder, Remainder_S_Den, ISD::SETEQ); 2010 2011 // Rem = (Remainder_GE_Zero == 0 ?
Remainder_A_Den : Rem) 2012 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 2013 Remainder_A_Den, Rem, ISD::SETEQ); 2014 SDValue Ops[2] = { 2015 Div, 2016 Rem 2017 }; 2018 return DAG.getMergeValues(Ops, DL); 2019 } 2020 2021 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, 2022 SelectionDAG &DAG) const { 2023 SDLoc DL(Op); 2024 EVT VT = Op.getValueType(); 2025 2026 SDValue LHS = Op.getOperand(0); 2027 SDValue RHS = Op.getOperand(1); 2028 2029 SDValue Zero = DAG.getConstant(0, DL, VT); 2030 SDValue NegOne = DAG.getConstant(-1, DL, VT); 2031 2032 if (VT == MVT::i32) { 2033 if (SDValue Res = LowerDIVREM24(Op, DAG, true)) 2034 return Res; 2035 } 2036 2037 if (VT == MVT::i64 && 2038 DAG.ComputeNumSignBits(LHS) > 32 && 2039 DAG.ComputeNumSignBits(RHS) > 32) { 2040 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 2041 2042 //HiLo split 2043 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 2044 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 2045 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 2046 LHS_Lo, RHS_Lo); 2047 SDValue Res[2] = { 2048 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), 2049 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) 2050 }; 2051 return DAG.getMergeValues(Res, DL); 2052 } 2053 2054 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT); 2055 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT); 2056 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); 2057 SDValue RSign = LHSign; // Remainder sign is the same as LHS 2058 2059 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); 2060 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); 2061 2062 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); 2063 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); 2064 2065 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS); 2066 SDValue Rem = Div.getValue(1); 2067 2068 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); 2069 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); 2070 2071 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); 2072 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); 2073 2074 SDValue Res[2] = { 2075 Div, 2076 Rem 2077 }; 2078 return DAG.getMergeValues(Res, DL); 2079 } 2080 2081 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y)) 2082 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const { 2083 SDLoc SL(Op); 2084 EVT VT = Op.getValueType(); 2085 SDValue X = Op.getOperand(0); 2086 SDValue Y = Op.getOperand(1); 2087 2088 // TODO: Should this propagate fast-math-flags? 
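// For example, frem(5.5, 2.0): the fdiv below gives 2.75, ftrunc gives 2.0,
// fmul gives 4.0, and the final fsub yields 1.5, which carries the sign of
// x as frem requires.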
2089 2090 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y); 2091 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div); 2092 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Trunc, Y); 2093 2094 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul); 2095 } 2096 2097 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const { 2098 SDLoc SL(Op); 2099 SDValue Src = Op.getOperand(0); 2100 2101 // result = trunc(src) 2102 // if (src > 0.0 && src != result) 2103 // result += 1.0 2104 2105 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2106 2107 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 2108 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 2109 2110 EVT SetCCVT = 2111 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2112 2113 SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT); 2114 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 2115 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc); 2116 2117 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero); 2118 // TODO: Should this propagate fast-math-flags? 2119 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 2120 } 2121 2122 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL, 2123 SelectionDAG &DAG) { 2124 const unsigned FractBits = 52; 2125 const unsigned ExpBits = 11; 2126 2127 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 2128 Hi, 2129 DAG.getConstant(FractBits - 32, SL, MVT::i32), 2130 DAG.getConstant(ExpBits, SL, MVT::i32)); 2131 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart, 2132 DAG.getConstant(1023, SL, MVT::i32)); 2133 2134 return Exp; 2135 } 2136 2137 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const { 2138 SDLoc SL(Op); 2139 SDValue Src = Op.getOperand(0); 2140 2141 assert(Op.getValueType() == MVT::f64); 2142 2143 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2144 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2145 2146 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2147 2148 // Extract the upper half, since this is where we will find the sign and 2149 // exponent. 2150 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One); 2151 2152 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 2153 2154 const unsigned FractBits = 52; 2155 2156 // Extract the sign bit. 2157 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32); 2158 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask); 2159 2160 // Extend back to 64 bits.
2161 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit}); 2162 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64); 2163 2164 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src); 2165 const SDValue FractMask 2166 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); 2167 2168 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp); 2169 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64); 2170 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not); 2171 2172 EVT SetCCVT = 2173 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2174 2175 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); 2176 2177 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2178 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2179 2180 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0); 2181 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1); 2182 2183 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2); 2184 } 2185 2186 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const { 2187 SDLoc SL(Op); 2188 SDValue Src = Op.getOperand(0); 2189 2190 assert(Op.getValueType() == MVT::f64); 2191 2192 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52"); 2193 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64); 2194 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src); 2195 2196 // TODO: Should this propagate fast-math-flags? 2197 2198 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign); 2199 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign); 2200 2201 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src); 2202 2203 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51"); 2204 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64); 2205 2206 EVT SetCCVT = 2207 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2208 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT); 2209 2210 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2); 2211 } 2212 2213 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const { 2214 // FNEARBYINT and FRINT are the same, except in their handling of FP 2215 // exceptions. Those aren't really meaningful for us, and OpenCL only has 2216 // rint, so just treat them as equivalent. 2217 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0)); 2218 } 2219 2220 // XXX - May require not supporting f32 denormals? 2221 2222 // Don't handle v2f16. The extra instructions to scalarize and repack around the 2223 // compare and vselect end up producing worse code than scalarizing the whole 2224 // operation. 2225 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { 2226 SDLoc SL(Op); 2227 SDValue X = Op.getOperand(0); 2228 EVT VT = Op.getValueType(); 2229 2230 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X); 2231 2232 // TODO: Should this propagate fast-math-flags? 
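// For example, for x = 2.5: T = 2.0, the difference below is 0.5, so
// AbsDiff >= 0.5 holds and copysign(1.0, x) = 1.0 is added, giving 3.0.
// x = -2.5 symmetrically yields -3.0, i.e. halfway cases round away from
// zero.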
2233 2234 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T); 2235 2236 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff); 2237 2238 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT); 2239 const SDValue One = DAG.getConstantFP(1.0, SL, VT); 2240 const SDValue Half = DAG.getConstantFP(0.5, SL, VT); 2241 2242 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X); 2243 2244 EVT SetCCVT = 2245 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 2246 2247 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE); 2248 2249 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero); 2250 2251 return DAG.getNode(ISD::FADD, SL, VT, T, Sel); 2252 } 2253 2254 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { 2255 SDLoc SL(Op); 2256 SDValue Src = Op.getOperand(0); 2257 2258 // result = trunc(src); 2259 // if (src < 0.0 && src != result) 2260 // result += -1.0 2261 2262 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2263 2264 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 2265 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64); 2266 2267 EVT SetCCVT = 2268 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2269 2270 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT); 2271 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 2272 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 2273 2274 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero); 2275 // TODO: Should this propagate fast-math-flags? 2276 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 2277 } 2278 2279 SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG, 2280 double Log2BaseInverted) const { 2281 EVT VT = Op.getValueType(); 2282 2283 SDLoc SL(Op); 2284 SDValue Operand = Op.getOperand(0); 2285 SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand); 2286 SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT); 2287 2288 return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand); 2289 } 2290 2291 // exp2(M_LOG2E_F * f); 2292 SDValue AMDGPUTargetLowering::lowerFEXP(SDValue Op, SelectionDAG &DAG) const { 2293 EVT VT = Op.getValueType(); 2294 SDLoc SL(Op); 2295 SDValue Src = Op.getOperand(0); 2296 2297 const SDValue K = DAG.getConstantFP(numbers::log2e, SL, VT); 2298 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Src, K, Op->getFlags()); 2299 return DAG.getNode(ISD::FEXP2, SL, VT, Mul, Op->getFlags()); 2300 } 2301 2302 static bool isCtlzOpc(unsigned Opc) { 2303 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF; 2304 } 2305 2306 static bool isCttzOpc(unsigned Opc) { 2307 return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF; 2308 } 2309 2310 SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const { 2311 SDLoc SL(Op); 2312 SDValue Src = Op.getOperand(0); 2313 bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF || 2314 Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF; 2315 2316 unsigned ISDOpc, NewOpc; 2317 if (isCtlzOpc(Op.getOpcode())) { 2318 ISDOpc = ISD::CTLZ_ZERO_UNDEF; 2319 NewOpc = AMDGPUISD::FFBH_U32; 2320 } else if (isCttzOpc(Op.getOpcode())) { 2321 ISDOpc = ISD::CTTZ_ZERO_UNDEF; 2322 NewOpc = AMDGPUISD::FFBL_B32; 2323 } else 2324 llvm_unreachable("Unexpected opcode"); 2325 2326 2327 if (ZeroUndef && Src.getValueType() == MVT::i32) 2328 return DAG.getNode(NewOpc, SL, MVT::i32, Src); 2329 2330 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32,
Src); 2331 2332 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2333 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2334 2335 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero); 2336 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One); 2337 2338 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 2339 *DAG.getContext(), MVT::i32); 2340 2341 SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo; 2342 SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ); 2343 2344 SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo); 2345 SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi); 2346 2347 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32); 2348 SDValue Add, NewOpr; 2349 if (isCtlzOpc(Op.getOpcode())) { 2350 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32); 2351 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x)) 2352 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi); 2353 } else { 2354 Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32); 2355 // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x)) 2356 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo); 2357 } 2358 2359 if (!ZeroUndef) { 2360 // Test if the full 64-bit input is zero. 2361 2362 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32, 2363 // which we probably don't want. 2364 SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi; 2365 SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ); 2366 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0); 2367 2368 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction 2369 // with the same cycles, otherwise it is slower. 2370 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src, 2371 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ); 2372 2373 const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32); 2374 2375 // The instruction returns -1 for 0 input, but the defined intrinsic 2376 // behavior is to return the number of bits. 2377 NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, 2378 SrcIsZero, Bits64, NewOpr); 2379 } 2380 2381 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr); 2382 } 2383 2384 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, 2385 bool Signed) const { 2386 // Unsigned 2387 // cul2f(ulong u) 2388 //{ 2389 // uint lz = clz(u); 2390 // uint e = (u != 0) ? 127U + 63U - lz : 0; 2391 // u = (u << lz) & 0x7fffffffffffffffUL; 2392 // ulong t = u & 0xffffffffffUL; 2393 // uint v = (e << 23) | (uint)(u >> 40); 2394 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U); 2395 // return as_float(v + r); 2396 //} 2397 // Signed 2398 // cl2f(long l) 2399 //{ 2400 // long s = l >> 63; 2401 // float r = cul2f((l + s) ^ s); 2402 // return s ?
-r : r; 2403 //} 2404 2405 SDLoc SL(Op); 2406 SDValue Src = Op.getOperand(0); 2407 SDValue L = Src; 2408 2409 SDValue S; 2410 if (Signed) { 2411 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64); 2412 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit); 2413 2414 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S); 2415 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S); 2416 } 2417 2418 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 2419 *DAG.getContext(), MVT::f32); 2420 2421 2422 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32); 2423 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64); 2424 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L); 2425 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ); 2426 2427 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32); 2428 SDValue E = DAG.getSelect(SL, MVT::i32, 2429 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE), 2430 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ), 2431 ZeroI32); 2432 2433 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64, 2434 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ), 2435 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64)); 2436 2437 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U, 2438 DAG.getConstant(0xffffffffffULL, SL, MVT::i64)); 2439 2440 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64, 2441 U, DAG.getConstant(40, SL, MVT::i64)); 2442 2443 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32, 2444 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)), 2445 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl)); 2446 2447 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64); 2448 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT); 2449 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ); 2450 2451 SDValue One = DAG.getConstant(1, SL, MVT::i32); 2452 2453 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One); 2454 2455 SDValue R = DAG.getSelect(SL, MVT::i32, 2456 RCmp, 2457 One, 2458 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32)); 2459 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); 2460 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); 2461 2462 if (!Signed) 2463 return R; 2464 2465 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); 2466 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); 2467 } 2468 2469 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, 2470 bool Signed) const { 2471 SDLoc SL(Op); 2472 SDValue Src = Op.getOperand(0); 2473 2474 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2475 2476 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2477 DAG.getConstant(0, SL, MVT::i32)); 2478 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2479 DAG.getConstant(1, SL, MVT::i32)); 2480 2481 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP, 2482 SL, MVT::f64, Hi); 2483 2484 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); 2485 2486 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, 2487 DAG.getConstant(32, SL, MVT::i32)); 2488 // TODO: Should this propagate fast-math-flags? 2489 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); 2490 } 2491 2492 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, 2493 SelectionDAG &DAG) const { 2494 // TODO: Factor out code common with LowerSINT_TO_FP. 
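// An i16 source is zero-extended to i32 and reconverted below. For i64
// sources, an f16 destination is handled by converting to f32 and rounding;
// f32 and f64 destinations use LowerINT_TO_FP32 and LowerINT_TO_FP64 above.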
2495 EVT DestVT = Op.getValueType(); 2496 SDValue Src = Op.getOperand(0); 2497 EVT SrcVT = Src.getValueType(); 2498 2499 if (SrcVT == MVT::i16) { 2500 if (DestVT == MVT::f16) 2501 return Op; 2502 SDLoc DL(Op); 2503 2504 // Promote src to i32 2505 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Src); 2506 return DAG.getNode(ISD::UINT_TO_FP, DL, DestVT, Ext); 2507 } 2508 2509 assert(SrcVT == MVT::i64 && "operation should be legal"); 2510 2511 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2512 SDLoc DL(Op); 2513 2514 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2515 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2516 SDValue FPRound = 2517 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2518 2519 return FPRound; 2520 } 2521 2522 if (DestVT == MVT::f32) 2523 return LowerINT_TO_FP32(Op, DAG, false); 2524 2525 assert(DestVT == MVT::f64); 2526 return LowerINT_TO_FP64(Op, DAG, false); 2527 } 2528 2529 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, 2530 SelectionDAG &DAG) const { 2531 EVT DestVT = Op.getValueType(); 2532 2533 SDValue Src = Op.getOperand(0); 2534 EVT SrcVT = Src.getValueType(); 2535 2536 if (SrcVT == MVT::i16) { 2537 if (DestVT == MVT::f16) 2538 return Op; 2539 2540 SDLoc DL(Op); 2541 // Promote src to i32 2542 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32, Src); 2543 return DAG.getNode(ISD::SINT_TO_FP, DL, DestVT, Ext); 2544 } 2545 2546 assert(SrcVT == MVT::i64 && "operation should be legal"); 2547 2548 // TODO: Factor out code common with LowerUINT_TO_FP. 2549 2550 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2551 SDLoc DL(Op); 2552 SDValue Src = Op.getOperand(0); 2553 2554 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2555 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2556 SDValue FPRound = 2557 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2558 2559 return FPRound; 2560 } 2561 2562 if (DestVT == MVT::f32) 2563 return LowerINT_TO_FP32(Op, DAG, true); 2564 2565 assert(DestVT == MVT::f64); 2566 return LowerINT_TO_FP64(Op, DAG, true); 2567 } 2568 2569 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, 2570 bool Signed) const { 2571 SDLoc SL(Op); 2572 2573 SDValue Src = Op.getOperand(0); 2574 2575 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2576 2577 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL, 2578 MVT::f64); 2579 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL, 2580 MVT::f64); 2581 // TODO: Should this propagate fast-math-flags? 2582 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0); 2583 2584 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul); 2585 2586 2587 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc); 2588 2589 SDValue Hi = DAG.getNode(Signed ? 
ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL, 2590 MVT::i32, FloorMul); 2591 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma); 2592 2593 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}); 2594 2595 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result); 2596 } 2597 2598 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const { 2599 SDLoc DL(Op); 2600 SDValue N0 = Op.getOperand(0); 2601 2602 // Convert to target node to get known bits 2603 if (N0.getValueType() == MVT::f32) 2604 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0); 2605 2606 if (getTargetMachine().Options.UnsafeFPMath) { 2607 // There is a generic expand for FP_TO_FP16 with unsafe fast math. 2608 return SDValue(); 2609 } 2610 2611 assert(N0.getSimpleValueType() == MVT::f64); 2612 2613 // f64 -> f16 conversion using round-to-nearest-even rounding mode. 2614 const unsigned ExpMask = 0x7ff; 2615 const unsigned ExpBiasf64 = 1023; 2616 const unsigned ExpBiasf16 = 15; 2617 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2618 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2619 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0); 2620 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U, 2621 DAG.getConstant(32, DL, MVT::i64)); 2622 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32); 2623 U = DAG.getZExtOrTrunc(U, DL, MVT::i32); 2624 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2625 DAG.getConstant(20, DL, MVT::i64)); 2626 E = DAG.getNode(ISD::AND, DL, MVT::i32, E, 2627 DAG.getConstant(ExpMask, DL, MVT::i32)); 2628 // Subtract the fp64 exponent bias (1023) to get the real exponent and 2629 // add the f16 bias (15) to get the biased exponent for the f16 format. 2630 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E, 2631 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); 2632 2633 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2634 DAG.getConstant(8, DL, MVT::i32)); 2635 M = DAG.getNode(ISD::AND, DL, MVT::i32, M, 2636 DAG.getConstant(0xffe, DL, MVT::i32)); 2637 2638 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH, 2639 DAG.getConstant(0x1ff, DL, MVT::i32)); 2640 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U); 2641 2642 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ); 2643 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set); 2644 2645 // (M != 0 ? 
0x0200 : 0) | 0x7c00; 2646 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32, 2647 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32), 2648 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32)); 2649 2650 // N = M | (E << 12); 2651 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2652 DAG.getNode(ISD::SHL, DL, MVT::i32, E, 2653 DAG.getConstant(12, DL, MVT::i32))); 2654 2655 // B = clamp(1-E, 0, 13); 2656 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32, 2657 One, E); 2658 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero); 2659 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B, 2660 DAG.getConstant(13, DL, MVT::i32)); 2661 2662 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2663 DAG.getConstant(0x1000, DL, MVT::i32)); 2664 2665 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B); 2666 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B); 2667 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE); 2668 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1); 2669 2670 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT); 2671 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V, 2672 DAG.getConstant(0x7, DL, MVT::i32)); 2673 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V, 2674 DAG.getConstant(2, DL, MVT::i32)); 2675 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32), 2676 One, Zero, ISD::SETEQ); 2677 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32), 2678 One, Zero, ISD::SETGT); 2679 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1); 2680 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1); 2681 2682 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32), 2683 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT); 2684 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32), 2685 I, V, ISD::SETEQ); 2686 2687 // Extract the sign bit. 2688 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2689 DAG.getConstant(16, DL, MVT::i32)); 2690 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign, 2691 DAG.getConstant(0x8000, DL, MVT::i32)); 2692 2693 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V); 2694 return DAG.getZExtOrTrunc(V, DL, Op.getValueType()); 2695 } 2696 2697 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op, 2698 SelectionDAG &DAG) const { 2699 SDValue Src = Op.getOperand(0); 2700 2701 // TODO: Factor out code common with LowerFP_TO_UINT. 2702 2703 EVT SrcVT = Src.getValueType(); 2704 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2705 SDLoc DL(Op); 2706 2707 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2708 SDValue FpToInt32 = 2709 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2710 2711 return FpToInt32; 2712 } 2713 2714 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2715 return LowerFP64_TO_INT(Op, DAG, true); 2716 2717 return SDValue(); 2718 } 2719 2720 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op, 2721 SelectionDAG &DAG) const { 2722 SDValue Src = Op.getOperand(0); 2723 2724 // TODO: Factor out code common with LowerFP_TO_SINT. 
2725 2726 EVT SrcVT = Src.getValueType(); 2727 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2728 SDLoc DL(Op); 2729 2730 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2731 SDValue FpToInt32 = 2732 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2733 2734 return FpToInt32; 2735 } 2736 2737 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2738 return LowerFP64_TO_INT(Op, DAG, false); 2739 2740 return SDValue(); 2741 } 2742 2743 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 2744 SelectionDAG &DAG) const { 2745 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2746 MVT VT = Op.getSimpleValueType(); 2747 MVT ScalarVT = VT.getScalarType(); 2748 2749 assert(VT.isVector()); 2750 2751 SDValue Src = Op.getOperand(0); 2752 SDLoc DL(Op); 2753 2754 // TODO: Don't scalarize on Evergreen? 2755 unsigned NElts = VT.getVectorNumElements(); 2756 SmallVector<SDValue, 8> Args; 2757 DAG.ExtractVectorElements(Src, Args, 0, NElts); 2758 2759 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType()); 2760 for (unsigned I = 0; I < NElts; ++I) 2761 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp); 2762 2763 return DAG.getBuildVector(VT, DL, Args); 2764 } 2765 2766 //===----------------------------------------------------------------------===// 2767 // Custom DAG optimizations 2768 //===----------------------------------------------------------------------===// 2769 2770 static bool isU24(SDValue Op, SelectionDAG &DAG) { 2771 return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24; 2772 } 2773 2774 static bool isI24(SDValue Op, SelectionDAG &DAG) { 2775 EVT VT = Op.getValueType(); 2776 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated 2777 // as unsigned 24-bit values. 2778 AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24; 2779 } 2780 2781 static SDValue simplifyI24(SDNode *Node24, 2782 TargetLowering::DAGCombinerInfo &DCI) { 2783 SelectionDAG &DAG = DCI.DAG; 2784 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2785 bool IsIntrin = Node24->getOpcode() == ISD::INTRINSIC_WO_CHAIN; 2786 2787 SDValue LHS = IsIntrin ? Node24->getOperand(1) : Node24->getOperand(0); 2788 SDValue RHS = IsIntrin ? Node24->getOperand(2) : Node24->getOperand(1); 2789 unsigned NewOpcode = Node24->getOpcode(); 2790 if (IsIntrin) { 2791 unsigned IID = cast<ConstantSDNode>(Node24->getOperand(0))->getZExtValue(); 2792 NewOpcode = IID == Intrinsic::amdgcn_mul_i24 ? 2793 AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 2794 } 2795 2796 APInt Demanded = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 24); 2797 2798 // First try to simplify using SimplifyMultipleUseDemandedBits which allows 2799 // the operands to have other uses, but will only perform simplifications that 2800 // involve bypassing some nodes for this user. 2801 SDValue DemandedLHS = TLI.SimplifyMultipleUseDemandedBits(LHS, Demanded, DAG); 2802 SDValue DemandedRHS = TLI.SimplifyMultipleUseDemandedBits(RHS, Demanded, DAG); 2803 if (DemandedLHS || DemandedRHS) 2804 return DAG.getNode(NewOpcode, SDLoc(Node24), Node24->getVTList(), 2805 DemandedLHS ? DemandedLHS : LHS, 2806 DemandedRHS ? DemandedRHS : RHS); 2807 2808 // Now try SimplifyDemandedBits which can simplify the nodes used by our 2809 // operands if this node is the only user. 
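// For example, in (mul_u24 (and x, 0xffffff), y) the mask adds nothing to
// the 24 bits the multiply reads: the first attempt above can use x
// directly even when the and has other users, while this second attempt
// rewrites the and itself when this node is its only user.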
2810 if (TLI.SimplifyDemandedBits(LHS, Demanded, DCI)) 2811 return SDValue(Node24, 0); 2812 if (TLI.SimplifyDemandedBits(RHS, Demanded, DCI)) 2813 return SDValue(Node24, 0); 2814 2815 return SDValue(); 2816 } 2817 2818 template <typename IntTy> 2819 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset, 2820 uint32_t Width, const SDLoc &DL) { 2821 if (Width + Offset < 32) { 2822 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width); 2823 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width); 2824 return DAG.getConstant(Result, DL, MVT::i32); 2825 } 2826 2827 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32); 2828 } 2829 2830 static bool hasVolatileUser(SDNode *Val) { 2831 for (SDNode *U : Val->uses()) { 2832 if (MemSDNode *M = dyn_cast<MemSDNode>(U)) { 2833 if (M->isVolatile()) 2834 return true; 2835 } 2836 } 2837 2838 return false; 2839 } 2840 2841 bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const { 2842 // i32 vectors are the canonical memory type. 2843 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT)) 2844 return false; 2845 2846 if (!VT.isByteSized()) 2847 return false; 2848 2849 unsigned Size = VT.getStoreSize(); 2850 2851 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector()) 2852 return false; 2853 2854 if (Size == 3 || (Size > 4 && (Size % 4 != 0))) 2855 return false; 2856 2857 return true; 2858 } 2859 2860 // Replace load of an illegal type with a store of a bitcast to a friendlier 2861 // type. 2862 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N, 2863 DAGCombinerInfo &DCI) const { 2864 if (!DCI.isBeforeLegalize()) 2865 return SDValue(); 2866 2867 LoadSDNode *LN = cast<LoadSDNode>(N); 2868 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN)) 2869 return SDValue(); 2870 2871 SDLoc SL(N); 2872 SelectionDAG &DAG = DCI.DAG; 2873 EVT VT = LN->getMemoryVT(); 2874 2875 unsigned Size = VT.getStoreSize(); 2876 unsigned Align = LN->getAlignment(); 2877 if (Align < Size && isTypeLegal(VT)) { 2878 bool IsFast; 2879 unsigned AS = LN->getAddressSpace(); 2880 2881 // Expand unaligned loads earlier than legalization. Due to visitation order 2882 // problems during legalization, the emitted instructions to pack and unpack 2883 // the bytes again are not eliminated in the case of an unaligned copy. 2884 if (!allowsMisalignedMemoryAccesses( 2885 VT, AS, Align, LN->getMemOperand()->getFlags(), &IsFast)) { 2886 SDValue Ops[2]; 2887 2888 if (VT.isVector()) 2889 std::tie(Ops[0], Ops[1]) = scalarizeVectorLoad(LN, DAG); 2890 else 2891 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG); 2892 2893 return DAG.getMergeValues(Ops, SDLoc(N)); 2894 } 2895 2896 if (!IsFast) 2897 return SDValue(); 2898 } 2899 2900 if (!shouldCombineMemoryType(VT)) 2901 return SDValue(); 2902 2903 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2904 2905 SDValue NewLoad 2906 = DAG.getLoad(NewVT, SL, LN->getChain(), 2907 LN->getBasePtr(), LN->getMemOperand()); 2908 2909 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad); 2910 DCI.CombineTo(N, BC, NewLoad.getValue(1)); 2911 return SDValue(N, 0); 2912 } 2913 2914 // Replace store of an illegal type with a store of a bitcast to a friendlier 2915 // type. 
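// For example, a v2i8 store (16 bits) becomes an i16 store of a bitcast of
// the value; see getEquivalentMemType for the mapping.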
2916 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N, 2917 DAGCombinerInfo &DCI) const { 2918 if (!DCI.isBeforeLegalize()) 2919 return SDValue(); 2920 2921 StoreSDNode *SN = cast<StoreSDNode>(N); 2922 if (SN->isVolatile() || !ISD::isNormalStore(SN)) 2923 return SDValue(); 2924 2925 EVT VT = SN->getMemoryVT(); 2926 unsigned Size = VT.getStoreSize(); 2927 2928 SDLoc SL(N); 2929 SelectionDAG &DAG = DCI.DAG; 2930 unsigned Align = SN->getAlignment(); 2931 if (Align < Size && isTypeLegal(VT)) { 2932 bool IsFast; 2933 unsigned AS = SN->getAddressSpace(); 2934 2935 // Expand unaligned stores earlier than legalization. Due to visitation 2936 // order problems during legalization, the emitted instructions to pack and 2937 // unpack the bytes again are not eliminated in the case of an unaligned 2938 // copy. 2939 if (!allowsMisalignedMemoryAccesses( 2940 VT, AS, Align, SN->getMemOperand()->getFlags(), &IsFast)) { 2941 if (VT.isVector()) 2942 return scalarizeVectorStore(SN, DAG); 2943 2944 return expandUnalignedStore(SN, DAG); 2945 } 2946 2947 if (!IsFast) 2948 return SDValue(); 2949 } 2950 2951 if (!shouldCombineMemoryType(VT)) 2952 return SDValue(); 2953 2954 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2955 SDValue Val = SN->getValue(); 2956 2957 //DCI.AddToWorklist(Val.getNode()); 2958 2959 bool OtherUses = !Val.hasOneUse(); 2960 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val); 2961 if (OtherUses) { 2962 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal); 2963 DAG.ReplaceAllUsesOfValueWith(Val, CastBack); 2964 } 2965 2966 return DAG.getStore(SN->getChain(), SL, CastVal, 2967 SN->getBasePtr(), SN->getMemOperand()); 2968 } 2969 2970 // FIXME: This should go in generic DAG combiner with an isTruncateFree check, 2971 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU 2972 // issues. 2973 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N, 2974 DAGCombinerInfo &DCI) const { 2975 SelectionDAG &DAG = DCI.DAG; 2976 SDValue N0 = N->getOperand(0); 2977 2978 // (vt2 (assertzext (truncate vt0:x), vt1)) -> 2979 // (vt2 (truncate (assertzext vt0:x, vt1))) 2980 if (N0.getOpcode() == ISD::TRUNCATE) { 2981 SDValue N1 = N->getOperand(1); 2982 EVT ExtVT = cast<VTSDNode>(N1)->getVT(); 2983 SDLoc SL(N); 2984 2985 SDValue Src = N0.getOperand(0); 2986 EVT SrcVT = Src.getValueType(); 2987 if (SrcVT.bitsGE(ExtVT)) { 2988 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1); 2989 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg); 2990 } 2991 } 2992 2993 return SDValue(); 2994 } 2995 2996 SDValue AMDGPUTargetLowering::performIntrinsicWOChainCombine( 2997 SDNode *N, DAGCombinerInfo &DCI) const { 2998 unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); 2999 switch (IID) { 3000 case Intrinsic::amdgcn_mul_i24: 3001 case Intrinsic::amdgcn_mul_u24: 3002 return simplifyI24(N, DCI); 3003 default: 3004 return SDValue(); 3005 } 3006 } 3007 3008 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the 3009 /// binary operation \p Opc to it with the corresponding constant operands. 
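/// For example, (and i64:x, 0xffffffff00000000) becomes
/// (build_pair (and lo_32(x), 0), (and hi_32(x), 0xffffffff)), letting each
/// 32-bit half fold independently.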
SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
  DAGCombinerInfo &DCI, const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  uint32_t ValLo, uint32_t ValHi) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Lo, Hi;
  std::tie(Lo, Hi) = split64BitValue(LHS, DAG);

  SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
  SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);

  SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
  SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);

  // Re-visit the ands. It's possible we eliminated one of them and it could
  // simplify the vector.
  DCI.AddToWorklist(Lo.getNode());
  DCI.AddToWorklist(Hi.getNode());

  SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}

SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  unsigned RHSVal = RHS->getZExtValue();
  if (!RHSVal)
    return LHS;

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;

  switch (LHS->getOpcode()) {
  default:
    break;
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    SDValue X = LHS->getOperand(0);

    if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
        isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
      // Prefer build_vector as the canonical form if packed types are legal.
      // (shl ([asz]ext i16:x), 16) -> build_vector 0, x
      SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
      return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
    }

    // shl (ext x) => zext (shl x), if the shift does not overflow the
    // extended-from type.
    if (VT != MVT::i64)
      break;
    KnownBits Known = DAG.computeKnownBits(X);
    unsigned LZ = Known.countMinLeadingZeros();
    if (LZ < RHSVal)
      break;
    EVT XVT = X.getValueType();
    SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
    return DAG.getZExtOrTrunc(Shl, SL, VT);
  }
  }

  if (VT != MVT::i64)
    return SDValue();

  // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))

  // On some subtargets, 64-bit shift is a quarter rate instruction. In the
  // common case, splitting this into a move and a 32-bit shift is faster and
  // the same code size.
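  //
  // For example (illustrative): (shl i64:x, 33)
  //   -> (i64 (bitcast (build_vector 0, (shl (i32 (trunc x)), 1))))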
  if (RHSVal < 32)
    return SDValue();

  SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS);
  SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);

  SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift});
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}

SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i64)
    return SDValue();

  const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);
  unsigned RHSVal = RHS->getZExtValue();

  // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31)
  if (RHSVal == 32) {
    SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
    SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
                                   DAG.getConstant(31, SL, MVT::i32));

    SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift});
    return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
  }

  // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31)
  if (RHSVal == 63) {
    SDValue Hi = getHiHalf64(N->getOperand(0), DAG);
    SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi,
                                   DAG.getConstant(31, SL, MVT::i32));
    SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift});
    return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec);
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  auto *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  unsigned ShiftAmt = RHS->getZExtValue();
  SelectionDAG &DAG = DCI.DAG;
  SDLoc SL(N);

  // fold (srl (and x, c1 << c2), c2) -> (and (srl x, c2), c1)
  // This improves the ability to match BFE patterns in isel.
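  //
  // For example (illustrative): (srl (and x, 0x00ff0000), 16)
  //   -> (and (srl x, 16), 0x00ff)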
  if (LHS.getOpcode() == ISD::AND) {
    if (auto *Mask = dyn_cast<ConstantSDNode>(LHS.getOperand(1))) {
      if (Mask->getAPIntValue().isShiftedMask() &&
          Mask->getAPIntValue().countTrailingZeros() == ShiftAmt) {
        return DAG.getNode(
            ISD::AND, SL, VT,
            DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(0), N->getOperand(1)),
            DAG.getNode(ISD::SRL, SL, VT, LHS.getOperand(1), N->getOperand(1)));
      }
    }
  }

  if (VT != MVT::i64)
    return SDValue();

  if (ShiftAmt < 32)
    return SDValue();

  // srl i64:x, C for C >= 32
  // =>
  //   build_pair (srl hi_32(x), C - 32), 0
  SDValue One = DAG.getConstant(1, SL, MVT::i32);
  SDValue Zero = DAG.getConstant(0, SL, MVT::i32);

  SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, LHS);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecOp, One);

  SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32);
  SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst);

  SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero});

  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair);
}

SDValue AMDGPUTargetLowering::performTruncateCombine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  SDValue Src = N->getOperand(0);

  // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x)
  if (Src.getOpcode() == ISD::BITCAST && !VT.isVector()) {
    SDValue Vec = Src.getOperand(0);
    if (Vec.getOpcode() == ISD::BUILD_VECTOR) {
      SDValue Elt0 = Vec.getOperand(0);
      EVT EltVT = Elt0.getValueType();
      if (VT.getSizeInBits() <= EltVT.getSizeInBits()) {
        if (EltVT.isFloatingPoint()) {
          Elt0 = DAG.getNode(ISD::BITCAST, SL,
                             EltVT.changeTypeToInteger(), Elt0);
        }

        return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0);
      }
    }
  }

  // Equivalent of above for accessing the high element of a vector as an
  // integer operation.
  // trunc (srl (bitcast (build_vector x, y)), 16) -> trunc (bitcast y)
  if (Src.getOpcode() == ISD::SRL && !VT.isVector()) {
    if (auto K = isConstOrConstSplat(Src.getOperand(1))) {
      if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) {
        SDValue BV = stripBitcast(Src.getOperand(0));
        if (BV.getOpcode() == ISD::BUILD_VECTOR &&
            BV.getValueType().getVectorNumElements() == 2) {
          SDValue SrcElt = BV.getOperand(1);
          EVT SrcEltVT = SrcElt.getValueType();
          if (SrcEltVT.isFloatingPoint()) {
            SrcElt = DAG.getNode(ISD::BITCAST, SL,
                                 SrcEltVT.changeTypeToInteger(), SrcElt);
          }

          return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt);
        }
      }
    }
  }

  // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit.
  //
  // i16 (trunc (srl i64:x, K)), K <= 16 ->
  //   i16 (trunc (srl (i32 (trunc x), K)))
  if (VT.getScalarSizeInBits() < 32) {
    EVT SrcVT = Src.getValueType();
    if (SrcVT.getScalarSizeInBits() > 32 &&
        (Src.getOpcode() == ISD::SRL ||
         Src.getOpcode() == ISD::SRA ||
         Src.getOpcode() == ISD::SHL)) {
      SDValue Amt = Src.getOperand(1);
      KnownBits Known = DAG.computeKnownBits(Amt);
      unsigned Size = VT.getScalarSizeInBits();
      if ((Known.isConstant() && Known.getConstant().ule(Size)) ||
          (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) {
        EVT MidVT = VT.isVector() ?
          EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                           VT.getVectorNumElements()) : MVT::i32;

        EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout());
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT,
                                    Src.getOperand(0));
        DCI.AddToWorklist(Trunc.getNode());

        if (Amt.getValueType() != NewShiftVT) {
          Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT);
          DCI.AddToWorklist(Amt.getNode());
        }

        SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT,
                                          Trunc, Amt);
        return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift);
      }
    }
  }

  return SDValue();
}

// We need to specifically handle i64 mul here to avoid unnecessary conversion
// instructions. If we only match on the legalized i64 mul expansion,
// SimplifyDemandedBits will be unable to remove them because there will be
// multiple uses due to the separate mul + mulh[su].
static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
                        SDValue N0, SDValue N1, unsigned Size, bool Signed) {
  if (Size <= 32) {
    unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
    return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
  }

  // Because we want to eliminate extension instructions before the
  // operation, we need to create a single user here (i.e. not the separate
  // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.

  unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;

  SDValue Mul = DAG.getNode(MulOpc, SL,
                            DAG.getVTList(MVT::i32, MVT::i32), N0, N1);

  return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
                     Mul.getValue(0), Mul.getValue(1));
}

SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  unsigned Size = VT.getSizeInBits();
  if (VT.isVector() || Size > 64)
    return SDValue();

  // There are i16 integer mul/mad.
  if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
  // in the source into any_extends if the result of the mul is truncated. Since
  // we can assume the high bits are whatever we want, use the underlying value
  // to avoid the unknown high bits from interfering.
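  //
  // For example (illustrative): in (mul (any_ext i16:x), (any_ext i16:y)),
  // the any_extends carry no information in their high bits, so x and y can
  // feed the 24-bit multiply directly once re-extended with the matching
  // zext/sext below.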
  if (N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);

  if (N1.getOpcode() == ISD::ANY_EXTEND)
    N1 = N1.getOperand(0);

  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = getMul24(DAG, DL, N0, N1, Size, false);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = getMul24(DAG, DL, N0, N1, Size, true);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}

SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (!Subtarget->hasMulI24() || VT.isVector())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (!isI24(N0, DAG) || !isI24(N1, DAG))
    return SDValue();

  N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
  N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);

  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
  DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getSExtOrTrunc(Mulhi, DL, VT);
}

SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (!isU24(N0, DAG) || !isU24(N1, DAG))
    return SDValue();

  N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
  N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);

  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
  DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getZExtOrTrunc(Mulhi, DL, VT);
}

SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Simplify demanded bits before splitting into multiple users.
  if (SDValue V = simplifyI24(N, DCI))
    return V;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);

  unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
  unsigned MulHiOpc = Signed ?
    AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;

  SDLoc SL(N);

  SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
  SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
  return DAG.getMergeValues({ MulLo, MulHi }, SL);
}

static bool isNegativeOne(SDValue Val) {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
    return C->isAllOnesValue();
  return false;
}

SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
                                          SDValue Op,
                                          const SDLoc &DL,
                                          unsigned Opc) const {
  EVT VT = Op.getValueType();
  EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
  if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
                              LegalVT != MVT::i16))
    return SDValue();

  if (VT != MVT::i32)
    Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);

  SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
  if (VT != MVT::i32)
    FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);

  return FFBX;
}

// The native instructions return -1 on 0 input. Optimize out a select that
// produces -1 on 0.
//
// TODO: If zero is not undef, we could also do this if the output is compared
// against the bitwidth.
//
// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL,
                                                      SDValue Cond,
                                                      SDValue LHS, SDValue RHS,
                                                      DAGCombinerInfo &DCI) const {
  ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
  if (!CmpRhs || !CmpRhs->isNullValue())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  SDValue CmpLHS = Cond.getOperand(0);

  // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
  // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_b32 x
  if (CCOpcode == ISD::SETEQ &&
      (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
      RHS.getOperand(0) == CmpLHS &&
      isNegativeOne(LHS)) {
    unsigned Opc = isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
                                                AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }

  // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
  // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_b32 x
  if (CCOpcode == ISD::SETNE &&
      (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
      LHS.getOperand(0) == CmpLHS &&
      isNegativeOne(RHS)) {
    unsigned Opc = isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
                                                AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }

  return SDValue();
}

static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
                                         unsigned Op,
                                         const SDLoc &SL,
                                         SDValue Cond,
                                         SDValue N1,
                                         SDValue N2) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N1.getValueType();

  SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
                                  N1.getOperand(0), N2.getOperand(0));
  DCI.AddToWorklist(NewSelect.getNode());
  return DAG.getNode(Op, SL, VT, NewSelect);
}

// Pull a free FP operation out of a select so it may fold into uses.
//
// select c, (fneg x), (fneg y) -> fneg (select c, x, y)
// select c, (fneg x), k -> fneg (select c, x, (fneg k))
//
// select c, (fabs x), (fabs y) -> fabs (select c, x, y)
// select c, (fabs x), +k -> fabs (select c, x, k)
static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
                                    SDValue N) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Cond = N.getOperand(0);
  SDValue LHS = N.getOperand(1);
  SDValue RHS = N.getOperand(2);

  EVT VT = N.getValueType();
  if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
      (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
    return distributeOpThroughSelect(DCI, LHS.getOpcode(),
                                     SDLoc(N), Cond, LHS, RHS);
  }

  bool Inv = false;
  if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
    std::swap(LHS, RHS);
    Inv = true;
  }

  // TODO: Support vector constants.
  ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
  if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
    SDLoc SL(N);
    // If one side is an fneg/fabs and the other is a constant, we can push the
    // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
    SDValue NewLHS = LHS.getOperand(0);
    SDValue NewRHS = RHS;

    // Careful: if the neg can be folded up, don't try to pull it back down.
    bool ShouldFoldNeg = true;

    if (NewLHS.hasOneUse()) {
      unsigned Opc = NewLHS.getOpcode();
      if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
        ShouldFoldNeg = false;
      if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
        ShouldFoldNeg = false;
    }

    if (ShouldFoldNeg) {
      if (LHS.getOpcode() == ISD::FNEG)
        NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
      else if (CRHS->isNegative())
        return SDValue();

      if (Inv)
        std::swap(NewLHS, NewRHS);

      SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
                                      Cond, NewLHS, NewRHS);
      DCI.AddToWorklist(NewSelect.getNode());
      return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
    }
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
    return Folded;

  SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::SETCC)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue LHS = Cond.getOperand(0);
  SDValue RHS = Cond.getOperand(1);
  SDValue CC = Cond.getOperand(2);

  SDValue True = N->getOperand(1);
  SDValue False = N->getOperand(2);

  if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
    SelectionDAG &DAG = DCI.DAG;
    if (DAG.isConstantValueOfAnyType(True) &&
        !DAG.isConstantValueOfAnyType(False)) {
      // Swap cmp + select pair to move the constant to the false input.
      // This will allow using VOPC cndmasks more often.
      // select (setcc x, y), k, x -> select (setccinv x, y), x, k

      SDLoc SL(N);
      ISD::CondCode NewCC =
        getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), LHS.getValueType());

      SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
      return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
    }

    if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
      SDValue MinMax
        = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
      // Revisit this node so we can catch min3/max3/med3 patterns.
      //DCI.AddToWorklist(MinMax.getNode());
      return MinMax;
    }
  }

  // There's no reason to not do this if the condition has other uses.
  return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
}

static bool isInv2Pi(const APFloat &APF) {
  static const APFloat KF16(APFloat::IEEEhalf(), APInt(16, 0x3118));
  static const APFloat KF32(APFloat::IEEEsingle(), APInt(32, 0x3e22f983));
  static const APFloat KF64(APFloat::IEEEdouble(),
                            APInt(64, 0x3fc45f306dc9c882));

  return APF.bitwiseIsEqual(KF16) ||
         APF.bitwiseIsEqual(KF32) ||
         APF.bitwiseIsEqual(KF64);
}

// +0.0 and 1.0 / (2.0 * pi) have inline immediates, but their negated forms
// do not, so there is an additional cost to negate them.
bool AMDGPUTargetLowering::isConstantCostlierToNegate(SDValue N) const {
  if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) {
    if (C->isZero() && !C->isNegative())
      return true;

    if (Subtarget->hasInv2PiInlineImm() && isInv2Pi(C->getValueAPF()))
      return true;
  }

  return false;
}

static unsigned inverseMinMax(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return ISD::FMINNUM;
  case ISD::FMINNUM:
    return ISD::FMAXNUM;
  case ISD::FMAXNUM_IEEE:
    return ISD::FMINNUM_IEEE;
  case ISD::FMINNUM_IEEE:
    return ISD::FMAXNUM_IEEE;
  case AMDGPUISD::FMAX_LEGACY:
    return AMDGPUISD::FMIN_LEGACY;
  case AMDGPUISD::FMIN_LEGACY:
    return AMDGPUISD::FMAX_LEGACY;
  default:
    llvm_unreachable("invalid min/max opcode");
  }
}

SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  unsigned Opc = N0.getOpcode();

  // If the input has multiple uses and we can either fold the negate down, or
  // the other uses cannot, give up. This both prevents unprofitable
  // transformations and infinite loops: we won't repeatedly try to fold around
  // a negate that has no 'good' form.
  if (N0.hasOneUse()) {
    // This may be able to fold into the source, but at a code size cost. Don't
    // fold if the fold into the user is free.
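    //
    // Illustrative: most f32 VALU instructions accept a 'neg' source
    // modifier, so e.g. (fmul x, (fneg y)) costs no extra instruction, while
    // a standalone fneg would need a v_xor_b32 of the sign bit.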
    if (allUsesHaveSourceMods(N, 0))
      return SDValue();
  } else {
    if (fnegFoldsIntoOp(Opc) &&
        (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
      return SDValue();
  }

  SDLoc SL(N);
  switch (Opc) {
  case ISD::FADD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();

    // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    if (LHS.getOpcode() != ISD::FNEG)
      LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    else
      LHS = LHS.getOperand(0);

    if (RHS.getOpcode() != ISD::FNEG)
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);

    SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
    if (Res.getOpcode() != ISD::FADD)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMUL:
  case AMDGPUISD::FMUL_LEGACY: {
    // (fneg (fmul x, y)) -> (fmul x, (fneg y))
    // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    if (LHS.getOpcode() == ISD::FNEG)
      LHS = LHS.getOperand(0);
    else if (RHS.getOpcode() == ISD::FNEG)
      RHS = RHS.getOperand(0);
    else
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

    SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
    if (Res.getOpcode() != Opc)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMA:
  case ISD::FMAD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();

    // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
    SDValue LHS = N0.getOperand(0);
    SDValue MHS = N0.getOperand(1);
    SDValue RHS = N0.getOperand(2);

    if (LHS.getOpcode() == ISD::FNEG)
      LHS = LHS.getOperand(0);
    else if (MHS.getOpcode() == ISD::FNEG)
      MHS = MHS.getOperand(0);
    else
      MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);

    if (RHS.getOpcode() != ISD::FNEG)
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);

    SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
    if (Res.getOpcode() != Opc)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMINNUM_IEEE:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMIN_LEGACY: {
    // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
    // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
    // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
    // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)

    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    // 0 doesn't have a negated inline immediate.
    // TODO: This constant check should be generalized to other operations.
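    //
    // For example (illustrative): folding fneg (fminnum x, 0.0) to
    // fmaxnum (fneg x), -0.0 would require materializing -0.0 as a literal,
    // since only +0.0 is a free inline immediate.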
    if (isConstantCostlierToNegate(RHS))
      return SDValue();

    SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    unsigned Opposite = inverseMinMax(Opc);

    SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
    if (Res.getOpcode() != Opposite)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case AMDGPUISD::FMED3: {
    SDValue Ops[3];
    for (unsigned I = 0; I < 3; ++I)
      Ops[I] = DAG.getNode(ISD::FNEG, SL, VT, N0->getOperand(I),
                           N0->getFlags());

    SDValue Res = DAG.getNode(AMDGPUISD::FMED3, SL, VT, Ops, N0->getFlags());
    if (Res.getOpcode() != AMDGPUISD::FMED3)
      return SDValue(); // Op got folded away.
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FP_EXTEND:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT: // XXX - Should fround be handled?
  case ISD::FSIN:
  case ISD::FCANONICALIZE:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW: {
    SDValue CvtSrc = N0.getOperand(0);
    if (CvtSrc.getOpcode() == ISD::FNEG) {
      // (fneg (fp_extend (fneg x))) -> (fp_extend x)
      // (fneg (rcp (fneg x))) -> (rcp x)
      return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
    }

    if (!N0.hasOneUse())
      return SDValue();

    // (fneg (fp_extend x)) -> (fp_extend (fneg x))
    // (fneg (rcp x)) -> (rcp (fneg x))
    SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
  }
  case ISD::FP_ROUND: {
    SDValue CvtSrc = N0.getOperand(0);

    if (CvtSrc.getOpcode() == ISD::FNEG) {
      // (fneg (fp_round (fneg x))) -> (fp_round x)
      return DAG.getNode(ISD::FP_ROUND, SL, VT,
                         CvtSrc.getOperand(0), N0.getOperand(1));
    }

    if (!N0.hasOneUse())
      return SDValue();

    // (fneg (fp_round x)) -> (fp_round (fneg x))
    SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
  }
  case ISD::FP16_TO_FP: {
    // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
    // f16, but legalization of f16 fneg ends up pulling it out of the source.
    // Put the fneg back as a legal source operation that can be matched later.
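    //
    // (Negating an IEEE half is just flipping bit 15, the sign bit of the
    // 16-bit payload held in the low half of the integer operand.)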
    SDLoc SL(N);

    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
    SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
                                  DAG.getConstant(0x8000, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
  }
  default:
    return SDValue();
  }
}

SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);

  if (!N0.hasOneUse())
    return SDValue();

  switch (N0.getOpcode()) {
  case ISD::FP16_TO_FP: {
    assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
    SDLoc SL(N);
    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
    SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
                                  DAG.getConstant(0x7fff, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
  }
  default:
    return SDValue();
  }
}

SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
  if (!CFP)
    return SDValue();

  // XXX - Should this flush denormals?
  const APFloat &Val = CFP->getValueAPF();
  APFloat One(Val.getSemantics(), "1.0");
  return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST: {
    EVT DestVT = N->getValueType(0);

    // Push casts through vector builds. This helps avoid emitting a large
    // number of copies when materializing floating point vector constants.
    //
    // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
    //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
    if (DestVT.isVector()) {
      SDValue Src = N->getOperand(0);
      if (Src.getOpcode() == ISD::BUILD_VECTOR) {
        EVT SrcVT = Src.getValueType();
        unsigned NElts = DestVT.getVectorNumElements();

        if (SrcVT.getVectorNumElements() == NElts) {
          EVT DestEltVT = DestVT.getVectorElementType();

          SmallVector<SDValue, 8> CastedElts;
          SDLoc SL(N);
          for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
            SDValue Elt = Src.getOperand(I);
            CastedElts.push_back(DAG.getNode(ISD::BITCAST, SL, DestEltVT, Elt));
          }

          return DAG.getBuildVector(DestVT, SL, CastedElts);
        }
      }
    }

    if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
      break;

    // Fold bitcasts of constants.
    //
    // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
    // TODO: Generalize and move to DAGCombiner
    SDValue Src = N->getOperand(0);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
      if (Src.getValueType() == MVT::i64) {
        SDLoc SL(N);
        uint64_t CVal = C->getZExtValue();
        SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                 DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                                 DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
        return DAG.getNode(ISD::BITCAST, SL, DestVT, BV);
      }
    }

    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
      const APInt &Val = C->getValueAPF().bitcastToAPInt();
      SDLoc SL(N);
      uint64_t CVal = Val.getZExtValue();
      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));

      return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
    }

    break;
  }
  case ISD::SHL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::SRL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSrlCombine(N, DCI);
  }
  case ISD::SRA: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSraCombine(N, DCI);
  }
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case ISD::MULHS:
    return performMulhsCombine(N, DCI);
  case ISD::MULHU:
    return performMulhuCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MULHI_I24:
  case AMDGPUISD::MULHI_U24: {
    if (SDValue V = simplifyI24(N, DCI))
      return V;
    return SDValue();
  }
  case AMDGPUISD::MUL_LOHI_I24:
  case AMDGPUISD::MUL_LOHI_U24:
    return performMulLoHi24Combine(N, DCI);
  case ISD::SELECT:
    return performSelectCombine(N, DCI);
  case ISD::FNEG:
    return performFNegCombine(N, DCI);
  case ISD::FABS:
    return performFAbsCombine(N, DCI);
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG Combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types ends up as multiple
        // operations, although we could handle them in a single BFE.
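        //
        // For example (illustrative): (BFE_I32 x, 0, 8) becomes
        // (sign_extend_inreg x, i8), which generic combines can often remove.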
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    if ((OffsetVal + WidthVal) >= 32 &&
        !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      KnownBits Known;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }
  case ISD::LOAD:
    return performLoadCombine(N, DCI);
  case ISD::STORE:
    return performStoreCombine(N, DCI);
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_IFLAG:
    return performRcpCombine(N, DCI);
  case ISD::AssertZext:
  case ISD::AssertSext:
    return performAssertSZExtCombine(N, DCI);
  case ISD::INTRINSIC_WO_CHAIN:
    return performIntrinsicWOChainCombine(N, DCI);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT,
                                                   const SDLoc &SL,
                                                   bool RawReg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VReg;

  if (!MRI.isLiveIn(Reg)) {
    VReg = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VReg);
  } else {
    VReg = MRI.getLiveInVirtReg(Reg);
  }

  if (RawReg)
    return DAG.getRegister(VReg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
}

// This may be called multiple times, and nothing prevents creating multiple
// objects at the same offset. See if we already defined this object.
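//
// For example (illustrative), two calls with Size = 4 and Offset = 16 must
// yield the same fixed frame index rather than two overlapping objects.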
static int getOrCreateFixedStackObject(MachineFrameInfo &MFI, unsigned Size,
                                       int64_t Offset) {
  for (int I = MFI.getObjectIndexBegin(); I < 0; ++I) {
    if (MFI.getObjectOffset(I) == Offset) {
      assert(MFI.getObjectSize(I) == Size);
      return I;
    }
  }

  return MFI.CreateFixedObject(Size, Offset, true);
}

SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
                                                  EVT VT,
                                                  const SDLoc &SL,
                                                  int64_t Offset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  int FI = getOrCreateFixedStackObject(MFI, VT.getStoreSize(), Offset);

  auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
  SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);

  return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
                     MachineMemOperand::MODereferenceable |
                     MachineMemOperand::MOInvariant);
}

SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   SDValue ArgVal,
                                                   int64_t Offset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);

  SDValue Ptr = DAG.getConstant(Offset, SL, MVT::i32);
  SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
                               MachineMemOperand::MODereferenceable);
  return Store;
}

SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
                                             const TargetRegisterClass *RC,
                                             EVT VT, const SDLoc &SL,
                                             const ArgDescriptor &Arg) const {
  assert(Arg && "Attempting to load missing argument");

  SDValue V = Arg.isRegister() ?
    CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL) :
    loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());

  if (!Arg.isMasked())
    return V;

  unsigned Mask = Arg.getMask();
  unsigned Shift = countTrailingZeros<unsigned>(Mask);
  V = DAG.getNode(ISD::SRL, SL, VT, V,
                  DAG.getShiftAmountConstant(Shift, VT, SL));
  return DAG.getNode(ISD::AND, SL, VT, V,
                     DAG.getConstant(Mask >> Shift, SL, VT));
}

uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
    const MachineFunction &MF, const ImplicitParameter Param) const {
  const AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();
  const AMDGPUSubtarget &ST =
      AMDGPUSubtarget::get(getTargetMachine(), MF.getFunction());
  unsigned ExplicitArgOffset = ST.getExplicitKernelArgOffset(MF.getFunction());
  const Align Alignment = ST.getAlignmentForImplicitArgPtr();
  uint64_t ArgOffset = alignTo(MFI->getExplicitKernArgSize(), Alignment) +
                       ExplicitArgOffset;
  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    return ArgOffset + 4;
  }
  llvm_unreachable("unexpected implicit parameter type");
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(IF)
  NODE_NAME_CASE(ELSE)
  NODE_NAME_CASE(LOOP)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(TC_RETURN)
  NODE_NAME_CASE(TRAP)
  NODE_NAME_CASE(RET_FLAG)
  NODE_NAME_CASE(RETURN_TO_EPILOG)
  NODE_NAME_CASE(ENDPGM)
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(SETCC)
  NODE_NAME_CASE(SETREG)
  NODE_NAME_CASE(DENORM_MODE)
  NODE_NAME_CASE(FMA_W_CHAIN)
  NODE_NAME_CASE(FMUL_W_CHAIN)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(FMED3)
  NODE_NAME_CASE(SMED3)
  NODE_NAME_CASE(UMED3)
  NODE_NAME_CASE(FDOT2)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(FMAD_FTZ)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RCP_LEGACY)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RCP_IFLAG)
  NODE_NAME_CASE(FMUL_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMP)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(FFBH_U32)
  NODE_NAME_CASE(FFBH_I32)
  NODE_NAME_CASE(FFBL_B32)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MULHI_U24)
  NODE_NAME_CASE(MULHI_I24)
  NODE_NAME_CASE(MUL_LOHI_U24)
  NODE_NAME_CASE(MUL_LOHI_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(MAD_I64_I32)
  NODE_NAME_CASE(MAD_U64_U32)
  NODE_NAME_CASE(PERM)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(R600_EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
  NODE_NAME_CASE(CVT_PKNORM_I16_F32)
  NODE_NAME_CASE(CVT_PKNORM_U16_F32)
  NODE_NAME_CASE(CVT_PK_I16_I32)
  NODE_NAME_CASE(CVT_PK_U16_U32)
  NODE_NAME_CASE(FP_TO_FP16)
  NODE_NAME_CASE(FP16_ZEXT)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  NODE_NAME_CASE(PC_ADD_REL_OFFSET)
  NODE_NAME_CASE(LDS)
  NODE_NAME_CASE(DUMMY_CHAIN)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(LOAD_D16_HI)
  NODE_NAME_CASE(LOAD_D16_LO)
  NODE_NAME_CASE(LOAD_D16_HI_I8)
  NODE_NAME_CASE(LOAD_D16_HI_U8)
  NODE_NAME_CASE(LOAD_D16_LO_I8)
  NODE_NAME_CASE(LOAD_D16_LO_U8)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
  NODE_NAME_CASE(DS_ORDERED_COUNT)
  NODE_NAME_CASE(ATOMIC_CMP_SWAP)
  NODE_NAME_CASE(ATOMIC_INC)
  NODE_NAME_CASE(ATOMIC_DEC)
  NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
  NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
  NODE_NAME_CASE(BUFFER_LOAD)
  NODE_NAME_CASE(BUFFER_LOAD_UBYTE)
  NODE_NAME_CASE(BUFFER_LOAD_USHORT)
  NODE_NAME_CASE(BUFFER_LOAD_BYTE)
  NODE_NAME_CASE(BUFFER_LOAD_SHORT)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
  NODE_NAME_CASE(SBUFFER_LOAD)
  NODE_NAME_CASE(BUFFER_STORE)
  NODE_NAME_CASE(BUFFER_STORE_BYTE)
  NODE_NAME_CASE(BUFFER_STORE_SHORT)
  NODE_NAME_CASE(BUFFER_STORE_FORMAT)
  NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
  NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
  NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
  NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
  NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
  NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
  NODE_NAME_CASE(BUFFER_ATOMIC_AND)
  NODE_NAME_CASE(BUFFER_ATOMIC_OR)
  NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
  NODE_NAME_CASE(BUFFER_ATOMIC_INC)
  NODE_NAME_CASE(BUFFER_ATOMIC_DEC)
  NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)
  NODE_NAME_CASE(BUFFER_ATOMIC_FADD)
  NODE_NAME_CASE(BUFFER_ATOMIC_PK_FADD)
  NODE_NAME_CASE(ATOMIC_PK_FADD)

  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}

SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
                                              SelectionDAG &DAG, int Enabled,
                                              int &RefinementSteps,
                                              bool &UseOneConstNR,
                                              bool Reciprocal) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &RefinementSteps) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson step performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {

  Known.resetAll(); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    Known.Zero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT: {
    unsigned BitWidth = Known.getBitWidth();

    // High bits are zero.
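    // For example (illustrative): for an i32 result, Known.Zero becomes
    // 0xffff0000, since the f16 payload occupies only the low 16 bits.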
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
                      RHSKnown.countMinTrailingZeros();
    Known.Zero.setLowBits(std::min(TrailZ, 32u));
    // Skip the extra check if all bits are known zeros.
    if (TrailZ >= 32)
      break;

    // Truncate to 24 bits.
    LHSKnown = LHSKnown.trunc(24);
    RHSKnown = RHSKnown.trunc(24);

    if (Opc == AMDGPUISD::MUL_I24) {
      unsigned LHSValBits = 24 - LHSKnown.countMinSignBits();
      unsigned RHSValBits = 24 - RHSKnown.countMinSignBits();
      unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
      if (MaxValBits >= 32)
        break;
      bool LHSNegative = LHSKnown.isNegative();
      bool LHSNonNegative = LHSKnown.isNonNegative();
      bool LHSPositive = LHSKnown.isStrictlyPositive();
      bool RHSNegative = RHSKnown.isNegative();
      bool RHSNonNegative = RHSKnown.isNonNegative();
      bool RHSPositive = RHSKnown.isStrictlyPositive();

      if ((LHSNonNegative && RHSNonNegative) || (LHSNegative && RHSNegative))
        Known.Zero.setHighBits(32 - MaxValBits);
      else if ((LHSNegative && RHSPositive) || (LHSPositive && RHSNegative))
        Known.One.setHighBits(32 - MaxValBits);
    } else {
      unsigned LHSValBits = 24 - LHSKnown.countMinLeadingZeros();
      unsigned RHSValBits = 24 - RHSKnown.countMinLeadingZeros();
      unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
      if (MaxValBits >= 32)
        break;
      Known.Zero.setHighBits(32 - MaxValBits);
    }
    break;
  }
  case AMDGPUISD::PERM: {
    ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CMask)
      return;

    KnownBits LHSKnown = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
    KnownBits RHSKnown = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
    unsigned Sel = CMask->getZExtValue();

    for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      if (SelBits < 4) {
        SelBits *= 8;
        Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits < 7) {
        SelBits = (SelBits & 3) * 8;
        Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits == 0x0c) {
        Known.Zero |= 0xFFull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xFFull << I;
      }
      Sel >>= 8;
    }
    break;
  }
  case AMDGPUISD::BUFFER_LOAD_UBYTE: {
    Known.Zero.setHighBits(24);
    break;
  }
  case AMDGPUISD::BUFFER_LOAD_USHORT: {
    Known.Zero.setHighBits(16);
    break;
  }
  case AMDGPUISD::LDS: {
    auto GA = cast<GlobalAddressSDNode>(Op.getOperand(0).getNode());
    unsigned Align = GA->getGlobal()->getAlignment();

    Known.Zero.setHighBits(16);
    if (Align)
      Known.Zero.setLowBits(Log2_32(Align));
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_mbcnt_lo:
    case Intrinsic::amdgcn_mbcnt_hi: {
      const GCNSubtarget &ST =
          DAG.getMachineFunction().getSubtarget<GCNSubtarget>();
      // These return at most the wavefront size - 1.
      unsigned Size = Op.getValueType().getSizeInBits();
      Known.Zero.setHighBits(Size - ST.getWavefrontSizeLog2());
      break;
    }
    default:
      break;
    }
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;
  case AMDGPUISD::BUFFER_LOAD_BYTE:
    return 25;
  case AMDGPUISD::BUFFER_LOAD_SHORT:
    return 17;
  case AMDGPUISD::BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPUISD::BUFFER_LOAD_USHORT:
    return 16;
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT:
    return 16;
  default:
    return 1;
  }
}

unsigned AMDGPUTargetLowering::computeNumSignBitsForTargetInstr(
  GISelKnownBits &Analysis, Register R,
  const APInt &DemandedElts, const MachineRegisterInfo &MRI,
  unsigned Depth) const {
  const MachineInstr *MI = MRI.getVRegDef(R);
  if (!MI)
    return 1;

  // TODO: Check range metadata on MMO.
  switch (MI->getOpcode()) {
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SBYTE:
    return 25;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_SSHORT:
    return 17;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_UBYTE:
    return 24;
  case AMDGPU::G_AMDGPU_BUFFER_LOAD_USHORT:
    return 16;
  default:
    return 1;
  }
}

bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
                                                        const SelectionDAG &DAG,
                                                        bool SNaN,
                                                        unsigned Depth) const {
  unsigned Opcode = Op.getOpcode();
  switch (Opcode) {
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY: {
    if (SNaN)
      return true;

    // TODO: Can check no nans on one of the operands for each one, but which
    // one?
    return false;
  }
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::CVT_PKRTZ_F16_F32: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
  }
  case AMDGPUISD::FMED3:
  case AMDGPUISD::FMIN3:
  case AMDGPUISD::FMAX3:
  case AMDGPUISD::FMAD_FTZ: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
           DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
  }
  case AMDGPUISD::CVT_F32_UBYTE0:
  case AMDGPUISD::CVT_F32_UBYTE1:
  case AMDGPUISD::CVT_F32_UBYTE2:
  case AMDGPUISD::CVT_F32_UBYTE3:
    return true;

  case AMDGPUISD::RCP:
  case AMDGPUISD::RSQ:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RSQ_LEGACY:
  case AMDGPUISD::RSQ_CLAMP: {
    if (SNaN)
      return true;

    // TODO: Need is-known-positive check.
    return false;
  }
  case AMDGPUISD::LDEXP:
  case AMDGPUISD::FRACT: {
    if (SNaN)
      return true;
    return DAG.isKnownNeverNaN(Op.getOperand(0), SNaN, Depth + 1);
  }
  case AMDGPUISD::DIV_SCALE:
  case AMDGPUISD::DIV_FMAS:
  case AMDGPUISD::DIV_FIXUP:
  case AMDGPUISD::TRIG_PREOP:
    // TODO: Refine on operands.
    return SNaN;
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::COS_HW: {
    // TODO: Need check for infinity.
    return SNaN;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID
      = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    // TODO: Handle more intrinsics.
    switch (IntrinsicID) {
    case Intrinsic::amdgcn_cubeid:
      return true;

    case Intrinsic::amdgcn_frexp_mant: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_cvt_pkrtz: {
      if (SNaN)
        return true;
      return DAG.isKnownNeverNaN(Op.getOperand(1), SNaN, Depth + 1) &&
             DAG.isKnownNeverNaN(Op.getOperand(2), SNaN, Depth + 1);
    }
    case Intrinsic::amdgcn_fdot2:
      // TODO: Refine on operand.
      return SNaN;
    default:
      return false;
    }
  }
  default:
    return false;
  }
}

TargetLowering::AtomicExpansionKind
AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
  switch (RMW->getOperation()) {
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::FAdd:
  case AtomicRMWInst::FSub:
    return AtomicExpansionKind::CmpXChg;
  default:
    return AtomicExpansionKind::None;
  }
}