1 //===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 /// \file 11 /// \brief This is the parent TargetLowering class for hardware code gen 12 /// targets. 13 // 14 //===----------------------------------------------------------------------===// 15 16 #include "AMDGPUISelLowering.h" 17 #include "AMDGPU.h" 18 #include "AMDGPUFrameLowering.h" 19 #include "AMDGPUIntrinsicInfo.h" 20 #include "AMDGPURegisterInfo.h" 21 #include "AMDGPUSubtarget.h" 22 #include "R600MachineFunctionInfo.h" 23 #include "SIMachineFunctionInfo.h" 24 #include "llvm/CodeGen/CallingConvLower.h" 25 #include "llvm/CodeGen/MachineFunction.h" 26 #include "llvm/CodeGen/MachineRegisterInfo.h" 27 #include "llvm/CodeGen/SelectionDAG.h" 28 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" 29 #include "llvm/IR/DataLayout.h" 30 #include "llvm/IR/DiagnosticInfo.h" 31 #include "SIInstrInfo.h" 32 using namespace llvm; 33 34 static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT, 35 CCValAssign::LocInfo LocInfo, 36 ISD::ArgFlagsTy ArgFlags, CCState &State) { 37 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), 38 ArgFlags.getOrigAlign()); 39 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo)); 40 41 return true; 42 } 43 44 #include "AMDGPUGenCallingConv.inc" 45 46 // Find a larger type to do a load / store of a vector with. 47 EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) { 48 unsigned StoreSize = VT.getStoreSizeInBits(); 49 if (StoreSize <= 32) 50 return EVT::getIntegerVT(Ctx, StoreSize); 51 52 assert(StoreSize % 32 == 0 && "Store size not a multiple of 32"); 53 return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32); 54 } 55 56 // Type for a vector that will be loaded to. 57 EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) { 58 unsigned StoreSize = VT.getStoreSizeInBits(); 59 if (StoreSize <= 32) 60 return EVT::getIntegerVT(Ctx, 32); 61 62 return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32); 63 } 64 65 AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM, 66 const AMDGPUSubtarget &STI) 67 : TargetLowering(TM), Subtarget(&STI) { 68 setOperationAction(ISD::Constant, MVT::i32, Legal); 69 setOperationAction(ISD::Constant, MVT::i64, Legal); 70 setOperationAction(ISD::ConstantFP, MVT::f32, Legal); 71 setOperationAction(ISD::ConstantFP, MVT::f64, Legal); 72 73 setOperationAction(ISD::BR_JT, MVT::Other, Expand); 74 setOperationAction(ISD::BRIND, MVT::Other, Expand); 75 76 // This is totally unsupported, just custom lower to produce an error. 77 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom); 78 79 // We need to custom lower some of the intrinsics 80 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom); 81 82 // Library functions. These default to Expand, but we have instructions 83 // for them. 
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be changed to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
141 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand); 142 143 setTruncStoreAction(MVT::i64, MVT::i16, Expand); 144 setTruncStoreAction(MVT::i64, MVT::i8, Expand); 145 setTruncStoreAction(MVT::i64, MVT::i1, Expand); 146 setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand); 147 setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand); 148 149 150 setOperationAction(ISD::LOAD, MVT::f32, Promote); 151 AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32); 152 153 setOperationAction(ISD::LOAD, MVT::v2f32, Promote); 154 AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32); 155 156 setOperationAction(ISD::LOAD, MVT::v4f32, Promote); 157 AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32); 158 159 setOperationAction(ISD::LOAD, MVT::v8f32, Promote); 160 AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32); 161 162 setOperationAction(ISD::LOAD, MVT::v16f32, Promote); 163 AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32); 164 165 setOperationAction(ISD::LOAD, MVT::f64, Promote); 166 AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64); 167 168 setOperationAction(ISD::LOAD, MVT::v2f64, Promote); 169 AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64); 170 171 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom); 172 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom); 173 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom); 174 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom); 175 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom); 176 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom); 177 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom); 178 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom); 179 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom); 180 setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom); 181 182 // There are no 64-bit extloads. These should be done as a 32-bit extload and 183 // an extension to 64-bit. 184 for (MVT VT : MVT::integer_valuetypes()) { 185 setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand); 186 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand); 187 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand); 188 } 189 190 for (MVT VT : MVT::integer_vector_valuetypes()) { 191 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand); 192 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand); 193 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand); 194 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand); 195 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand); 196 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand); 197 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand); 198 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand); 199 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand); 200 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand); 201 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand); 202 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand); 203 } 204 205 setOperationAction(ISD::BR_CC, MVT::i1, Expand); 206 207 if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) { 208 setOperationAction(ISD::FCEIL, MVT::f64, Custom); 209 setOperationAction(ISD::FTRUNC, MVT::f64, Custom); 210 setOperationAction(ISD::FRINT, MVT::f64, Custom); 211 setOperationAction(ISD::FFLOOR, MVT::f64, Custom); 212 } 213 214 if (!Subtarget->hasBFI()) { 215 // fcopysign can be done in a single instruction with BFI. 
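    // For reference, a sketch of that single-instruction form (not what this
    // block emits; this path only handles targets without BFI): v_bfi_b32
    // computes (S0 & S1) | (~S0 & S2), so
    //   copysign(x, y) = BFI(0x7fffffff, x, y)
    //                  = (x & 0x7fffffff) | (y & 0x80000000)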
216 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand); 217 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand); 218 } 219 220 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand); 221 222 setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand); 223 setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand); 224 setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand); 225 setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand); 226 227 setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand); 228 setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand); 229 setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand); 230 setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand); 231 232 setTruncStoreAction(MVT::f32, MVT::f16, Expand); 233 setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand); 234 setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand); 235 setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand); 236 237 setTruncStoreAction(MVT::f64, MVT::f16, Expand); 238 setTruncStoreAction(MVT::f64, MVT::f32, Expand); 239 240 const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 }; 241 for (MVT VT : ScalarIntVTs) { 242 setOperationAction(ISD::SREM, VT, Expand); 243 setOperationAction(ISD::SDIV, VT, Expand); 244 245 // GPU does not have divrem function for signed or unsigned. 246 setOperationAction(ISD::SDIVREM, VT, Custom); 247 setOperationAction(ISD::UDIVREM, VT, Custom); 248 249 // GPU does not have [S|U]MUL_LOHI functions as a single instruction. 250 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 251 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 252 253 setOperationAction(ISD::BSWAP, VT, Expand); 254 setOperationAction(ISD::CTTZ, VT, Expand); 255 setOperationAction(ISD::CTLZ, VT, Expand); 256 } 257 258 if (!Subtarget->hasBCNT(32)) 259 setOperationAction(ISD::CTPOP, MVT::i32, Expand); 260 261 if (!Subtarget->hasBCNT(64)) 262 setOperationAction(ISD::CTPOP, MVT::i64, Expand); 263 264 // The hardware supports 32-bit ROTR, but not ROTL. 265 setOperationAction(ISD::ROTL, MVT::i32, Expand); 266 setOperationAction(ISD::ROTL, MVT::i64, Expand); 267 setOperationAction(ISD::ROTR, MVT::i64, Expand); 268 269 setOperationAction(ISD::MUL, MVT::i64, Expand); 270 setOperationAction(ISD::MULHU, MVT::i64, Expand); 271 setOperationAction(ISD::MULHS, MVT::i64, Expand); 272 setOperationAction(ISD::UDIV, MVT::i32, Expand); 273 setOperationAction(ISD::UREM, MVT::i32, Expand); 274 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom); 275 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom); 276 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom); 277 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom); 278 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand); 279 280 setOperationAction(ISD::SMIN, MVT::i32, Legal); 281 setOperationAction(ISD::UMIN, MVT::i32, Legal); 282 setOperationAction(ISD::SMAX, MVT::i32, Legal); 283 setOperationAction(ISD::UMAX, MVT::i32, Legal); 284 285 if (Subtarget->hasFFBH()) 286 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom); 287 else 288 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand); 289 290 if (!Subtarget->hasFFBL()) 291 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand); 292 293 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand); 294 295 setOperationAction(ISD::CTLZ, MVT::i64, Custom); 296 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom); 297 298 // We only really have 32-bit BFE instructions (and 16-bit on VI). 
299 // 300 // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any 301 // effort to match them now. We want this to be false for i64 cases when the 302 // extraction isn't restricted to the upper or lower half. Ideally we would 303 // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that 304 // span the midpoint are probably relatively rare, so don't worry about them 305 // for now. 306 if (Subtarget->hasBFE()) 307 setHasExtractBitsInsn(true); 308 309 static const MVT::SimpleValueType VectorIntTypes[] = { 310 MVT::v2i32, MVT::v4i32 311 }; 312 313 for (MVT VT : VectorIntTypes) { 314 // Expand the following operations for the current type by default. 315 setOperationAction(ISD::ADD, VT, Expand); 316 setOperationAction(ISD::AND, VT, Expand); 317 setOperationAction(ISD::FP_TO_SINT, VT, Expand); 318 setOperationAction(ISD::FP_TO_UINT, VT, Expand); 319 setOperationAction(ISD::MUL, VT, Expand); 320 setOperationAction(ISD::OR, VT, Expand); 321 setOperationAction(ISD::SHL, VT, Expand); 322 setOperationAction(ISD::SRA, VT, Expand); 323 setOperationAction(ISD::SRL, VT, Expand); 324 setOperationAction(ISD::ROTL, VT, Expand); 325 setOperationAction(ISD::ROTR, VT, Expand); 326 setOperationAction(ISD::SUB, VT, Expand); 327 setOperationAction(ISD::SINT_TO_FP, VT, Expand); 328 setOperationAction(ISD::UINT_TO_FP, VT, Expand); 329 setOperationAction(ISD::SDIV, VT, Expand); 330 setOperationAction(ISD::UDIV, VT, Expand); 331 setOperationAction(ISD::SREM, VT, Expand); 332 setOperationAction(ISD::UREM, VT, Expand); 333 setOperationAction(ISD::SMUL_LOHI, VT, Expand); 334 setOperationAction(ISD::UMUL_LOHI, VT, Expand); 335 setOperationAction(ISD::SDIVREM, VT, Custom); 336 setOperationAction(ISD::UDIVREM, VT, Expand); 337 setOperationAction(ISD::ADDC, VT, Expand); 338 setOperationAction(ISD::SUBC, VT, Expand); 339 setOperationAction(ISD::ADDE, VT, Expand); 340 setOperationAction(ISD::SUBE, VT, Expand); 341 setOperationAction(ISD::SELECT, VT, Expand); 342 setOperationAction(ISD::VSELECT, VT, Expand); 343 setOperationAction(ISD::SELECT_CC, VT, Expand); 344 setOperationAction(ISD::XOR, VT, Expand); 345 setOperationAction(ISD::BSWAP, VT, Expand); 346 setOperationAction(ISD::CTPOP, VT, Expand); 347 setOperationAction(ISD::CTTZ, VT, Expand); 348 setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand); 349 setOperationAction(ISD::CTLZ, VT, Expand); 350 setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand); 351 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); 352 } 353 354 static const MVT::SimpleValueType FloatVectorTypes[] = { 355 MVT::v2f32, MVT::v4f32 356 }; 357 358 for (MVT VT : FloatVectorTypes) { 359 setOperationAction(ISD::FABS, VT, Expand); 360 setOperationAction(ISD::FMINNUM, VT, Expand); 361 setOperationAction(ISD::FMAXNUM, VT, Expand); 362 setOperationAction(ISD::FADD, VT, Expand); 363 setOperationAction(ISD::FCEIL, VT, Expand); 364 setOperationAction(ISD::FCOS, VT, Expand); 365 setOperationAction(ISD::FDIV, VT, Expand); 366 setOperationAction(ISD::FEXP2, VT, Expand); 367 setOperationAction(ISD::FLOG2, VT, Expand); 368 setOperationAction(ISD::FREM, VT, Expand); 369 setOperationAction(ISD::FPOW, VT, Expand); 370 setOperationAction(ISD::FFLOOR, VT, Expand); 371 setOperationAction(ISD::FTRUNC, VT, Expand); 372 setOperationAction(ISD::FMUL, VT, Expand); 373 setOperationAction(ISD::FMA, VT, Expand); 374 setOperationAction(ISD::FRINT, VT, Expand); 375 setOperationAction(ISD::FNEARBYINT, VT, Expand); 376 setOperationAction(ISD::FSQRT, VT, Expand); 377 
setOperationAction(ISD::FSIN, VT, Expand); 378 setOperationAction(ISD::FSUB, VT, Expand); 379 setOperationAction(ISD::FNEG, VT, Expand); 380 setOperationAction(ISD::SELECT, VT, Expand); 381 setOperationAction(ISD::VSELECT, VT, Expand); 382 setOperationAction(ISD::SELECT_CC, VT, Expand); 383 setOperationAction(ISD::FCOPYSIGN, VT, Expand); 384 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand); 385 } 386 387 setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom); 388 setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom); 389 390 setTargetDAGCombine(ISD::AND); 391 setTargetDAGCombine(ISD::SHL); 392 setTargetDAGCombine(ISD::SRA); 393 setTargetDAGCombine(ISD::SRL); 394 setTargetDAGCombine(ISD::MUL); 395 setTargetDAGCombine(ISD::SELECT); 396 setTargetDAGCombine(ISD::SELECT_CC); 397 setTargetDAGCombine(ISD::STORE); 398 399 setTargetDAGCombine(ISD::FADD); 400 setTargetDAGCombine(ISD::FSUB); 401 402 setTargetDAGCombine(ISD::BITCAST); 403 404 setBooleanContents(ZeroOrNegativeOneBooleanContent); 405 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent); 406 407 setSchedulingPreference(Sched::RegPressure); 408 setJumpIsExpensive(true); 409 410 // SI at least has hardware support for floating point exceptions, but no way 411 // of using or handling them is implemented. They are also optional in OpenCL 412 // (Section 7.3) 413 setHasFloatingPointExceptions(Subtarget->hasFPExceptions()); 414 415 setSelectIsExpensive(false); 416 PredictableSelectIsExpensive = false; 417 418 setFsqrtIsCheap(true); 419 420 // We want to find all load dependencies for long chains of stores to enable 421 // merging into very wide vectors. The problem is with vectors with > 4 422 // elements. MergeConsecutiveStores will attempt to merge these because x8/x16 423 // vectors are a legal type, even though we have to split the loads 424 // usually. When we can more precisely specify load legality per address 425 // space, we should be able to make FindBetterChain/MergeConsecutiveStores 426 // smarter so that they can figure out what to do in 2 iterations without all 427 // N > 4 stores on the same chain. 428 GatherAllAliasesMaxDepth = 16; 429 430 // FIXME: Need to really handle these. 431 MaxStoresPerMemcpy = 4096; 432 MaxStoresPerMemmove = 4096; 433 MaxStoresPerMemset = 4096; 434 } 435 436 //===----------------------------------------------------------------------===// 437 // Target Information 438 //===----------------------------------------------------------------------===// 439 440 MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const { 441 return MVT::i32; 442 } 443 444 bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const { 445 return true; 446 } 447 448 // The backend supports 32 and 64 bit floating point immediates. 449 // FIXME: Why are we reporting vectors of FP immediates as legal? 450 bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const { 451 EVT ScalarVT = VT.getScalarType(); 452 return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64); 453 } 454 455 // We don't want to shrink f64 / f32 constants. 456 bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const { 457 EVT ScalarVT = VT.getScalarType(); 458 return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64); 459 } 460 461 bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N, 462 ISD::LoadExtType, 463 EVT NewVT) const { 464 465 unsigned NewSize = NewVT.getStoreSizeInBits(); 466 467 // If we are reducing to a 32-bit load, this is always better. 
468 if (NewSize == 32) 469 return true; 470 471 EVT OldVT = N->getValueType(0); 472 unsigned OldSize = OldVT.getStoreSizeInBits(); 473 474 // Don't produce extloads from sub 32-bit types. SI doesn't have scalar 475 // extloads, so doing one requires using a buffer_load. In cases where we 476 // still couldn't use a scalar load, using the wider load shouldn't really 477 // hurt anything. 478 479 // If the old size already had to be an extload, there's no harm in continuing 480 // to reduce the width. 481 return (OldSize < 32); 482 } 483 484 bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, 485 EVT CastTy) const { 486 if (LoadTy.getSizeInBits() != CastTy.getSizeInBits()) 487 return true; 488 489 unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits(); 490 unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits(); 491 492 return ((LScalarSize <= CastScalarSize) || 493 (CastScalarSize >= 32) || 494 (LScalarSize < 32)); 495 } 496 497 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also 498 // profitable with the expansion for 64-bit since it's generally good to 499 // speculate things. 500 // FIXME: These should really have the size as a parameter. 501 bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const { 502 return true; 503 } 504 505 bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const { 506 return true; 507 } 508 509 //===---------------------------------------------------------------------===// 510 // Target Properties 511 //===---------------------------------------------------------------------===// 512 513 bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const { 514 assert(VT.isFloatingPoint()); 515 return VT == MVT::f32 || VT == MVT::f64; 516 } 517 518 bool AMDGPUTargetLowering::isFNegFree(EVT VT) const { 519 assert(VT.isFloatingPoint()); 520 return VT == MVT::f32 || VT == MVT::f64; 521 } 522 523 bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT, 524 unsigned NumElem, 525 unsigned AS) const { 526 return true; 527 } 528 529 bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const { 530 // There are few operations which truly have vector input operands. Any vector 531 // operation is going to involve operations on each component, and a 532 // build_vector will be a copy per element, so it always makes sense to use a 533 // build_vector input in place of the extracted element to avoid a copy into a 534 // super register. 535 // 536 // We should probably only do this if all users are extracts only, but this 537 // should be the common case. 538 return true; 539 } 540 541 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const { 542 // Truncate is just accessing a subregister. 543 return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0); 544 } 545 546 bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const { 547 // Truncate is just accessing a subregister. 548 return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() && 549 (Dest->getPrimitiveSizeInBits() % 32 == 0); 550 } 551 552 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const { 553 unsigned SrcSize = Src->getScalarSizeInBits(); 554 unsigned DestSize = Dest->getScalarSizeInBits(); 555 556 return SrcSize == 32 && DestSize == 64; 557 } 558 559 bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const { 560 // Any register load of a 64-bit value really requires 2 32-bit moves. For all 561 // practical purposes, the extra mov 0 to load a 64-bit is free. 
As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
                           const SmallVectorImpl<ISD::OutputArg> &Outs) const {

  State.AnalyzeReturn(Outs, RetCC_SI);
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain,
    CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, "unsupported call to function " + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = *DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op.getNode()->dump();
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return
LowerEXTRACT_SUBVECTOR(Op, DAG); 652 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 653 case ISD::UDIVREM: return LowerUDIVREM(Op, DAG); 654 case ISD::SDIVREM: return LowerSDIVREM(Op, DAG); 655 case ISD::FREM: return LowerFREM(Op, DAG); 656 case ISD::FCEIL: return LowerFCEIL(Op, DAG); 657 case ISD::FTRUNC: return LowerFTRUNC(Op, DAG); 658 case ISD::FRINT: return LowerFRINT(Op, DAG); 659 case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG); 660 case ISD::FROUND: return LowerFROUND(Op, DAG); 661 case ISD::FFLOOR: return LowerFFLOOR(Op, DAG); 662 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 663 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 664 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 665 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 666 case ISD::CTLZ: 667 case ISD::CTLZ_ZERO_UNDEF: 668 return LowerCTLZ(Op, DAG); 669 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 670 } 671 return Op; 672 } 673 674 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N, 675 SmallVectorImpl<SDValue> &Results, 676 SelectionDAG &DAG) const { 677 switch (N->getOpcode()) { 678 case ISD::SIGN_EXTEND_INREG: 679 // Different parts of legalization seem to interpret which type of 680 // sign_extend_inreg is the one to check for custom lowering. The extended 681 // from type is what really matters, but some places check for custom 682 // lowering of the result type. This results in trying to use 683 // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do 684 // nothing here and let the illegal result integer be handled normally. 685 return; 686 default: 687 return; 688 } 689 } 690 691 // FIXME: This implements accesses to initialized globals in the constant 692 // address space by copying them to private and accessing that. It does not 693 // properly handle illegal types or vectors. The private vector loads are not 694 // scalarized, and the illegal scalars hit an assertion. This technique will not 695 // work well with large initializers, and this should eventually be 696 // removed. Initialized globals should be placed into a data section that the 697 // runtime will load into a buffer before the kernel is executed. Uses of the 698 // global need to be replaced with a pointer loaded from an implicit kernel 699 // argument into this buffer holding the copy of the data, which will remove the 700 // need for any of this. 
701 SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init, 702 const GlobalValue *GV, 703 const SDValue &InitPtr, 704 SDValue Chain, 705 SelectionDAG &DAG) const { 706 const DataLayout &TD = DAG.getDataLayout(); 707 SDLoc DL(InitPtr); 708 Type *InitTy = Init->getType(); 709 710 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) { 711 EVT VT = EVT::getEVT(InitTy); 712 PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS); 713 return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr, 714 MachinePointerInfo(UndefValue::get(PtrTy)), false, 715 false, TD.getPrefTypeAlignment(InitTy)); 716 } 717 718 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) { 719 EVT VT = EVT::getEVT(CFP->getType()); 720 PointerType *PtrTy = PointerType::get(CFP->getType(), 0); 721 return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr, 722 MachinePointerInfo(UndefValue::get(PtrTy)), false, 723 false, TD.getPrefTypeAlignment(CFP->getType())); 724 } 725 726 if (StructType *ST = dyn_cast<StructType>(InitTy)) { 727 const StructLayout *SL = TD.getStructLayout(ST); 728 729 EVT PtrVT = InitPtr.getValueType(); 730 SmallVector<SDValue, 8> Chains; 731 732 for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) { 733 SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT); 734 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset); 735 736 Constant *Elt = Init->getAggregateElement(I); 737 Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG)); 738 } 739 740 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 741 } 742 743 if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) { 744 EVT PtrVT = InitPtr.getValueType(); 745 746 unsigned NumElements; 747 if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy)) 748 NumElements = AT->getNumElements(); 749 else if (VectorType *VT = dyn_cast<VectorType>(SeqTy)) 750 NumElements = VT->getNumElements(); 751 else 752 llvm_unreachable("Unexpected type"); 753 754 unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType()); 755 SmallVector<SDValue, 8> Chains; 756 for (unsigned i = 0; i < NumElements; ++i) { 757 SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT); 758 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset); 759 760 Constant *Elt = Init->getAggregateElement(i); 761 Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG)); 762 } 763 764 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 765 } 766 767 if (isa<UndefValue>(Init)) { 768 EVT VT = EVT::getEVT(InitTy); 769 PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS); 770 return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr, 771 MachinePointerInfo(UndefValue::get(PtrTy)), false, 772 false, TD.getPrefTypeAlignment(InitTy)); 773 } 774 775 Init->dump(); 776 llvm_unreachable("Unhandled constant initializer"); 777 } 778 779 static bool hasDefinedInitializer(const GlobalValue *GV) { 780 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); 781 if (!GVar || !GVar->hasInitializer()) 782 return false; 783 784 return !isa<UndefValue>(GVar->getInitializer()); 785 } 786 787 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI, 788 SDValue Op, 789 SelectionDAG &DAG) const { 790 791 const DataLayout &DL = DAG.getDataLayout(); 792 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op); 793 const GlobalValue *GV = G->getGlobal(); 794 795 switch (G->getAddressSpace()) { 796 case AMDGPUAS::LOCAL_ADDRESS: { 797 // XXX: What does 
the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (hasDefinedInitializer(GV))
      break;

    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      unsigned Align = GV->getAlignment();
      if (Align == 0)
        Align = DL.getABITypeAlignment(GV->getValueType());

      // TODO: We should sort these to minimize wasted space due to alignment
      // padding. Currently the padding is decided by the first encountered use
      // during lowering.
      Offset = MFI->LDSSize = alignTo(MFI->LDSSize, Align);
      MFI->LocalMemoryObjects[GV] = Offset;
      MFI->LDSSize += DL.getTypeAllocSize(GV->getValueType());
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    return DAG.getConstant(Offset, SDLoc(Op),
                           getPointerTy(DL, AMDGPUAS::LOCAL_ADDRESS));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getValueType();
    unsigned Size = DL.getTypeAllocSize(EltType);
    unsigned Alignment = DL.getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(DL, AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }

  const Function &Fn = *DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue
AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, 895 SelectionDAG &DAG) const { 896 unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 897 SDLoc DL(Op); 898 EVT VT = Op.getValueType(); 899 900 switch (IntrinsicID) { 901 default: return Op; 902 case AMDGPUIntrinsic::AMDGPU_clamp: 903 case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name. 904 return DAG.getNode(AMDGPUISD::CLAMP, DL, VT, 905 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); 906 907 case Intrinsic::AMDGPU_ldexp: // Legacy name 908 return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1), 909 Op.getOperand(2)); 910 911 case AMDGPUIntrinsic::AMDGPU_bfe_i32: 912 return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT, 913 Op.getOperand(1), 914 Op.getOperand(2), 915 Op.getOperand(3)); 916 917 case AMDGPUIntrinsic::AMDGPU_bfe_u32: 918 return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT, 919 Op.getOperand(1), 920 Op.getOperand(2), 921 Op.getOperand(3)); 922 923 case AMDGPUIntrinsic::AMDIL_exp: // Legacy name. 924 return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1)); 925 926 case AMDGPUIntrinsic::AMDGPU_brev: // Legacy name 927 return DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(1)); 928 } 929 } 930 931 /// \brief Generate Min/Max node 932 SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL, 933 EVT VT, 934 SDValue LHS, 935 SDValue RHS, 936 SDValue True, 937 SDValue False, 938 SDValue CC, 939 DAGCombinerInfo &DCI) const { 940 if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) 941 return SDValue(); 942 943 if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True)) 944 return SDValue(); 945 946 SelectionDAG &DAG = DCI.DAG; 947 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get(); 948 switch (CCOpcode) { 949 case ISD::SETOEQ: 950 case ISD::SETONE: 951 case ISD::SETUNE: 952 case ISD::SETNE: 953 case ISD::SETUEQ: 954 case ISD::SETEQ: 955 case ISD::SETFALSE: 956 case ISD::SETFALSE2: 957 case ISD::SETTRUE: 958 case ISD::SETTRUE2: 959 case ISD::SETUO: 960 case ISD::SETO: 961 break; 962 case ISD::SETULE: 963 case ISD::SETULT: { 964 if (LHS == True) 965 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS); 966 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS); 967 } 968 case ISD::SETOLE: 969 case ISD::SETOLT: 970 case ISD::SETLE: 971 case ISD::SETLT: { 972 // Ordered. Assume ordered for undefined. 973 974 // Only do this after legalization to avoid interfering with other combines 975 // which might occur. 976 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG && 977 !DCI.isCalledByLegalizer()) 978 return SDValue(); 979 980 // We need to permute the operands to get the correct NaN behavior. The 981 // selected operand is the second one based on the failing compare with NaN, 982 // so permute it based on the compare type the hardware uses. 
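    //
    // For example (an illustrative mapping, assuming the legacy min/max ops
    // select their second source when the compare with a NaN input fails):
    //   select_cc (setolt x, y), x, y --> fmin_legacy(x, y)
    //   select_cc (setolt x, y), y, x --> fmax_legacy(y, x)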
983 if (LHS == True) 984 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS); 985 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS); 986 } 987 case ISD::SETUGE: 988 case ISD::SETUGT: { 989 if (LHS == True) 990 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS); 991 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS); 992 } 993 case ISD::SETGT: 994 case ISD::SETGE: 995 case ISD::SETOGE: 996 case ISD::SETOGT: { 997 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG && 998 !DCI.isCalledByLegalizer()) 999 return SDValue(); 1000 1001 if (LHS == True) 1002 return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS); 1003 return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS); 1004 } 1005 case ISD::SETCC_INVALID: 1006 llvm_unreachable("Invalid setcc condcode!"); 1007 } 1008 return SDValue(); 1009 } 1010 1011 std::pair<SDValue, SDValue> 1012 AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const { 1013 SDLoc SL(Op); 1014 1015 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op); 1016 1017 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 1018 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 1019 1020 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero); 1021 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One); 1022 1023 return std::make_pair(Lo, Hi); 1024 } 1025 1026 SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const { 1027 SDLoc SL(Op); 1028 1029 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op); 1030 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 1031 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero); 1032 } 1033 1034 SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const { 1035 SDLoc SL(Op); 1036 1037 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op); 1038 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 1039 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One); 1040 } 1041 1042 SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op, 1043 SelectionDAG &DAG) const { 1044 LoadSDNode *Load = cast<LoadSDNode>(Op); 1045 EVT VT = Op.getValueType(); 1046 1047 1048 // If this is a 2 element vector, we really want to scalarize and not create 1049 // weird 1 element vectors. 
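  // For example, a <2 x i32> load is lowered to two scalar i32 loads rather
  // than a pair of <1 x i32> loads.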
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                     Load->getChain(), BasePtr,
                     SrcValue,
                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), BaseAlign);

  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Size, SL, PtrVT));

  SDValue HiLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                     Load->getChain(), HiPtr,
                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), HiAlign);

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

// FIXME: This isn't doing anything for SI. This should be used in a target
// combine during type legalization.
SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack a 32-bit
  // vector truncating store into an i32 store.
  // XXX: We could also optimize other vector bitwidths.
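  //
  // For example (illustrative only), a truncating store of <4 x i32> to
  // <4 x i8> is packed into a single i32 store of
  //   (e0 & 0xff) | ((e1 & 0xff) << 8) | ((e2 & 0xff) << 16) | ((e3 & 0xff) << 24)
  // and a <2 x i8> store becomes an i16 truncating store of the packed value.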
1109 if (!MemVT.isVector() || MemBits > 32) { 1110 return SDValue(); 1111 } 1112 1113 SDLoc DL(Op); 1114 SDValue Value = Store->getValue(); 1115 EVT VT = Value.getValueType(); 1116 EVT ElemVT = VT.getVectorElementType(); 1117 SDValue Ptr = Store->getBasePtr(); 1118 EVT MemEltVT = MemVT.getVectorElementType(); 1119 unsigned MemEltBits = MemEltVT.getSizeInBits(); 1120 unsigned MemNumElements = MemVT.getVectorNumElements(); 1121 unsigned PackedSize = MemVT.getStoreSizeInBits(); 1122 SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32); 1123 1124 assert(Value.getValueType().getScalarSizeInBits() >= 32); 1125 1126 SDValue PackedValue; 1127 for (unsigned i = 0; i < MemNumElements; ++i) { 1128 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value, 1129 DAG.getConstant(i, DL, MVT::i32)); 1130 Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32); 1131 Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg 1132 1133 SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32); 1134 Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift); 1135 1136 if (i == 0) { 1137 PackedValue = Elt; 1138 } else { 1139 PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt); 1140 } 1141 } 1142 1143 if (PackedSize < 32) { 1144 EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize); 1145 return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr, 1146 Store->getMemOperand()->getPointerInfo(), 1147 PackedVT, 1148 Store->isNonTemporal(), Store->isVolatile(), 1149 Store->getAlignment()); 1150 } 1151 1152 return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr, 1153 Store->getMemOperand()->getPointerInfo(), 1154 Store->isVolatile(), Store->isNonTemporal(), 1155 Store->getAlignment()); 1156 } 1157 1158 SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op, 1159 SelectionDAG &DAG) const { 1160 StoreSDNode *Store = cast<StoreSDNode>(Op); 1161 SDValue Val = Store->getValue(); 1162 EVT VT = Val.getValueType(); 1163 1164 // If this is a 2 element vector, we really want to scalarize and not create 1165 // weird 1 element vectors. 
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                              PtrVT));

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore
    = DAG.getTruncStore(Chain, SL, Lo,
                        BasePtr,
                        SrcValue,
                        LoMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        BaseAlign);
  SDValue HiStore
    = DAG.getTruncStore(Chain, SL, Hi,
                        HiPtr,
                        SrcValue.getWithOffset(Size),
                        HiMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        HiAlign);

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit integer.
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  ISD::NodeType ToFp = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  if (VT.isVector()) {
    unsigned NElts = VT.getVectorNumElements();
    IntVT = MVT::getVectorVT(MVT::i32, NElts);
    FltVT = MVT::getVectorVT(MVT::f32, NElts);
  }

  unsigned BitSize = VT.getScalarType().getSizeInBits();

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));

    // jq = (int)jq
    jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
  }

  // int ia = (int)LHS;
  SDValue ia = sign ?
    DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);

  // int ib = (int)RHS;
  SDValue ib = sign ?
    DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  // TODO: Should this propagate fast-math-flags?
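  //
  // The quotient below is computed as trunc(fa * rcp(fb)); since rcp is not
  // exactly rounded, the correction jq (1, or +/-1 in the signed case) is
  // added back when the remainder check |fr| >= |fb| shows the truncated
  // quotient came up short in magnitude.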
1266 // float fq = native_divide(fa, fb); 1267 SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT, 1268 fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb)); 1269 1270 // fq = trunc(fq); 1271 fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq); 1272 1273 // float fqneg = -fq; 1274 SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq); 1275 1276 // float fr = mad(fqneg, fb, fa); 1277 SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT, 1278 DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa); 1279 1280 // int iq = (int)fq; 1281 SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq); 1282 1283 // fr = fabs(fr); 1284 fr = DAG.getNode(ISD::FABS, DL, FltVT, fr); 1285 1286 // fb = fabs(fb); 1287 fb = DAG.getNode(ISD::FABS, DL, FltVT, fb); 1288 1289 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 1290 1291 // int cv = fr >= fb; 1292 SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE); 1293 1294 // jq = (cv ? jq : 0); 1295 jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT)); 1296 1297 // dst = trunc/extend to legal type 1298 iq = sign ? DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT); 1299 1300 // dst = iq + jq; 1301 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq); 1302 1303 // Rem needs compensation, it's easier to recompute it 1304 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS); 1305 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem); 1306 1307 SDValue Res[2] = { 1308 Div, 1309 Rem 1310 }; 1311 return DAG.getMergeValues(Res, DL); 1312 } 1313 1314 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op, 1315 SelectionDAG &DAG, 1316 SmallVectorImpl<SDValue> &Results) const { 1317 assert(Op.getValueType() == MVT::i64); 1318 1319 SDLoc DL(Op); 1320 EVT VT = Op.getValueType(); 1321 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1322 1323 SDValue one = DAG.getConstant(1, DL, HalfVT); 1324 SDValue zero = DAG.getConstant(0, DL, HalfVT); 1325 1326 //HiLo split 1327 SDValue LHS = Op.getOperand(0); 1328 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero); 1329 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one); 1330 1331 SDValue RHS = Op.getOperand(1); 1332 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero); 1333 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one); 1334 1335 if (VT == MVT::i64 && 1336 DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) && 1337 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) { 1338 1339 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1340 LHS_Lo, RHS_Lo); 1341 1342 SDValue DIV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, 1343 Res.getValue(0), zero); 1344 SDValue REM = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, 1345 Res.getValue(1), zero); 1346 1347 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV)); 1348 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM)); 1349 return; 1350 } 1351 1352 // Get Speculative values 1353 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo); 1354 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo); 1355 1356 SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ); 1357 SDValue REM = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, REM_Lo, zero); 1358 REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM); 1359 1360 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ); 1361 SDValue DIV_Lo = zero; 1362 1363 const unsigned halfBitWidth = 
HalfVT.getSizeInBits();

  for (unsigned i = 0; i < halfBitWidth; ++i) {
    const unsigned bitPos = halfBitWidth - i - 1;
    SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
    // Get value of high bit
    SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
    HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one);
    HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);

    // Shift
    REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
    // Add LHS high bit
    REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);

    SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
    SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE);

    DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

    // Update REM
    SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
    REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
  }

  SDValue DIV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v2i32, DIV_Lo, DIV_Hi);
  DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
  Results.push_back(DIV);
  Results.push_back(REM);
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i64) {
    SmallVector<SDValue, 2> Results;
    LowerUDIVREM64(Op, DAG, Results);
    return DAG.getMergeValues(Results, DL);
  }

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  if (VT == MVT::i32) {
    if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) &&
        DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) {
      // TODO: We technically could do this for i64, but shouldn't that just be
      // handled by something generally reducing 64-bit division on 32-bit
      // values to 32-bit?
      return LowerDIVREM24(Op, DAG, false);
    }
  }

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ?
-1 : 0) 1460 SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den, 1461 DAG.getConstant(-1, DL, VT), 1462 DAG.getConstant(0, DL, VT), 1463 ISD::SETUGE); 1464 // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0) 1465 SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num, 1466 Num_S_Remainder, 1467 DAG.getConstant(-1, DL, VT), 1468 DAG.getConstant(0, DL, VT), 1469 ISD::SETUGE); 1470 // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero 1471 SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den, 1472 Remainder_GE_Zero); 1473 1474 // Calculate Division result: 1475 1476 // Quotient_A_One = Quotient + 1 1477 SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient, 1478 DAG.getConstant(1, DL, VT)); 1479 1480 // Quotient_S_One = Quotient - 1 1481 SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient, 1482 DAG.getConstant(1, DL, VT)); 1483 1484 // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One) 1485 SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT), 1486 Quotient, Quotient_A_One, ISD::SETEQ); 1487 1488 // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div) 1489 Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1490 Quotient_S_One, Div, ISD::SETEQ); 1491 1492 // Calculate Rem result: 1493 1494 // Remainder_S_Den = Remainder - Den 1495 SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den); 1496 1497 // Remainder_A_Den = Remainder + Den 1498 SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den); 1499 1500 // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den) 1501 SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT), 1502 Remainder, Remainder_S_Den, ISD::SETEQ); 1503 1504 // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem) 1505 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1506 Remainder_A_Den, Rem, ISD::SETEQ); 1507 SDValue Ops[2] = { 1508 Div, 1509 Rem 1510 }; 1511 return DAG.getMergeValues(Ops, DL); 1512 } 1513 1514 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, 1515 SelectionDAG &DAG) const { 1516 SDLoc DL(Op); 1517 EVT VT = Op.getValueType(); 1518 1519 SDValue LHS = Op.getOperand(0); 1520 SDValue RHS = Op.getOperand(1); 1521 1522 SDValue Zero = DAG.getConstant(0, DL, VT); 1523 SDValue NegOne = DAG.getConstant(-1, DL, VT); 1524 1525 if (VT == MVT::i32 && 1526 DAG.ComputeNumSignBits(LHS) > 8 && 1527 DAG.ComputeNumSignBits(RHS) > 8) { 1528 return LowerDIVREM24(Op, DAG, true); 1529 } 1530 if (VT == MVT::i64 && 1531 DAG.ComputeNumSignBits(LHS) > 32 && 1532 DAG.ComputeNumSignBits(RHS) > 32) { 1533 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1534 1535 //HiLo split 1536 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1537 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1538 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1539 LHS_Lo, RHS_Lo); 1540 SDValue Res[2] = { 1541 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), 1542 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) 1543 }; 1544 return DAG.getMergeValues(Res, DL); 1545 } 1546 1547 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT); 1548 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT); 1549 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); 1550 SDValue RSign = LHSign; // Remainder sign is the same as LHS 1551 1552 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); 1553 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); 
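
  // With LHSign/RHSign being 0 or -1, (x + sign) ^ sign is the standard
  // branchless absolute value (for sign == -1 it computes ~(x - 1) == -x),
  // so the unsigned divide below sees |LHS| and |RHS|.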
1554 1555 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); 1556 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); 1557 1558 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS); 1559 SDValue Rem = Div.getValue(1); 1560 1561 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); 1562 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); 1563 1564 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); 1565 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); 1566 1567 SDValue Res[2] = { 1568 Div, 1569 Rem 1570 }; 1571 return DAG.getMergeValues(Res, DL); 1572 } 1573 1574 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y)) 1575 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const { 1576 SDLoc SL(Op); 1577 EVT VT = Op.getValueType(); 1578 SDValue X = Op.getOperand(0); 1579 SDValue Y = Op.getOperand(1); 1580 1581 // TODO: Should this propagate fast-math-flags? 1582 1583 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y); 1584 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div); 1585 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y); 1586 1587 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul); 1588 } 1589 1590 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const { 1591 SDLoc SL(Op); 1592 SDValue Src = Op.getOperand(0); 1593 1594 // result = trunc(src) 1595 // if (src > 0.0 && src != result) 1596 // result += 1.0 1597 1598 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 1599 1600 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 1601 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 1602 1603 EVT SetCCVT = 1604 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 1605 1606 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT); 1607 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 1608 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 1609 1610 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero); 1611 // TODO: Should this propagate fast-math-flags? 1612 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 1613 } 1614 1615 static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) { 1616 const unsigned FractBits = 52; 1617 const unsigned ExpBits = 11; 1618 1619 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 1620 Hi, 1621 DAG.getConstant(FractBits - 32, SL, MVT::i32), 1622 DAG.getConstant(ExpBits, SL, MVT::i32)); 1623 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart, 1624 DAG.getConstant(1023, SL, MVT::i32)); 1625 1626 return Exp; 1627 } 1628 1629 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const { 1630 SDLoc SL(Op); 1631 SDValue Src = Op.getOperand(0); 1632 1633 assert(Op.getValueType() == MVT::f64); 1634 1635 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 1636 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 1637 1638 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 1639 1640 // Extract the upper half, since this is where we will find the sign and 1641 // exponent. 1642 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One); 1643 1644 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 1645 1646 const unsigned FractBits = 52; 1647 1648 // Extract the sign bit. 1649 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32); 1650 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask); 1651 1652 // Extend back to to 64-bits. 
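  // In scalar terms the next two nodes compute SignBit64 = (uint64_t)SignBit
  // << 32: the v2i32 vector {0, SignBit} bitcast to i64 (illustrative
  // restatement, assuming the low element maps to the low 32 bits).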
1653 SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 1654 Zero, SignBit); 1655 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64); 1656 1657 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src); 1658 const SDValue FractMask 1659 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); 1660 1661 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp); 1662 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64); 1663 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not); 1664 1665 EVT SetCCVT = 1666 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 1667 1668 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); 1669 1670 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 1671 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 1672 1673 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0); 1674 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1); 1675 1676 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2); 1677 } 1678 1679 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const { 1680 SDLoc SL(Op); 1681 SDValue Src = Op.getOperand(0); 1682 1683 assert(Op.getValueType() == MVT::f64); 1684 1685 APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52"); 1686 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64); 1687 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src); 1688 1689 // TODO: Should this propagate fast-math-flags? 1690 1691 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign); 1692 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign); 1693 1694 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src); 1695 1696 APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51"); 1697 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64); 1698 1699 EVT SetCCVT = 1700 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 1701 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT); 1702 1703 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2); 1704 } 1705 1706 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const { 1707 // FNEARBYINT and FRINT are the same, except in their handling of FP 1708 // exceptions. Those aren't really meaningful for us, and OpenCL only has 1709 // rint, so just treat them as equivalent. 1710 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0)); 1711 } 1712 1713 // XXX - May require not supporting f32 denormals? 1714 SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const { 1715 SDLoc SL(Op); 1716 SDValue X = Op.getOperand(0); 1717 1718 SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X); 1719 1720 // TODO: Should this propagate fast-math-flags? 
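  // The sequence below implements round-half-away-from-zero. A reference
  // sketch in plain C of what the emitted nodes compute (illustrative only,
  // assumes the default rounding mode):
  //
  //   float t = truncf(x);
  //   float r = (fabsf(x - t) >= 0.5f) ? t + copysignf(1.0f, x) : t;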
1721 1722 SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T); 1723 1724 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff); 1725 1726 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32); 1727 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32); 1728 const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32); 1729 1730 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X); 1731 1732 EVT SetCCVT = 1733 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32); 1734 1735 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE); 1736 1737 SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero); 1738 1739 return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel); 1740 } 1741 1742 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const { 1743 SDLoc SL(Op); 1744 SDValue X = Op.getOperand(0); 1745 1746 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X); 1747 1748 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 1749 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 1750 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32); 1751 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32); 1752 EVT SetCCVT = 1753 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 1754 1755 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 1756 1757 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One); 1758 1759 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 1760 1761 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL, 1762 MVT::i64); 1763 1764 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp); 1765 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64, 1766 DAG.getConstant(INT64_C(0x0008000000000000), SL, 1767 MVT::i64), 1768 Exp); 1769 1770 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M); 1771 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT, 1772 DAG.getConstant(0, SL, MVT::i64), Tmp0, 1773 ISD::SETNE); 1774 1775 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1, 1776 D, DAG.getConstant(0, SL, MVT::i64)); 1777 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2); 1778 1779 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64)); 1780 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K); 1781 1782 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 1783 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 1784 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ); 1785 1786 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64, 1787 ExpEqNegOne, 1788 DAG.getConstantFP(1.0, SL, MVT::f64), 1789 DAG.getConstantFP(0.0, SL, MVT::f64)); 1790 1791 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X); 1792 1793 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K); 1794 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K); 1795 1796 return K; 1797 } 1798 1799 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { 1800 EVT VT = Op.getValueType(); 1801 1802 if (VT == MVT::f32) 1803 return LowerFROUND32(Op, DAG); 1804 1805 if (VT == MVT::f64) 1806 return LowerFROUND64(Op, DAG); 1807 1808 llvm_unreachable("unhandled type"); 1809 } 1810 1811 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { 1812 SDLoc SL(Op); 1813 SDValue Src = Op.getOperand(0); 1814 1815 // result = trunc(src); 1816 // if (src < 0.0 && src != result) 1817 // result += -1.0. 
1818 1819 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 1820 1821 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 1822 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64); 1823 1824 EVT SetCCVT = 1825 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 1826 1827 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT); 1828 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 1829 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 1830 1831 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero); 1832 // TODO: Should this propagate fast-math-flags? 1833 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 1834 } 1835 1836 SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 1837 SDLoc SL(Op); 1838 SDValue Src = Op.getOperand(0); 1839 bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF; 1840 1841 if (ZeroUndef && Src.getValueType() == MVT::i32) 1842 return DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Src); 1843 1844 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 1845 1846 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 1847 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 1848 1849 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero); 1850 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One); 1851 1852 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 1853 *DAG.getContext(), MVT::i32); 1854 1855 SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ); 1856 1857 SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo); 1858 SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi); 1859 1860 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32); 1861 SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32); 1862 1863 // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x)) 1864 SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi); 1865 1866 if (!ZeroUndef) { 1867 // Test if the full 64-bit input is zero. 1868 1869 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32, 1870 // which we probably don't want. 1871 SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ); 1872 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0); 1873 1874 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction 1875 // with the same cycles, otherwise it is slower. 1876 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src, 1877 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ); 1878 1879 const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32); 1880 1881 // The instruction returns -1 for 0 input, but the defined intrinsic 1882 // behavior is to return the number of bits. 1883 NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, 1884 SrcIsZero, Bits32, NewCtlz); 1885 } 1886 1887 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz); 1888 } 1889 1890 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, 1891 bool Signed) const { 1892 // Unsigned 1893 // cul2f(ulong u) 1894 //{ 1895 // uint lz = clz(u); 1896 // uint e = (u != 0) ? 127U + 63U - lz : 0; 1897 // u = (u << lz) & 0x7fffffffffffffffUL; 1898 // ulong t = u & 0xffffffffffUL; 1899 // uint v = (e << 23) | (uint)(u >> 40); 1900 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? 
v & 1U : 0U); 1901 // return as_float(v + r); 1902 //} 1903 // Signed 1904 // cl2f(long l) 1905 //{ 1906 // long s = l >> 63; 1907 // float r = cul2f((l + s) ^ s); 1908 // return s ? -r : r; 1909 //} 1910 1911 SDLoc SL(Op); 1912 SDValue Src = Op.getOperand(0); 1913 SDValue L = Src; 1914 1915 SDValue S; 1916 if (Signed) { 1917 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64); 1918 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit); 1919 1920 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S); 1921 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S); 1922 } 1923 1924 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 1925 *DAG.getContext(), MVT::f32); 1926 1927 1928 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32); 1929 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64); 1930 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L); 1931 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ); 1932 1933 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32); 1934 SDValue E = DAG.getSelect(SL, MVT::i32, 1935 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE), 1936 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ), 1937 ZeroI32); 1938 1939 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64, 1940 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ), 1941 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64)); 1942 1943 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U, 1944 DAG.getConstant(0xffffffffffULL, SL, MVT::i64)); 1945 1946 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64, 1947 U, DAG.getConstant(40, SL, MVT::i64)); 1948 1949 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32, 1950 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)), 1951 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl)); 1952 1953 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64); 1954 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT); 1955 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ); 1956 1957 SDValue One = DAG.getConstant(1, SL, MVT::i32); 1958 1959 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One); 1960 1961 SDValue R = DAG.getSelect(SL, MVT::i32, 1962 RCmp, 1963 One, 1964 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, ZeroI32)); 1965 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); 1966 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); 1967 1968 if (!Signed) 1969 return R; 1970 1971 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); 1972 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); 1973 } 1974 1975 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, 1976 bool Signed) const { 1977 SDLoc SL(Op); 1978 SDValue Src = Op.getOperand(0); 1979 1980 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 1981 1982 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 1983 DAG.getConstant(0, SL, MVT::i32)); 1984 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 1985 DAG.getConstant(1, SL, MVT::i32)); 1986 1987 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP, 1988 SL, MVT::f64, Hi); 1989 1990 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); 1991 1992 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, 1993 DAG.getConstant(32, SL, MVT::i32)); 1994 // TODO: Should this propagate fast-math-flags? 
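  // In scalar terms the result is ldexp((double)hi32(Src), 32) +
  // (double)(uint32_t)lo32(Src), where the high half is converted signed for
  // SINT_TO_FP and unsigned for UINT_TO_FP (illustrative restatement only).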
1995 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); 1996 } 1997 1998 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, 1999 SelectionDAG &DAG) const { 2000 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2001 "operation should be legal"); 2002 2003 EVT DestVT = Op.getValueType(); 2004 if (DestVT == MVT::f64) 2005 return LowerINT_TO_FP64(Op, DAG, false); 2006 2007 if (DestVT == MVT::f32) 2008 return LowerINT_TO_FP32(Op, DAG, false); 2009 2010 return SDValue(); 2011 } 2012 2013 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, 2014 SelectionDAG &DAG) const { 2015 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2016 "operation should be legal"); 2017 2018 EVT DestVT = Op.getValueType(); 2019 if (DestVT == MVT::f32) 2020 return LowerINT_TO_FP32(Op, DAG, true); 2021 2022 if (DestVT == MVT::f64) 2023 return LowerINT_TO_FP64(Op, DAG, true); 2024 2025 return SDValue(); 2026 } 2027 2028 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, 2029 bool Signed) const { 2030 SDLoc SL(Op); 2031 2032 SDValue Src = Op.getOperand(0); 2033 2034 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2035 2036 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL, 2037 MVT::f64); 2038 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL, 2039 MVT::f64); 2040 // TODO: Should this propagate fast-math-flags? 2041 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0); 2042 2043 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul); 2044 2045 2046 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc); 2047 2048 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL, 2049 MVT::i32, FloorMul); 2050 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma); 2051 2052 SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi); 2053 2054 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result); 2055 } 2056 2057 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op, 2058 SelectionDAG &DAG) const { 2059 SDValue Src = Op.getOperand(0); 2060 2061 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2062 return LowerFP64_TO_INT(Op, DAG, true); 2063 2064 return SDValue(); 2065 } 2066 2067 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op, 2068 SelectionDAG &DAG) const { 2069 SDValue Src = Op.getOperand(0); 2070 2071 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2072 return LowerFP64_TO_INT(Op, DAG, false); 2073 2074 return SDValue(); 2075 } 2076 2077 SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op, 2078 SelectionDAG &DAG) const { 2079 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT(); 2080 MVT VT = Op.getSimpleValueType(); 2081 MVT ScalarVT = VT.getScalarType(); 2082 2083 if (!VT.isVector()) 2084 return SDValue(); 2085 2086 SDValue Src = Op.getOperand(0); 2087 SDLoc DL(Op); 2088 2089 // TODO: Don't scalarize on Evergreen? 
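  // Scalarize: extract each element, sign_extend_inreg it at the scalar type,
  // and rebuild the vector. Per lane this is equivalent to an arithmetic
  // (x << (DestBits - SrcBits)) >> (DestBits - SrcBits) (illustrative
  // restatement of the loop below).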
2090 unsigned NElts = VT.getVectorNumElements(); 2091 SmallVector<SDValue, 8> Args; 2092 DAG.ExtractVectorElements(Src, Args, 0, NElts); 2093 2094 SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType()); 2095 for (unsigned I = 0; I < NElts; ++I) 2096 Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp); 2097 2098 return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args); 2099 } 2100 2101 //===----------------------------------------------------------------------===// 2102 // Custom DAG optimizations 2103 //===----------------------------------------------------------------------===// 2104 2105 static bool isU24(SDValue Op, SelectionDAG &DAG) { 2106 APInt KnownZero, KnownOne; 2107 EVT VT = Op.getValueType(); 2108 DAG.computeKnownBits(Op, KnownZero, KnownOne); 2109 2110 return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24; 2111 } 2112 2113 static bool isI24(SDValue Op, SelectionDAG &DAG) { 2114 EVT VT = Op.getValueType(); 2115 2116 // In order for this to be a signed 24-bit value, bit 23, must 2117 // be a sign bit. 2118 return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated 2119 // as unsigned 24-bit values. 2120 (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24; 2121 } 2122 2123 static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) { 2124 2125 SelectionDAG &DAG = DCI.DAG; 2126 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2127 EVT VT = Op.getValueType(); 2128 2129 APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24); 2130 APInt KnownZero, KnownOne; 2131 TargetLowering::TargetLoweringOpt TLO(DAG, true, true); 2132 if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO)) 2133 DCI.CommitTargetLoweringOpt(TLO); 2134 } 2135 2136 template <typename IntTy> 2137 static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, 2138 uint32_t Offset, uint32_t Width, SDLoc DL) { 2139 if (Width + Offset < 32) { 2140 uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width); 2141 IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width); 2142 return DAG.getConstant(Result, DL, MVT::i32); 2143 } 2144 2145 return DAG.getConstant(Src0 >> Offset, DL, MVT::i32); 2146 } 2147 2148 static bool usesAllNormalStores(SDNode *LoadVal) { 2149 for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) { 2150 if (!ISD::isNormalStore(*I)) 2151 return false; 2152 } 2153 2154 return true; 2155 } 2156 2157 // If we have a copy of an illegal type, replace it with a load / store of an 2158 // equivalently sized legal type. This avoids intermediate bit pack / unpack 2159 // instructions emitted when handling extloads and truncstores. Ideally we could 2160 // recognize the pack / unpack pattern to eliminate it. 
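// Roughly, a copy of an illegal wide value is re-emitted as a load and store
// of the equivalently sized type from getEquivalentMemType (e.g. an i32 or a
// vNi32), with a bitcast inserted for any other users of the loaded value
// (an illustration of the intent, not an exhaustive description).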
2161 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N, 2162 DAGCombinerInfo &DCI) const { 2163 if (!DCI.isBeforeLegalize()) 2164 return SDValue(); 2165 2166 StoreSDNode *SN = cast<StoreSDNode>(N); 2167 SDValue Value = SN->getValue(); 2168 EVT VT = Value.getValueType(); 2169 2170 if (isTypeLegal(VT) || SN->isVolatile() || 2171 !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8) 2172 return SDValue(); 2173 2174 LoadSDNode *LoadVal = cast<LoadSDNode>(Value); 2175 if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal)) 2176 return SDValue(); 2177 2178 EVT MemVT = LoadVal->getMemoryVT(); 2179 2180 SDLoc SL(N); 2181 SelectionDAG &DAG = DCI.DAG; 2182 EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT); 2183 2184 SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, 2185 LoadVT, SL, 2186 LoadVal->getChain(), 2187 LoadVal->getBasePtr(), 2188 LoadVal->getOffset(), 2189 LoadVT, 2190 LoadVal->getMemOperand()); 2191 2192 SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0)); 2193 DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false); 2194 2195 return DAG.getStore(SN->getChain(), SL, NewLoad, 2196 SN->getBasePtr(), SN->getMemOperand()); 2197 } 2198 2199 // TODO: Should repeat for other bit ops. 2200 SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N, 2201 DAGCombinerInfo &DCI) const { 2202 if (N->getValueType(0) != MVT::i64) 2203 return SDValue(); 2204 2205 // Break up 64-bit and of a constant into two 32-bit ands. This will typically 2206 // happen anyway for a VALU 64-bit and. This exposes other 32-bit integer 2207 // combine opportunities since most 64-bit operations are decomposed this way. 2208 // TODO: We won't want this for SALU especially if it is an inline immediate. 2209 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2210 if (!RHS) 2211 return SDValue(); 2212 2213 uint64_t Val = RHS->getZExtValue(); 2214 if (Lo_32(Val) != 0 && Hi_32(Val) != 0 && !RHS->hasOneUse()) { 2215 // If either half of the constant is 0, this is really a 32-bit and, so 2216 // split it. If we can re-use the full materialized constant, keep it. 2217 return SDValue(); 2218 } 2219 2220 SDLoc SL(N); 2221 SelectionDAG &DAG = DCI.DAG; 2222 2223 SDValue Lo, Hi; 2224 std::tie(Lo, Hi) = split64BitValue(N->getOperand(0), DAG); 2225 2226 SDValue LoRHS = DAG.getConstant(Lo_32(Val), SL, MVT::i32); 2227 SDValue HiRHS = DAG.getConstant(Hi_32(Val), SL, MVT::i32); 2228 2229 SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS); 2230 SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS); 2231 2232 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd); 2233 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2234 } 2235 2236 SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N, 2237 DAGCombinerInfo &DCI) const { 2238 if (N->getValueType(0) != MVT::i64) 2239 return SDValue(); 2240 2241 // i64 (shl x, C) -> (build_pair 0, (shl x, C -32)) 2242 2243 // On some subtargets, 64-bit shift is a quarter rate instruction. In the 2244 // common case, splitting this into a move and a 32-bit shift is faster and 2245 // the same code size. 
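  // e.g. (i64 shl x, 35) becomes
  //   (bitcast (build_vector 0, (shl (trunc x), 3)))
  // so only a 32-bit shift and a zero materialization remain (illustrative).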
2246 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2247 if (!RHS) 2248 return SDValue(); 2249 2250 unsigned RHSVal = RHS->getZExtValue(); 2251 if (RHSVal < 32) 2252 return SDValue(); 2253 2254 SDValue LHS = N->getOperand(0); 2255 2256 SDLoc SL(N); 2257 SelectionDAG &DAG = DCI.DAG; 2258 2259 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); 2260 2261 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS); 2262 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt); 2263 2264 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2265 2266 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Zero, NewShift); 2267 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2268 } 2269 2270 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N, 2271 DAGCombinerInfo &DCI) const { 2272 if (N->getValueType(0) != MVT::i64) 2273 return SDValue(); 2274 2275 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2276 if (!RHS) 2277 return SDValue(); 2278 2279 SelectionDAG &DAG = DCI.DAG; 2280 SDLoc SL(N); 2281 unsigned RHSVal = RHS->getZExtValue(); 2282 2283 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) 2284 if (RHSVal == 32) { 2285 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2286 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2287 DAG.getConstant(31, SL, MVT::i32)); 2288 2289 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2290 Hi, NewShift); 2291 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2292 } 2293 2294 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) 2295 if (RHSVal == 63) { 2296 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2297 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2298 DAG.getConstant(31, SL, MVT::i32)); 2299 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2300 NewShift, NewShift); 2301 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2302 } 2303 2304 return SDValue(); 2305 } 2306 2307 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N, 2308 DAGCombinerInfo &DCI) const { 2309 if (N->getValueType(0) != MVT::i64) 2310 return SDValue(); 2311 2312 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2313 if (!RHS) 2314 return SDValue(); 2315 2316 unsigned ShiftAmt = RHS->getZExtValue(); 2317 if (ShiftAmt < 32) 2318 return SDValue(); 2319 2320 // srl i64:x, C for C >= 32 2321 // => 2322 // build_pair (srl hi_32(x), C - 32), 0 2323 2324 SelectionDAG &DAG = DCI.DAG; 2325 SDLoc SL(N); 2326 2327 SDValue One = DAG.getConstant(1, SL, MVT::i32); 2328 SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2329 2330 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0)); 2331 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, 2332 VecOp, One); 2333 2334 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); 2335 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst); 2336 2337 SDValue BuildPair = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2338 NewShift, Zero); 2339 2340 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair); 2341 } 2342 2343 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N, 2344 DAGCombinerInfo &DCI) const { 2345 EVT VT = N->getValueType(0); 2346 2347 if (VT.isVector() || VT.getSizeInBits() > 32) 2348 return SDValue(); 2349 2350 SelectionDAG &DAG = DCI.DAG; 2351 SDLoc DL(N); 2352 2353 SDValue N0 = N->getOperand(0); 2354 SDValue N1 = N->getOperand(1); 2355 SDValue Mul; 2356 2357 if 
(Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { 2358 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 2359 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 2360 Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1); 2361 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { 2362 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 2363 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 2364 Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, MVT::i32, N0, N1); 2365 } else { 2366 return SDValue(); 2367 } 2368 2369 // We need to use sext even for MUL_U24, because MUL_U24 is used 2370 // for signed multiply of 8 and 16-bit types. 2371 return DAG.getSExtOrTrunc(Mul, DL, VT); 2372 } 2373 2374 static bool isNegativeOne(SDValue Val) { 2375 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) 2376 return C->isAllOnesValue(); 2377 return false; 2378 } 2379 2380 static bool isCtlzOpc(unsigned Opc) { 2381 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF; 2382 } 2383 2384 // Get FFBH node if the incoming op may have been type legalized from a smaller 2385 // type VT. 2386 // Need to match pre-legalized type because the generic legalization inserts the 2387 // add/sub between the select and compare. 2388 static SDValue getFFBH_U32(const TargetLowering &TLI, 2389 SelectionDAG &DAG, SDLoc SL, SDValue Op) { 2390 EVT VT = Op.getValueType(); 2391 EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 2392 if (LegalVT != MVT::i32) 2393 return SDValue(); 2394 2395 if (VT != MVT::i32) 2396 Op = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Op); 2397 2398 SDValue FFBH = DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Op); 2399 if (VT != MVT::i32) 2400 FFBH = DAG.getNode(ISD::TRUNCATE, SL, VT, FFBH); 2401 2402 return FFBH; 2403 } 2404 2405 // The native instructions return -1 on 0 input. Optimize out a select that 2406 // produces -1 on 0. 2407 // 2408 // TODO: If zero is not undef, we could also do this if the output is compared 2409 // against the bitwidth. 2410 // 2411 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly. 
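// Since the native FFBH_U32 instruction already yields -1 for a zero input,
// a pattern like (select (setcc x, 0, eq), -1, (ctlz_zero_undef x)) collapses
// to a single FFBH_U32 node, as matched below.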
2412 SDValue AMDGPUTargetLowering::performCtlzCombine(SDLoc SL, 2413 SDValue Cond, 2414 SDValue LHS, 2415 SDValue RHS, 2416 DAGCombinerInfo &DCI) const { 2417 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 2418 if (!CmpRhs || !CmpRhs->isNullValue()) 2419 return SDValue(); 2420 2421 SelectionDAG &DAG = DCI.DAG; 2422 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 2423 SDValue CmpLHS = Cond.getOperand(0); 2424 2425 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x 2426 if (CCOpcode == ISD::SETEQ && 2427 isCtlzOpc(RHS.getOpcode()) && 2428 RHS.getOperand(0) == CmpLHS && 2429 isNegativeOne(LHS)) { 2430 return getFFBH_U32(*this, DAG, SL, CmpLHS); 2431 } 2432 2433 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x 2434 if (CCOpcode == ISD::SETNE && 2435 isCtlzOpc(LHS.getOpcode()) && 2436 LHS.getOperand(0) == CmpLHS && 2437 isNegativeOne(RHS)) { 2438 return getFFBH_U32(*this, DAG, SL, CmpLHS); 2439 } 2440 2441 return SDValue(); 2442 } 2443 2444 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N, 2445 DAGCombinerInfo &DCI) const { 2446 SDValue Cond = N->getOperand(0); 2447 if (Cond.getOpcode() != ISD::SETCC) 2448 return SDValue(); 2449 2450 EVT VT = N->getValueType(0); 2451 SDValue LHS = Cond.getOperand(0); 2452 SDValue RHS = Cond.getOperand(1); 2453 SDValue CC = Cond.getOperand(2); 2454 2455 SDValue True = N->getOperand(1); 2456 SDValue False = N->getOperand(2); 2457 2458 if (VT == MVT::f32 && Cond.hasOneUse()) { 2459 SDValue MinMax 2460 = CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI); 2461 // Revisit this node so we can catch min3/max3/med3 patterns. 2462 //DCI.AddToWorklist(MinMax.getNode()); 2463 return MinMax; 2464 } 2465 2466 // There's no reason to not do this if the condition has other uses. 2467 return performCtlzCombine(SDLoc(N), Cond, True, False, DCI); 2468 } 2469 2470 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N, 2471 DAGCombinerInfo &DCI) const { 2472 SelectionDAG &DAG = DCI.DAG; 2473 SDLoc DL(N); 2474 2475 switch(N->getOpcode()) { 2476 default: 2477 break; 2478 case ISD::BITCAST: { 2479 EVT DestVT = N->getValueType(0); 2480 if (DestVT.getSizeInBits() != 64 && !DestVT.isVector()) 2481 break; 2482 2483 // Fold bitcasts of constants. 
2484 // 2485 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k) 2486 // TODO: Generalize and move to DAGCombiner 2487 SDValue Src = N->getOperand(0); 2488 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) { 2489 assert(Src.getValueType() == MVT::i64); 2490 SDLoc SL(N); 2491 uint64_t CVal = C->getZExtValue(); 2492 return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT, 2493 DAG.getConstant(Lo_32(CVal), SL, MVT::i32), 2494 DAG.getConstant(Hi_32(CVal), SL, MVT::i32)); 2495 } 2496 2497 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) { 2498 const APInt &Val = C->getValueAPF().bitcastToAPInt(); 2499 SDLoc SL(N); 2500 uint64_t CVal = Val.getZExtValue(); 2501 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2502 DAG.getConstant(Lo_32(CVal), SL, MVT::i32), 2503 DAG.getConstant(Hi_32(CVal), SL, MVT::i32)); 2504 2505 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec); 2506 } 2507 2508 break; 2509 } 2510 case ISD::SHL: { 2511 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 2512 break; 2513 2514 return performShlCombine(N, DCI); 2515 } 2516 case ISD::SRL: { 2517 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 2518 break; 2519 2520 return performSrlCombine(N, DCI); 2521 } 2522 case ISD::SRA: { 2523 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 2524 break; 2525 2526 return performSraCombine(N, DCI); 2527 } 2528 case ISD::AND: { 2529 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 2530 break; 2531 2532 return performAndCombine(N, DCI); 2533 } 2534 case ISD::MUL: 2535 return performMulCombine(N, DCI); 2536 case AMDGPUISD::MUL_I24: 2537 case AMDGPUISD::MUL_U24: { 2538 SDValue N0 = N->getOperand(0); 2539 SDValue N1 = N->getOperand(1); 2540 simplifyI24(N0, DCI); 2541 simplifyI24(N1, DCI); 2542 return SDValue(); 2543 } 2544 case ISD::SELECT: 2545 return performSelectCombine(N, DCI); 2546 case AMDGPUISD::BFE_I32: 2547 case AMDGPUISD::BFE_U32: { 2548 assert(!N->getValueType(0).isVector() && 2549 "Vector handling of BFE not implemented"); 2550 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2)); 2551 if (!Width) 2552 break; 2553 2554 uint32_t WidthVal = Width->getZExtValue() & 0x1f; 2555 if (WidthVal == 0) 2556 return DAG.getConstant(0, DL, MVT::i32); 2557 2558 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2559 if (!Offset) 2560 break; 2561 2562 SDValue BitsFrom = N->getOperand(0); 2563 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f; 2564 2565 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32; 2566 2567 if (OffsetVal == 0) { 2568 // This is already sign / zero extended, so try to fold away extra BFEs. 2569 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal); 2570 2571 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom); 2572 if (OpSignBits >= SignBits) 2573 return BitsFrom; 2574 2575 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal); 2576 if (Signed) { 2577 // This is a sign_extend_inreg. Replace it to take advantage of existing 2578 // DAG Combines. If not eliminated, we will match back to BFE during 2579 // selection. 2580 2581 // TODO: The sext_inreg of extended types ends, although we can could 2582 // handle them in a single BFE. 
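        // e.g. (bfe_i32 x, 0, 8) becomes (sign_extend_inreg x, i8), which
        // surrounding sext/ashr combines may then fold away (illustrative
        // example of the replacement built below).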
2583 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom, 2584 DAG.getValueType(SmallVT)); 2585 } 2586 2587 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT); 2588 } 2589 2590 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) { 2591 if (Signed) { 2592 return constantFoldBFE<int32_t>(DAG, 2593 CVal->getSExtValue(), 2594 OffsetVal, 2595 WidthVal, 2596 DL); 2597 } 2598 2599 return constantFoldBFE<uint32_t>(DAG, 2600 CVal->getZExtValue(), 2601 OffsetVal, 2602 WidthVal, 2603 DL); 2604 } 2605 2606 if ((OffsetVal + WidthVal) >= 32) { 2607 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32); 2608 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32, 2609 BitsFrom, ShiftVal); 2610 } 2611 2612 if (BitsFrom.hasOneUse()) { 2613 APInt Demanded = APInt::getBitsSet(32, 2614 OffsetVal, 2615 OffsetVal + WidthVal); 2616 2617 APInt KnownZero, KnownOne; 2618 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 2619 !DCI.isBeforeLegalizeOps()); 2620 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 2621 if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) || 2622 TLI.SimplifyDemandedBits(BitsFrom, Demanded, 2623 KnownZero, KnownOne, TLO)) { 2624 DCI.CommitTargetLoweringOpt(TLO); 2625 } 2626 } 2627 2628 break; 2629 } 2630 2631 case ISD::STORE: 2632 return performStoreCombine(N, DCI); 2633 } 2634 return SDValue(); 2635 } 2636 2637 //===----------------------------------------------------------------------===// 2638 // Helper functions 2639 //===----------------------------------------------------------------------===// 2640 2641 void AMDGPUTargetLowering::getOriginalFunctionArgs( 2642 SelectionDAG &DAG, 2643 const Function *F, 2644 const SmallVectorImpl<ISD::InputArg> &Ins, 2645 SmallVectorImpl<ISD::InputArg> &OrigIns) const { 2646 2647 for (unsigned i = 0, e = Ins.size(); i < e; ++i) { 2648 if (Ins[i].ArgVT == Ins[i].VT) { 2649 OrigIns.push_back(Ins[i]); 2650 continue; 2651 } 2652 2653 EVT VT; 2654 if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) { 2655 // Vector has been split into scalars. 2656 VT = Ins[i].ArgVT.getVectorElementType(); 2657 } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() && 2658 Ins[i].ArgVT.getVectorElementType() != 2659 Ins[i].VT.getVectorElementType()) { 2660 // Vector elements have been promoted 2661 VT = Ins[i].ArgVT; 2662 } else { 2663 // Vector has been spilt into smaller vectors. 
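      // (e.g. a v8i32 argument lowered as two v4i32 pieces keeps the split
      // part type here; illustrative example only.)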
2664 VT = Ins[i].VT; 2665 } 2666 2667 ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used, 2668 Ins[i].OrigArgIndex, Ins[i].PartOffset); 2669 OrigIns.push_back(Arg); 2670 } 2671 } 2672 2673 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 2674 const TargetRegisterClass *RC, 2675 unsigned Reg, EVT VT) const { 2676 MachineFunction &MF = DAG.getMachineFunction(); 2677 MachineRegisterInfo &MRI = MF.getRegInfo(); 2678 unsigned VirtualRegister; 2679 if (!MRI.isLiveIn(Reg)) { 2680 VirtualRegister = MRI.createVirtualRegister(RC); 2681 MRI.addLiveIn(Reg, VirtualRegister); 2682 } else { 2683 VirtualRegister = MRI.getLiveInVirtReg(Reg); 2684 } 2685 return DAG.getRegister(VirtualRegister, VT); 2686 } 2687 2688 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset( 2689 const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const { 2690 uint64_t ArgOffset = MFI->ABIArgOffset; 2691 switch (Param) { 2692 case GRID_DIM: 2693 return ArgOffset; 2694 case GRID_OFFSET: 2695 return ArgOffset + 4; 2696 } 2697 llvm_unreachable("unexpected implicit parameter type"); 2698 } 2699 2700 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node; 2701 2702 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const { 2703 switch ((AMDGPUISD::NodeType)Opcode) { 2704 case AMDGPUISD::FIRST_NUMBER: break; 2705 // AMDIL DAG nodes 2706 NODE_NAME_CASE(CALL); 2707 NODE_NAME_CASE(UMUL); 2708 NODE_NAME_CASE(RET_FLAG); 2709 NODE_NAME_CASE(BRANCH_COND); 2710 2711 // AMDGPU DAG nodes 2712 NODE_NAME_CASE(DWORDADDR) 2713 NODE_NAME_CASE(FRACT) 2714 NODE_NAME_CASE(CLAMP) 2715 NODE_NAME_CASE(COS_HW) 2716 NODE_NAME_CASE(SIN_HW) 2717 NODE_NAME_CASE(FMAX_LEGACY) 2718 NODE_NAME_CASE(FMIN_LEGACY) 2719 NODE_NAME_CASE(FMAX3) 2720 NODE_NAME_CASE(SMAX3) 2721 NODE_NAME_CASE(UMAX3) 2722 NODE_NAME_CASE(FMIN3) 2723 NODE_NAME_CASE(SMIN3) 2724 NODE_NAME_CASE(UMIN3) 2725 NODE_NAME_CASE(FMED3) 2726 NODE_NAME_CASE(SMED3) 2727 NODE_NAME_CASE(UMED3) 2728 NODE_NAME_CASE(URECIP) 2729 NODE_NAME_CASE(DIV_SCALE) 2730 NODE_NAME_CASE(DIV_FMAS) 2731 NODE_NAME_CASE(DIV_FIXUP) 2732 NODE_NAME_CASE(TRIG_PREOP) 2733 NODE_NAME_CASE(RCP) 2734 NODE_NAME_CASE(RSQ) 2735 NODE_NAME_CASE(RSQ_LEGACY) 2736 NODE_NAME_CASE(RSQ_CLAMP) 2737 NODE_NAME_CASE(LDEXP) 2738 NODE_NAME_CASE(FP_CLASS) 2739 NODE_NAME_CASE(DOT4) 2740 NODE_NAME_CASE(CARRY) 2741 NODE_NAME_CASE(BORROW) 2742 NODE_NAME_CASE(BFE_U32) 2743 NODE_NAME_CASE(BFE_I32) 2744 NODE_NAME_CASE(BFI) 2745 NODE_NAME_CASE(BFM) 2746 NODE_NAME_CASE(FFBH_U32) 2747 NODE_NAME_CASE(MUL_U24) 2748 NODE_NAME_CASE(MUL_I24) 2749 NODE_NAME_CASE(MAD_U24) 2750 NODE_NAME_CASE(MAD_I24) 2751 NODE_NAME_CASE(TEXTURE_FETCH) 2752 NODE_NAME_CASE(EXPORT) 2753 NODE_NAME_CASE(CONST_ADDRESS) 2754 NODE_NAME_CASE(REGISTER_LOAD) 2755 NODE_NAME_CASE(REGISTER_STORE) 2756 NODE_NAME_CASE(LOAD_CONSTANT) 2757 NODE_NAME_CASE(LOAD_INPUT) 2758 NODE_NAME_CASE(SAMPLE) 2759 NODE_NAME_CASE(SAMPLEB) 2760 NODE_NAME_CASE(SAMPLED) 2761 NODE_NAME_CASE(SAMPLEL) 2762 NODE_NAME_CASE(CVT_F32_UBYTE0) 2763 NODE_NAME_CASE(CVT_F32_UBYTE1) 2764 NODE_NAME_CASE(CVT_F32_UBYTE2) 2765 NODE_NAME_CASE(CVT_F32_UBYTE3) 2766 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR) 2767 NODE_NAME_CASE(CONST_DATA_PTR) 2768 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break; 2769 NODE_NAME_CASE(SENDMSG) 2770 NODE_NAME_CASE(INTERP_MOV) 2771 NODE_NAME_CASE(INTERP_P1) 2772 NODE_NAME_CASE(INTERP_P2) 2773 NODE_NAME_CASE(STORE_MSKOR) 2774 NODE_NAME_CASE(TBUFFER_STORE_FORMAT) 2775 NODE_NAME_CASE(ATOMIC_CMP_SWAP) 2776 NODE_NAME_CASE(ATOMIC_INC) 2777 NODE_NAME_CASE(ATOMIC_DEC) 
2778 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break; 2779 } 2780 return nullptr; 2781 } 2782 2783 SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand, 2784 DAGCombinerInfo &DCI, 2785 unsigned &RefinementSteps, 2786 bool &UseOneConstNR) const { 2787 SelectionDAG &DAG = DCI.DAG; 2788 EVT VT = Operand.getValueType(); 2789 2790 if (VT == MVT::f32) { 2791 RefinementSteps = 0; 2792 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand); 2793 } 2794 2795 // TODO: There is also f64 rsq instruction, but the documentation is less 2796 // clear on its precision. 2797 2798 return SDValue(); 2799 } 2800 2801 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand, 2802 DAGCombinerInfo &DCI, 2803 unsigned &RefinementSteps) const { 2804 SelectionDAG &DAG = DCI.DAG; 2805 EVT VT = Operand.getValueType(); 2806 2807 if (VT == MVT::f32) { 2808 // Reciprocal, < 1 ulp error. 2809 // 2810 // This reciprocal approximation converges to < 0.5 ulp error with one 2811 // newton rhapson performed with two fused multiple adds (FMAs). 2812 2813 RefinementSteps = 0; 2814 return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand); 2815 } 2816 2817 // TODO: There is also f64 rcp instruction, but the documentation is less 2818 // clear on its precision. 2819 2820 return SDValue(); 2821 } 2822 2823 void AMDGPUTargetLowering::computeKnownBitsForTargetNode( 2824 const SDValue Op, 2825 APInt &KnownZero, 2826 APInt &KnownOne, 2827 const SelectionDAG &DAG, 2828 unsigned Depth) const { 2829 2830 KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything. 2831 2832 APInt KnownZero2; 2833 APInt KnownOne2; 2834 unsigned Opc = Op.getOpcode(); 2835 2836 switch (Opc) { 2837 default: 2838 break; 2839 case AMDGPUISD::CARRY: 2840 case AMDGPUISD::BORROW: { 2841 KnownZero = APInt::getHighBitsSet(32, 31); 2842 break; 2843 } 2844 2845 case AMDGPUISD::BFE_I32: 2846 case AMDGPUISD::BFE_U32: { 2847 ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2848 if (!CWidth) 2849 return; 2850 2851 unsigned BitWidth = 32; 2852 uint32_t Width = CWidth->getZExtValue() & 0x1f; 2853 2854 if (Opc == AMDGPUISD::BFE_U32) 2855 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width); 2856 2857 break; 2858 } 2859 } 2860 } 2861 2862 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode( 2863 SDValue Op, 2864 const SelectionDAG &DAG, 2865 unsigned Depth) const { 2866 switch (Op.getOpcode()) { 2867 case AMDGPUISD::BFE_I32: { 2868 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2869 if (!Width) 2870 return 1; 2871 2872 unsigned SignBits = 32 - Width->getZExtValue() + 1; 2873 if (!isNullConstant(Op.getOperand(1))) 2874 return SignBits; 2875 2876 // TODO: Could probably figure something out with non-0 offsets. 2877 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 2878 return std::max(SignBits, Op0SignBits); 2879 } 2880 2881 case AMDGPUISD::BFE_U32: { 2882 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 2883 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1; 2884 } 2885 2886 case AMDGPUISD::CARRY: 2887 case AMDGPUISD::BORROW: 2888 return 31; 2889 2890 default: 2891 return 1; 2892 } 2893 } 2894