//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUDiagnosticInfoUnsupported.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600MachineFunctionInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "SIInstrInfo.h"
using namespace llvm;

/// Custom CCAssignFn referenced by the tablegen'd calling-convention tables
/// (AMDGPUGenCallingConv.inc below, which is why this must precede the
/// include): place the argument in a stack slot at its original alignment and
/// record the memory location with the CCState.  Always claims the argument
/// (returns true).
static bool allocateStack(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
  unsigned Offset = State.AllocateStack(ValVT.getStoreSize(),
                                        ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));

  return true;
}

// Generated calling-convention dispatch (CC_AMDGPU, RetCC_SI, ...); must come
// after allocateStack, which the generated code calls.
#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
// Find a 32-bit-granular integer type with the same store size as VT:
// a single iN for sizes <= 32 bits, otherwise a vector of i32.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

// Type for a vector that will be loaded to.
// Like getEquivalentMemType, but small types are widened to a full i32
// register rather than kept at their exact bit width.
EVT AMDGPUTargetLowering::getEquivalentLoadRegType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, 32);

  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

// Constructor: registers the legalization actions, promotions, DAG combines
// and cost hints shared by all AMDGPU targets (R600 and SI families refine
// these in their subclasses).
AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
  : TargetLowering(TM), Subtarget(&STI) {
  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  // No jump tables or indirect branches on the GPU.
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // We need to custom lower some of the intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::i64);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v2i64);

  // Custom lowering of vector stores is required for local address space
  // stores.
  setOperationAction(ISD::STORE, MVT::v4i32, Custom);

  setTruncStoreAction(MVT::v2i32, MVT::v2i16, Custom);
  setTruncStoreAction(MVT::v2i32, MVT::v2i8, Custom);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Custom);

  // XXX: This can be change to Custom, once ExpandVectorStores can
  // handle 64-bit stores.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Expand);

  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v4i64, MVT::v4i1, Expand);

  // FP loads mirror the FP store promotions above.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::i64);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v2i64);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  // No extending loads from small integer vectors either.
  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setOperationAction(ISD::BR_CC, MVT::i1, Expand);

  // Pre-CI parts lack the f64 rounding instructions, so lower them manually.
  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);

  // No hardware f16 extending loads; legalize via f32.
  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UDIV, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  // FFBH/FFBL are the hardware find-first-bit instructions.
  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
  else
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  if (!Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);

  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    // NOTE(review): SDIVREM is Custom here while UDIVREM is Expand, unlike the
    // scalar case above where both are Custom — presumably intentional; confirm.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);

  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3)
  setHasFloatingPointExceptions(false);

  setSelectIsExpensive(false);
  PredictableSelectIsExpensive = false;

  setFsqrtIsCheap(true);

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // FIXME: Need to really handle these.
  MaxStoresPerMemcpy = 4096;
  MaxStoresPerMemmove = 4096;
  MaxStoresPerMemset = 4096;
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

// Vector element indices are always i32 on this target.
MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

// Every kind of select (scalar and vector) is supported.
bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64);
}

// We don't want to shrink f64 / f32 constants.
// Only allow shrinking FP constants whose scalar type is neither f32 nor f64
// (see comment above: we keep f64 / f32 constants at full width).
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

// Decide whether it is profitable to narrow a load to NewVT: yes for exactly
// 32 bits, and otherwise only when the original value was already narrower
// than 32 bits (i.e. already an extload).
bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {

  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

// A bitcast of a load is beneficial whenever the total size changes, or when
// the per-element sizes make the cast a no-op at the register level.
bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  if (LoadTy.getSizeInBits() != CastTy.getSizeInBits())
    return true;

  unsigned LScalarSize = LoadTy.getScalarType().getSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarType().getSizeInBits();

  return ((LScalarSize <= CastScalarSize) ||
          (CastScalarSize >= 32) ||
          (LScalarSize < 32));
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
// Speculating cttz is cheap (see the comment above re SI hardware support).
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

// Likewise for ctlz.
bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

// fabs is free only for the natively supported f32 / f64 types.
bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

// fneg is free only for the natively supported f32 / f64 types.
bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64;
}

// Storing a constant vector is always considered cheap, regardless of element
// count or address space.
bool AMDGPUTargetLowering:: storeOfVectorConstantIsCheap(EVT MemVT,
                                                         unsigned NumElem,
                                                         unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  return Dest->getPrimitiveSizeInBits() < Source->getPrimitiveSizeInBits() &&
         (Dest->getPrimitiveSizeInBits() % 32 == 0);
}

// Zero-extension i32 -> i64 is free: the high half is just a mov 0.
bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free.  As used,
  // this will enable reducing 64-bit operations the 32-bit, which is always
  // good.
  return Src == MVT::i32 && Dest == MVT::i64;
}

// SDValue overload just delegates to the EVT overload above.
bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

// Run the generated CC_AMDGPU calling convention over the formal arguments.
void AMDGPUTargetLowering::AnalyzeFormalArguments(CCState &State,
                             const SmallVectorImpl<ISD::InputArg> &Ins) const {

  State.AnalyzeFormalArguments(Ins, CC_AMDGPU);
}

// Run the generated RetCC_SI calling convention over the return values.
void AMDGPUTargetLowering::AnalyzeReturn(CCState &State,
                           const SmallVectorImpl<ISD::OutputArg> &Outs) const {

  State.AnalyzeReturn(Outs, RetCC_SI);
}

// Returns are just a RET_FLAG chained node; output values are handled by the
// subclasses / calling convention, not here.
SDValue AMDGPUTargetLowering::LowerReturn(
                                     SDValue Chain,
                                     CallingConv::ID CallConv,
                                     bool isVarArg,
                                     const SmallVectorImpl<ISD::OutputArg> &Outs,
                                     const SmallVectorImpl<SDValue> &OutVals,
                                     SDLoc DL, SelectionDAG &DAG) const {
  return DAG.getNode(AMDGPUISD::RET_FLAG, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

// Calls are not supported: emit a diagnostic naming the callee (when it can be
// identified) and return an empty SDValue.
SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = *DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(Fn, "call to function " + FuncName);
  DAG.getContext()->diagnose(NoCalls);
  return SDValue();
}

// Dynamic stack allocation is unsupported (see Custom action registered in the
// constructor): diagnose and return an empty SDValue.
SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn =
*DAG.getMachineFunction().getFunction(); 620 621 DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "dynamic alloca"); 622 DAG.getContext()->diagnose(NoDynamicAlloca); 623 return SDValue(); 624 } 625 626 SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op, 627 SelectionDAG &DAG) const { 628 switch (Op.getOpcode()) { 629 default: 630 Op.getNode()->dump(); 631 llvm_unreachable("Custom lowering code for this" 632 "instruction is not implemented yet!"); 633 break; 634 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG); 635 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG); 636 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG); 637 case ISD::FrameIndex: return LowerFrameIndex(Op, DAG); 638 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG); 639 case ISD::UDIVREM: return LowerUDIVREM(Op, DAG); 640 case ISD::SDIVREM: return LowerSDIVREM(Op, DAG); 641 case ISD::FREM: return LowerFREM(Op, DAG); 642 case ISD::FCEIL: return LowerFCEIL(Op, DAG); 643 case ISD::FTRUNC: return LowerFTRUNC(Op, DAG); 644 case ISD::FRINT: return LowerFRINT(Op, DAG); 645 case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG); 646 case ISD::FROUND: return LowerFROUND(Op, DAG); 647 case ISD::FFLOOR: return LowerFFLOOR(Op, DAG); 648 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG); 649 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG); 650 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG); 651 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG); 652 case ISD::CTLZ: 653 case ISD::CTLZ_ZERO_UNDEF: 654 return LowerCTLZ(Op, DAG); 655 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG); 656 } 657 return Op; 658 } 659 660 void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N, 661 SmallVectorImpl<SDValue> &Results, 662 SelectionDAG &DAG) const { 663 switch (N->getOpcode()) { 664 case ISD::SIGN_EXTEND_INREG: 665 // Different parts of legalization seem to interpret which type of 666 // sign_extend_inreg is the one 
to check for custom lowering. The extended 667 // from type is what really matters, but some places check for custom 668 // lowering of the result type. This results in trying to use 669 // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do 670 // nothing here and let the illegal result integer be handled normally. 671 return; 672 case ISD::LOAD: { 673 SDNode *Node = LowerLOAD(SDValue(N, 0), DAG).getNode(); 674 if (!Node) 675 return; 676 677 Results.push_back(SDValue(Node, 0)); 678 Results.push_back(SDValue(Node, 1)); 679 // XXX: LLVM seems not to replace Chain Value inside CustomWidenLowerNode 680 // function 681 DAG.ReplaceAllUsesOfValueWith(SDValue(N,1), SDValue(Node, 1)); 682 return; 683 } 684 case ISD::STORE: { 685 SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG); 686 if (Lowered.getNode()) 687 Results.push_back(Lowered); 688 return; 689 } 690 default: 691 return; 692 } 693 } 694 695 // FIXME: This implements accesses to initialized globals in the constant 696 // address space by copying them to private and accessing that. It does not 697 // properly handle illegal types or vectors. The private vector loads are not 698 // scalarized, and the illegal scalars hit an assertion. This technique will not 699 // work well with large initializers, and this should eventually be 700 // removed. Initialized globals should be placed into a data section that the 701 // runtime will load into a buffer before the kernel is executed. Uses of the 702 // global need to be replaced with a pointer loaded from an implicit kernel 703 // argument into this buffer holding the copy of the data, which will remove the 704 // need for any of this. 
705 SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init, 706 const GlobalValue *GV, 707 const SDValue &InitPtr, 708 SDValue Chain, 709 SelectionDAG &DAG) const { 710 const DataLayout &TD = DAG.getDataLayout(); 711 SDLoc DL(InitPtr); 712 Type *InitTy = Init->getType(); 713 714 if (const ConstantInt *CI = dyn_cast<ConstantInt>(Init)) { 715 EVT VT = EVT::getEVT(InitTy); 716 PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS); 717 return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr, 718 MachinePointerInfo(UndefValue::get(PtrTy)), false, 719 false, TD.getPrefTypeAlignment(InitTy)); 720 } 721 722 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) { 723 EVT VT = EVT::getEVT(CFP->getType()); 724 PointerType *PtrTy = PointerType::get(CFP->getType(), 0); 725 return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr, 726 MachinePointerInfo(UndefValue::get(PtrTy)), false, 727 false, TD.getPrefTypeAlignment(CFP->getType())); 728 } 729 730 if (StructType *ST = dyn_cast<StructType>(InitTy)) { 731 const StructLayout *SL = TD.getStructLayout(ST); 732 733 EVT PtrVT = InitPtr.getValueType(); 734 SmallVector<SDValue, 8> Chains; 735 736 for (unsigned I = 0, N = ST->getNumElements(); I != N; ++I) { 737 SDValue Offset = DAG.getConstant(SL->getElementOffset(I), DL, PtrVT); 738 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset); 739 740 Constant *Elt = Init->getAggregateElement(I); 741 Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG)); 742 } 743 744 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 745 } 746 747 if (SequentialType *SeqTy = dyn_cast<SequentialType>(InitTy)) { 748 EVT PtrVT = InitPtr.getValueType(); 749 750 unsigned NumElements; 751 if (ArrayType *AT = dyn_cast<ArrayType>(SeqTy)) 752 NumElements = AT->getNumElements(); 753 else if (VectorType *VT = dyn_cast<VectorType>(SeqTy)) 754 NumElements = VT->getNumElements(); 755 else 756 
llvm_unreachable("Unexpected type"); 757 758 unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType()); 759 SmallVector<SDValue, 8> Chains; 760 for (unsigned i = 0; i < NumElements; ++i) { 761 SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT); 762 SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, InitPtr, Offset); 763 764 Constant *Elt = Init->getAggregateElement(i); 765 Chains.push_back(LowerConstantInitializer(Elt, GV, Ptr, Chain, DAG)); 766 } 767 768 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains); 769 } 770 771 if (isa<UndefValue>(Init)) { 772 EVT VT = EVT::getEVT(InitTy); 773 PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS); 774 return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr, 775 MachinePointerInfo(UndefValue::get(PtrTy)), false, 776 false, TD.getPrefTypeAlignment(InitTy)); 777 } 778 779 Init->dump(); 780 llvm_unreachable("Unhandled constant initializer"); 781 } 782 783 static bool hasDefinedInitializer(const GlobalValue *GV) { 784 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV); 785 if (!GVar || !GVar->hasInitializer()) 786 return false; 787 788 if (isa<UndefValue>(GVar->getInitializer())) 789 return false; 790 791 return true; 792 } 793 794 SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI, 795 SDValue Op, 796 SelectionDAG &DAG) const { 797 798 const DataLayout &DL = DAG.getDataLayout(); 799 GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op); 800 const GlobalValue *GV = G->getGlobal(); 801 802 switch (G->getAddressSpace()) { 803 case AMDGPUAS::LOCAL_ADDRESS: { 804 // XXX: What does the value of G->getOffset() mean? 805 assert(G->getOffset() == 0 && 806 "Do not know what to do with an non-zero offset"); 807 808 // TODO: We could emit code to handle the initialization somewhere. 
    // LDS globals with an initializer are not supported; fall through to the
    // "unsupported initializer" diagnostic after the switch.
    if (hasDefinedInitializer(GV))
      break;

    // Lazily assign each LDS global a byte offset into the function's LDS
    // block the first time it is referenced; reuse the cached offset after.
    unsigned Offset;
    if (MFI->LocalMemoryObjects.count(GV) == 0) {
      uint64_t Size = DL.getTypeAllocSize(GV->getValueType());
      Offset = MFI->LDSSize;
      MFI->LocalMemoryObjects[GV] = Offset;
      // XXX: Account for alignment?
      MFI->LDSSize += Size;
    } else {
      Offset = MFI->LocalMemoryObjects[GV];
    }

    // The address of an LDS global is just its constant offset.
    return DAG.getConstant(Offset, SDLoc(Op),
                           getPointerTy(DL, AMDGPUAS::LOCAL_ADDRESS));
  }
  case AMDGPUAS::CONSTANT_ADDRESS: {
    // Materialize the constant global as a stack object and emit stores for
    // its initializer, then hand back the frame address reinterpreted as a
    // constant-address pointer.
    MachineFrameInfo *FrameInfo = DAG.getMachineFunction().getFrameInfo();
    Type *EltType = GV->getValueType();
    unsigned Size = DL.getTypeAllocSize(EltType);
    unsigned Alignment = DL.getPrefTypeAlignment(EltType);

    MVT PrivPtrVT = getPointerTy(DL, AMDGPUAS::PRIVATE_ADDRESS);
    MVT ConstPtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);

    int FI = FrameInfo->CreateStackObject(Size, Alignment, false);
    SDValue InitPtr = DAG.getFrameIndex(FI, PrivPtrVT);

    const GlobalVariable *Var = cast<GlobalVariable>(GV);
    if (!Var->hasInitializer()) {
      // This has no use, but bugpoint will hit it.
      return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
    }

    const Constant *Init = Var->getInitializer();
    SmallVector<SDNode*, 8> WorkList;

    // Collect the loads currently chained to the entry node; after emitting
    // the initializer stores they must be re-chained after those stores so
    // they cannot be scheduled before initialization.
    for (SDNode::use_iterator I = DAG.getEntryNode()->use_begin(),
                              E = DAG.getEntryNode()->use_end(); I != E; ++I) {
      if (I->getOpcode() != AMDGPUISD::REGISTER_LOAD && I->getOpcode() != ISD::LOAD)
        continue;
      WorkList.push_back(*I);
    }
    SDValue Chain = LowerConstantInitializer(Init, GV, InitPtr, DAG.getEntryNode(), DAG);
    for (SmallVector<SDNode*, 8>::iterator I = WorkList.begin(),
                                           E = WorkList.end(); I != E; ++I) {
      // Replace operand 0 (the chain) with the initializer chain; keep the
      // remaining operands unchanged.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(Chain);
      for (unsigned i = 1; i < (*I)->getNumOperands(); ++i) {
        Ops.push_back((*I)->getOperand(i));
      }
      DAG.UpdateNodeOperands(*I, Ops);
    }
    return DAG.getZExtOrTrunc(InitPtr, SDLoc(Op), ConstPtrVT);
  }
  }

  // Any other address space is unsupported for globals: report and bail.
  const Function &Fn = *DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(Fn,
                                    "initializer for address space");
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

/// Lower CONCAT_VECTORS by extracting every element of every operand and
/// rebuilding a single BUILD_VECTOR.
SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

/// Lower EXTRACT_SUBVECTOR by extracting the selected element range and
/// rebuilding the result with BUILD_VECTOR.
SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {

  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getNode(ISD::BUILD_VECTOR, SDLoc(Op), Op.getValueType(), Args);
}

SDValue AMDGPUTargetLowering::LowerFrameIndex(SDValue Op,
                                              SelectionDAG &DAG) const {

  MachineFunction &MF =
      DAG.getMachineFunction();
  const AMDGPUFrameLowering *TFL = Subtarget->getFrameLowering();

  FrameIndexSDNode *FIN = cast<FrameIndexSDNode>(Op);

  unsigned FrameIndex = FIN->getIndex();
  unsigned IgnoredFrameReg;
  unsigned Offset =
    TFL->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
  // Scale by 4 bytes per stack slot times the target's stack width
  // (slots per row).
  return DAG.getConstant(Offset * 4 * TFL->getStackWidth(MF), SDLoc(Op),
                         Op.getValueType());
}

/// Dispatch table mapping target intrinsics (including several legacy AMDIL
/// names) to their AMDGPUISD / generic ISD node equivalents. Intrinsics not
/// listed here are returned unchanged.
SDValue AMDGPUTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
  unsigned IntrinsicID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  switch (IntrinsicID) {
  default: return Op;
  case AMDGPUIntrinsic::AMDGPU_abs:
  case AMDGPUIntrinsic::AMDIL_abs: // Legacy name.
    return LowerIntrinsicIABS(Op, DAG);
  case AMDGPUIntrinsic::AMDGPU_lrp:
    return LowerIntrinsicLRP(Op, DAG);

  case AMDGPUIntrinsic::AMDGPU_clamp:
  case AMDGPUIntrinsic::AMDIL_clamp: // Legacy name.
    return DAG.getNode(AMDGPUISD::CLAMP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::AMDGPU_div_scale: {
    // 3rd parameter required to be a constant.
    const ConstantSDNode *Param = dyn_cast<ConstantSDNode>(Op.getOperand(3));
    if (!Param)
      return DAG.getUNDEF(VT);

    // Translate to the operands expected by the machine instruction. The
    // first parameter must be the same as the first instruction.
    SDValue Numerator = Op.getOperand(1);
    SDValue Denominator = Op.getOperand(2);

    // Note this order is opposite of the machine instruction's operations,
    // which is s0.f = Quotient, s1.f = Denominator, s2.f = Numerator. The
    // intrinsic has the numerator as the first operand to match a normal
    // division operation.

    SDValue Src0 = Param->isAllOnesValue() ? Numerator : Denominator;

    return DAG.getNode(AMDGPUISD::DIV_SCALE, DL, Op->getVTList(), Src0,
                       Denominator, Numerator);
  }

  case Intrinsic::AMDGPU_div_fmas:
    return DAG.getNode(AMDGPUISD::DIV_FMAS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3),
                       Op.getOperand(4));

  case Intrinsic::AMDGPU_div_fixup:
    return DAG.getNode(AMDGPUISD::DIV_FIXUP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case Intrinsic::AMDGPU_trig_preop:
    return DAG.getNode(AMDGPUISD::TRIG_PREOP, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case Intrinsic::AMDGPU_rcp:
    return DAG.getNode(AMDGPUISD::RCP, DL, VT, Op.getOperand(1));

  case Intrinsic::AMDGPU_rsq:
    return DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_legacy_rsq:
    return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));

  case Intrinsic::AMDGPU_rsq_clamped:
    if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
      // VI has no clamped-rsq instruction: emulate it by clamping a plain
      // RSQ to the largest/smallest finite value of the type.
      Type *Type = VT.getTypeForEVT(*DAG.getContext());
      APFloat Max = APFloat::getLargest(Type->getFltSemantics());
      APFloat Min = APFloat::getLargest(Type->getFltSemantics(), true);

      SDValue Rsq = DAG.getNode(AMDGPUISD::RSQ, DL, VT, Op.getOperand(1));
      SDValue Tmp = DAG.getNode(ISD::FMINNUM, DL, VT, Rsq,
                                DAG.getConstantFP(Max, DL, VT));
      return DAG.getNode(ISD::FMAXNUM, DL, VT, Tmp,
                         DAG.getConstantFP(Min, DL, VT));
    } else {
      return DAG.getNode(AMDGPUISD::RSQ_CLAMPED, DL, VT, Op.getOperand(1));
    }

  case Intrinsic::AMDGPU_ldexp:
    return DAG.getNode(AMDGPUISD::LDEXP, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_imax:
    return DAG.getNode(ISD::SMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umax:
    return DAG.getNode(ISD::UMAX, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_imin:
    return DAG.getNode(ISD::SMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));
  case AMDGPUIntrinsic::AMDGPU_umin:
    return DAG.getNode(ISD::UMIN, DL, VT, Op.getOperand(1),
                       Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_umul24:
    return DAG.getNode(AMDGPUISD::MUL_U24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_imul24:
    return DAG.getNode(AMDGPUISD::MUL_I24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case AMDGPUIntrinsic::AMDGPU_umad24:
    return DAG.getNode(AMDGPUISD::MAD_U24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_imad24:
    return DAG.getNode(AMDGPUISD::MAD_I24, DL, VT,
                       Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte0:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE0, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte1:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE1, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte2:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE2, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_cvt_f32_ubyte3:
    return DAG.getNode(AMDGPUISD::CVT_F32_UBYTE3, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDGPU_bfe_i32:
    return DAG.getNode(AMDGPUISD::BFE_I32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfe_u32:
    return DAG.getNode(AMDGPUISD::BFE_U32, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfi:
    return DAG.getNode(AMDGPUISD::BFI, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2),
                       Op.getOperand(3));

  case AMDGPUIntrinsic::AMDGPU_bfm:
    return DAG.getNode(AMDGPUISD::BFM, DL, VT,
                       Op.getOperand(1),
                       Op.getOperand(2));

  case Intrinsic::AMDGPU_class:
    return DAG.getNode(AMDGPUISD::FP_CLASS, DL, VT,
                       Op.getOperand(1), Op.getOperand(2));

  case AMDGPUIntrinsic::AMDIL_exp: // Legacy name.
    return DAG.getNode(ISD::FEXP2, DL, VT, Op.getOperand(1));

  case AMDGPUIntrinsic::AMDIL_round_nearest: // Legacy name.
    return DAG.getNode(ISD::FRINT, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_trunc: // Legacy name.
    return DAG.getNode(ISD::FTRUNC, DL, VT, Op.getOperand(1));
  case AMDGPUIntrinsic::AMDGPU_brev: // Legacy name
    return DAG.getNode(ISD::BITREVERSE, DL, VT, Op.getOperand(1));
  }
}

///IABS(a) = SMAX(sub(0, a), a)
SDValue AMDGPUTargetLowering::LowerIntrinsicIABS(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                            Op.getOperand(1));

  return DAG.getNode(ISD::SMAX, DL, VT, Neg, Op.getOperand(1));
}

/// Linear Interpolation
/// LRP(a, b, c) = muladd(a, b, (1 - a) * c)
SDValue AMDGPUTargetLowering::LowerIntrinsicLRP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  // TODO: Should this propagate fast-math-flags?
  SDValue OneSubA = DAG.getNode(ISD::FSUB, DL, VT,
                                DAG.getConstantFP(1.0f, DL, MVT::f32),
                                Op.getOperand(1));
  SDValue OneSubAC = DAG.getNode(ISD::FMUL, DL, VT, OneSubA,
                                 Op.getOperand(3));
  return DAG.getNode(ISD::FADD, DL, VT,
                     DAG.getNode(ISD::FMUL, DL, VT, Op.getOperand(1),
                                 Op.getOperand(2)),
                     OneSubAC);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::CombineFMinMaxLegacy(SDLoc DL,
                                                   EVT VT,
                                                   SDValue LHS,
                                                   SDValue RHS,
                                                   SDValue True,
                                                   SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  // The legacy min/max nodes do not exist on VI and newer.
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
    return SDValue();

  // Only fold select(setcc(lhs, rhs), X, Y) where {X, Y} == {lhs, rhs}.
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  // Equality and ordered/unordered-only comparisons cannot be expressed as a
  // min/max; fall through to the default return.
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

/// Bitcast a 64-bit value to v2i32 and return its two 32-bit halves as
/// (lo, hi).
std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

/// Extract the low 32 bits of a 64-bit value via a v2i32 bitcast.
SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

/// Extract the high 32 bits of a 64-bit value via a v2i32 bitcast.
SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

/// Break a vector load into one extending scalar load per element and
/// reassemble the result with BUILD_VECTOR; the element chains are merged
/// with a TokenFactor. Returns {value, chain}.
SDValue AMDGPUTargetLowering::ScalarizeVectorLoad(const SDValue Op,
                                                  SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT MemVT = Load->getMemoryVT();
  EVT MemEltVT = MemVT.getVectorElementType();

  EVT LoadVT = Op.getValueType();
  EVT EltVT = LoadVT.getVectorElementType();
  EVT PtrVT = Load->getBasePtr().getValueType();

  unsigned NumElts = Load->getMemoryVT().getVectorNumElements();
  SmallVector<SDValue, 8> Loads;
  SmallVector<SDValue, 8> Chains;

  SDLoc SL(Op);
  unsigned MemEltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Load->getMemOperand()->getValue());

  for (unsigned i = 0; i < NumElts; ++i) {
    // Address of element i: base + i * element store size.
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Load->getBasePtr(),
                              DAG.getConstant(i * MemEltSize, SL, PtrVT));

    SDValue NewLoad
      = DAG.getExtLoad(Load->getExtensionType(), SL, EltVT,
                       Load->getChain(), Ptr,
                       SrcValue.getWithOffset(i * MemEltSize),
                       MemEltVT, Load->isVolatile(), Load->isNonTemporal(),
                       Load->isInvariant(), Load->getAlignment());
    Loads.push_back(NewLoad.getValue(0));
    Chains.push_back(NewLoad.getValue(1));
  }

  SDValue Ops[] = {
    DAG.getNode(ISD::BUILD_VECTOR, SL, LoadVT, Loads),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains)
  };

  return DAG.getMergeValues(Ops, SL);
}

/// Split a vector load into two half-width loads (lo then hi) and rejoin the
/// halves with CONCAT_VECTORS. Returns {value, chain}.
SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorLoad(Op, DAG);

  LoadSDNode *Load = cast<LoadSDNode>(Op);
  SDValue BasePtr = Load->getBasePtr();
  EVT PtrVT = BasePtr.getValueType();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  // The hi half starts Size bytes in, so its alignment may be smaller.
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                     Load->getChain(), BasePtr,
                     SrcValue,
                     LoMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), BaseAlign);

  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(Size, SL, PtrVT));

  SDValue HiLoad
    = DAG.getExtLoad(Load->getExtensionType(), SL, HiVT,
                     Load->getChain(), HiPtr,
                     SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, Load->isVolatile(), Load->isNonTemporal(),
                     Load->isInvariant(), HiAlign);

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::MergeVectorStore(const SDValue &Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemVT = Store->getMemoryVT();
  unsigned MemBits = MemVT.getSizeInBits();

  // Byte stores are really expensive, so if possible, try to pack 32-bit vector
  // truncating store into an i32 store.
  // XXX: We could also handle optimize other vector bitwidths.
  if (!MemVT.isVector() || MemBits > 32) {
    return SDValue();
  }

  SDLoc DL(Op);
  SDValue Value = Store->getValue();
  EVT VT = Value.getValueType();
  EVT ElemVT = VT.getVectorElementType();
  SDValue Ptr = Store->getBasePtr();
  EVT MemEltVT = MemVT.getVectorElementType();
  unsigned MemEltBits = MemEltVT.getSizeInBits();
  unsigned MemNumElements = MemVT.getVectorNumElements();
  unsigned PackedSize = MemVT.getStoreSizeInBits();
  // Per-element mask; MemEltBits < 32 here since MemBits <= 32 and this is a
  // multi-element vector, so the shift cannot overflow.
  SDValue Mask = DAG.getConstant((1 << MemEltBits) - 1, DL, MVT::i32);

  assert(Value.getValueType().getScalarSizeInBits() >= 32);

  // OR all elements together into a single packed i32, element i occupying
  // bits [i*MemEltBits, (i+1)*MemEltBits).
  SDValue PackedValue;
  for (unsigned i = 0; i < MemNumElements; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ElemVT, Value,
                              DAG.getConstant(i, DL, MVT::i32));
    Elt = DAG.getZExtOrTrunc(Elt, DL, MVT::i32);
    Elt = DAG.getNode(ISD::AND, DL, MVT::i32, Elt, Mask); // getZeroExtendInReg

    SDValue Shift = DAG.getConstant(MemEltBits * i, DL, MVT::i32);
    Elt = DAG.getNode(ISD::SHL, DL, MVT::i32, Elt, Shift);

    if (i == 0) {
      PackedValue = Elt;
    } else {
      PackedValue = DAG.getNode(ISD::OR, DL, MVT::i32, PackedValue, Elt);
    }
  }

  // If the packed value is narrower than 32 bits, emit a truncating store of
  // the packed integer type; otherwise a plain i32 store.
  if (PackedSize < 32) {
    EVT PackedVT = EVT::getIntegerVT(*DAG.getContext(), PackedSize);
    return DAG.getTruncStore(Store->getChain(), DL, PackedValue, Ptr,
                             Store->getMemOperand()->getPointerInfo(),
                             PackedVT,
                             Store->isNonTemporal(), Store->isVolatile(),
                             Store->getAlignment());
  }

  return DAG.getStore(Store->getChain(), DL, PackedValue, Ptr,
                      Store->getMemOperand()->getPointerInfo(),
                      Store->isVolatile(), Store->isNonTemporal(),
                      Store->getAlignment());
}

/// Break a vector store into one truncating scalar store per element; the
/// per-element chains are merged with a TokenFactor.
SDValue AMDGPUTargetLowering::ScalarizeVectorStore(SDValue Op,
                                                   SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  EVT MemEltVT = Store->getMemoryVT().getVectorElementType();
  EVT EltVT = Store->getValue().getValueType().getVectorElementType();
  EVT PtrVT = Store->getBasePtr().getValueType();
  unsigned NumElts = Store->getMemoryVT().getVectorNumElements();
  SDLoc SL(Op);

  SmallVector<SDValue, 8> Chains;

  unsigned EltSize = MemEltVT.getStoreSize();
  MachinePointerInfo SrcValue(Store->getMemOperand()->getValue());

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, EltVT,
                              Store->getValue(),
                              DAG.getConstant(i, SL, MVT::i32));

    SDValue Offset = DAG.getConstant(i * MemEltVT.getStoreSize(), SL, PtrVT);
    SDValue Ptr = DAG.getNode(ISD::ADD, SL, PtrVT, Store->getBasePtr(), Offset);
    SDValue NewStore =
      DAG.getTruncStore(Store->getChain(), SL, Val, Ptr,
                        SrcValue.getWithOffset(i * EltSize),
                        MemEltVT, Store->isNonTemporal(), Store->isVolatile(),
                        Store->getAlignment());
    Chains.push_back(NewStore);
  }

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, Chains);
}

/// Split a vector store into two half-width truncating stores, mirroring
/// SplitVectorLoad.
SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return ScalarizeVectorStore(Op, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  EVT PtrVT = BasePtr.getValueType();
  SDValue HiPtr = DAG.getNode(ISD::ADD, SL, PtrVT, BasePtr,
                              DAG.getConstant(LoMemVT.getStoreSize(), SL,
                                              PtrVT));

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  // The hi half starts Size bytes in, so its alignment may be smaller.
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore
    = DAG.getTruncStore(Chain, SL, Lo,
                        BasePtr,
                        SrcValue,
                        LoMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        BaseAlign);
  SDValue HiStore
    = DAG.getTruncStore(Chain, SL, Hi,
                        HiPtr,
                        SrcValue.getWithOffset(Size),
                        HiMemVT,
                        Store->isNonTemporal(),
                        Store->isVolatile(),
                        HiAlign);

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}


SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  ISD::LoadExtType ExtType = Load->getExtensionType();
  EVT VT = Op.getValueType();
  EVT MemVT = Load->getMemoryVT();

  if (ExtType == ISD::NON_EXTLOAD && VT.getSizeInBits() < 32) {
    assert(VT == MVT::i1 && "Only i1 non-extloads expected");
    // FIXME: Copied from PPC
    // First, load into 32 bits, then truncate to 1 bit.

    SDValue Chain = Load->getChain();
    SDValue BasePtr = Load->getBasePtr();
    MachineMemOperand *MMO = Load->getMemOperand();

    SDValue NewLD = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                   BasePtr, MVT::i8, MMO);

    SDValue Ops[] = {
      DAG.getNode(ISD::TRUNCATE, DL, VT, NewLD),
      NewLD.getValue(1)
    };

    return DAG.getMergeValues(Ops, DL);
  }

  // The custom expansion below only applies to sub-32-bit extloads from the
  // private address space on pre-SI hardware.
  if (Subtarget->getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS ||
      Load->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS ||
      ExtType == ISD::NON_EXTLOAD || Load->getMemoryVT().bitsGE(MVT::i32))
    return SDValue();

  // <SI && AS=PRIVATE && EXTLOAD && size < 32bit,
  // register (2-)byte extract.

  // Get Register holding the target.
  SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, Load->getBasePtr(),
                            DAG.getConstant(2, DL, MVT::i32));
  // Load the Register.
  SDValue Ret = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, Op.getValueType(),
                            Load->getChain(), Ptr,
                            DAG.getTargetConstant(0, DL, MVT::i32),
                            Op.getOperand(2));

  // Get offset within the register.
  SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32,
                                Load->getBasePtr(),
                                DAG.getConstant(0x3, DL, MVT::i32));

  // Bit offset of target byte (byteIdx * 8).
  SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                 DAG.getConstant(3, DL, MVT::i32));

  // Shift to the right.
  Ret = DAG.getNode(ISD::SRL, DL, MVT::i32, Ret, ShiftAmt);

  // Eliminate the upper bits by setting them to ...
  EVT MemEltVT = MemVT.getScalarType();

  // ... ones.
  if (ExtType == ISD::SEXTLOAD) {
    SDValue MemEltVTNode = DAG.getValueType(MemEltVT);

    SDValue Ops[] = {
      DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, Ret, MemEltVTNode),
      Load->getChain()
    };

    return DAG.getMergeValues(Ops, DL);
  }

  // ... or zeros.
  SDValue Ops[] = {
    DAG.getZeroExtendInReg(Ret, DL, MemEltVT),
    Load->getChain()
  };

  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // First try packing a small vector truncating store into a single i32.
  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
  if (Result.getNode()) {
    return Result;
  }

  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Chain = Store->getChain();
  if ((Store->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS ||
       Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS) &&
      Store->getValue().getValueType().isVector()) {
    return SplitVectorStore(Op, DAG);
  }

  // Sub-32-bit private stores: read-modify-write of the containing 32-bit
  // register (mirrors the extload expansion in LowerLOAD above).
  EVT MemVT = Store->getMemoryVT();
  if (Store->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS &&
      MemVT.bitsLT(MVT::i32)) {
    // NOTE(review): an i1 memory type leaves Mask == 0, which would clear
    // nothing from the destination — presumably i1 stores never reach here;
    // verify against callers.
    unsigned Mask = 0;
    if (Store->getMemoryVT() == MVT::i8) {
      Mask = 0xff;
    } else if (Store->getMemoryVT() == MVT::i16) {
      Mask = 0xffff;
    }
    SDValue BasePtr = Store->getBasePtr();
    // Word index of the containing register.
    SDValue Ptr = DAG.getNode(ISD::SRL, DL, MVT::i32, BasePtr,
                              DAG.getConstant(2, DL, MVT::i32));
    SDValue Dst = DAG.getNode(AMDGPUISD::REGISTER_LOAD, DL, MVT::i32,
                              Chain, Ptr,
                              DAG.getTargetConstant(0, DL, MVT::i32));

    // Byte offset within the word, then its bit offset (byteIdx * 8).
    SDValue ByteIdx = DAG.getNode(ISD::AND, DL, MVT::i32, BasePtr,
                                  DAG.getConstant(0x3, DL, MVT::i32));

    SDValue ShiftAmt = DAG.getNode(ISD::SHL, DL, MVT::i32, ByteIdx,
                                   DAG.getConstant(3, DL, MVT::i32));

    SDValue SExtValue = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i32,
                                    Store->getValue());

    SDValue MaskedValue = DAG.getZeroExtendInReg(SExtValue, DL, MemVT);

    SDValue ShiftedValue = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                       MaskedValue, ShiftAmt);

    // Clear the destination bits being replaced, then OR in the new value.
    SDValue DstMask = DAG.getNode(ISD::SHL, DL, MVT::i32,
                                  DAG.getConstant(Mask, DL, MVT::i32),
                                  ShiftAmt);
    DstMask = DAG.getNode(ISD::XOR, DL, MVT::i32, DstMask,
                          DAG.getConstant(0xffffffff, DL, MVT::i32));
    Dst = DAG.getNode(ISD::AND, DL, MVT::i32, Dst, DstMask);

    SDValue Value = DAG.getNode(ISD::OR, DL, MVT::i32, Dst, ShiftedValue);
    return DAG.getNode(AMDGPUISD::REGISTER_STORE, DL, MVT::Other,
                       Chain, Value, Ptr,
                       DAG.getTargetConstant(0, DL, MVT::i32));
  }
  return SDValue();
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit integer.
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG, bool sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  ISD::NodeType ToFp = sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  // Vector inputs are computed element-wise on i32/f32 vectors of the same
  // width.
  if (VT.isVector()) {
    unsigned NElts = VT.getVectorNumElements();
    IntVT = MVT::getVectorVT(MVT::i32, NElts);
    FltVT = MVT::getVectorVT(MVT::f32, NElts);
  }

  unsigned BitSize = VT.getScalarType().getSizeInBits();

  // jq is the quotient correction term applied at the end; defaults to +1
  // and for signed division carries the sign of (LHS ^ RHS).
  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));

    // jq = (int)jq
    jq = DAG.getSExtOrTrunc(jq, DL, IntVT);
  }

  // int ia = (int)LHS;
  SDValue ia = sign ?
    DAG.getSExtOrTrunc(LHS, DL, IntVT) : DAG.getZExtOrTrunc(LHS, DL, IntVT);

  // int ib, (int)RHS;
  SDValue ib = sign ?
    DAG.getSExtOrTrunc(RHS, DL, IntVT) : DAG.getZExtOrTrunc(RHS, DL, IntVT);

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  // TODO: Should this propagate fast-math-flags?
  // float fq = native_divide(fa, fb);
  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  SDValue fr = DAG.getNode(ISD::FADD, DL, FltVT,
                           DAG.getNode(ISD::FMUL, DL, FltVT, fqneg, fb), fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = trunc/extend to legal type
  iq = sign ?
DAG.getSExtOrTrunc(iq, DL, VT) : DAG.getZExtOrTrunc(iq, DL, VT); 1680 1681 // dst = iq + jq; 1682 SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq); 1683 1684 // Rem needs compensation, it's easier to recompute it 1685 SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS); 1686 Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem); 1687 1688 SDValue Res[2] = { 1689 Div, 1690 Rem 1691 }; 1692 return DAG.getMergeValues(Res, DL); 1693 } 1694 1695 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op, 1696 SelectionDAG &DAG, 1697 SmallVectorImpl<SDValue> &Results) const { 1698 assert(Op.getValueType() == MVT::i64); 1699 1700 SDLoc DL(Op); 1701 EVT VT = Op.getValueType(); 1702 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1703 1704 SDValue one = DAG.getConstant(1, DL, HalfVT); 1705 SDValue zero = DAG.getConstant(0, DL, HalfVT); 1706 1707 //HiLo split 1708 SDValue LHS = Op.getOperand(0); 1709 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, zero); 1710 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, one); 1711 1712 SDValue RHS = Op.getOperand(1); 1713 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, zero); 1714 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, one); 1715 1716 if (VT == MVT::i64 && 1717 DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) && 1718 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) { 1719 1720 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1721 LHS_Lo, RHS_Lo); 1722 1723 SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(0), zero); 1724 SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, Res.getValue(1), zero); 1725 Results.push_back(DIV); 1726 Results.push_back(REM); 1727 return; 1728 } 1729 1730 // Get Speculative values 1731 SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo); 1732 SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo); 1733 1734 SDValue REM_Lo = 
DAG.getSelectCC(DL, RHS_Hi, zero, REM_Part, LHS_Hi, ISD::SETEQ); 1735 SDValue REM = DAG.getNode(ISD::BUILD_PAIR, DL, VT, REM_Lo, zero); 1736 1737 SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, zero, DIV_Part, zero, ISD::SETEQ); 1738 SDValue DIV_Lo = zero; 1739 1740 const unsigned halfBitWidth = HalfVT.getSizeInBits(); 1741 1742 for (unsigned i = 0; i < halfBitWidth; ++i) { 1743 const unsigned bitPos = halfBitWidth - i - 1; 1744 SDValue POS = DAG.getConstant(bitPos, DL, HalfVT); 1745 // Get value of high bit 1746 SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS); 1747 HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, one); 1748 HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit); 1749 1750 // Shift 1751 REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT)); 1752 // Add LHS high bit 1753 REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit); 1754 1755 SDValue BIT = DAG.getConstant(1 << bitPos, DL, HalfVT); 1756 SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, zero, ISD::SETUGE); 1757 1758 DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT); 1759 1760 // Update REM 1761 SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS); 1762 REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE); 1763 } 1764 1765 SDValue DIV = DAG.getNode(ISD::BUILD_PAIR, DL, VT, DIV_Lo, DIV_Hi); 1766 Results.push_back(DIV); 1767 Results.push_back(REM); 1768 } 1769 1770 SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op, 1771 SelectionDAG &DAG) const { 1772 SDLoc DL(Op); 1773 EVT VT = Op.getValueType(); 1774 1775 if (VT == MVT::i64) { 1776 SmallVector<SDValue, 2> Results; 1777 LowerUDIVREM64(Op, DAG, Results); 1778 return DAG.getMergeValues(Results, DL); 1779 } 1780 1781 SDValue Num = Op.getOperand(0); 1782 SDValue Den = Op.getOperand(1); 1783 1784 if (VT == MVT::i32) { 1785 if (DAG.MaskedValueIsZero(Num, APInt::getHighBitsSet(32, 8)) && 1786 DAG.MaskedValueIsZero(Den, APInt::getHighBitsSet(32, 8))) { 1787 // TODO: We technically could do this for i64, but 
shouldn't that just be 1788 // handled by something generally reducing 64-bit division on 32-bit 1789 // values to 32-bit? 1790 return LowerDIVREM24(Op, DAG, false); 1791 } 1792 } 1793 1794 // RCP = URECIP(Den) = 2^32 / Den + e 1795 // e is rounding error. 1796 SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den); 1797 1798 // RCP_LO = mul(RCP, Den) */ 1799 SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den); 1800 1801 // RCP_HI = mulhu (RCP, Den) */ 1802 SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den); 1803 1804 // NEG_RCP_LO = -RCP_LO 1805 SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), 1806 RCP_LO); 1807 1808 // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO) 1809 SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT), 1810 NEG_RCP_LO, RCP_LO, 1811 ISD::SETEQ); 1812 // Calculate the rounding error from the URECIP instruction 1813 // E = mulhu(ABS_RCP_LO, RCP) 1814 SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP); 1815 1816 // RCP_A_E = RCP + E 1817 SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E); 1818 1819 // RCP_S_E = RCP - E 1820 SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E); 1821 1822 // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_SUB_E) 1823 SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT), 1824 RCP_A_E, RCP_S_E, 1825 ISD::SETEQ); 1826 // Quotient = mulhu(Tmp0, Num) 1827 SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num); 1828 1829 // Num_S_Remainder = Quotient * Den 1830 SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den); 1831 1832 // Remainder = Num - Num_S_Remainder 1833 SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder); 1834 1835 // Remainder_GE_Den = (Remainder >= Den ? -1 : 0) 1836 SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den, 1837 DAG.getConstant(-1, DL, VT), 1838 DAG.getConstant(0, DL, VT), 1839 ISD::SETUGE); 1840 // Remainder_GE_Zero = (Num >= Num_S_Remainder ? 
-1 : 0) 1841 SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num, 1842 Num_S_Remainder, 1843 DAG.getConstant(-1, DL, VT), 1844 DAG.getConstant(0, DL, VT), 1845 ISD::SETUGE); 1846 // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero 1847 SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den, 1848 Remainder_GE_Zero); 1849 1850 // Calculate Division result: 1851 1852 // Quotient_A_One = Quotient + 1 1853 SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient, 1854 DAG.getConstant(1, DL, VT)); 1855 1856 // Quotient_S_One = Quotient - 1 1857 SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient, 1858 DAG.getConstant(1, DL, VT)); 1859 1860 // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One) 1861 SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT), 1862 Quotient, Quotient_A_One, ISD::SETEQ); 1863 1864 // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div) 1865 Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1866 Quotient_S_One, Div, ISD::SETEQ); 1867 1868 // Calculate Rem result: 1869 1870 // Remainder_S_Den = Remainder - Den 1871 SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den); 1872 1873 // Remainder_A_Den = Remainder + Den 1874 SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den); 1875 1876 // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den) 1877 SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT), 1878 Remainder, Remainder_S_Den, ISD::SETEQ); 1879 1880 // Rem = (Remainder_GE_Zero == 0 ? 
Remainder_A_Den : Rem) 1881 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1882 Remainder_A_Den, Rem, ISD::SETEQ); 1883 SDValue Ops[2] = { 1884 Div, 1885 Rem 1886 }; 1887 return DAG.getMergeValues(Ops, DL); 1888 } 1889 1890 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, 1891 SelectionDAG &DAG) const { 1892 SDLoc DL(Op); 1893 EVT VT = Op.getValueType(); 1894 1895 SDValue LHS = Op.getOperand(0); 1896 SDValue RHS = Op.getOperand(1); 1897 1898 SDValue Zero = DAG.getConstant(0, DL, VT); 1899 SDValue NegOne = DAG.getConstant(-1, DL, VT); 1900 1901 if (VT == MVT::i32 && 1902 DAG.ComputeNumSignBits(LHS) > 8 && 1903 DAG.ComputeNumSignBits(RHS) > 8) { 1904 return LowerDIVREM24(Op, DAG, true); 1905 } 1906 if (VT == MVT::i64 && 1907 DAG.ComputeNumSignBits(LHS) > 32 && 1908 DAG.ComputeNumSignBits(RHS) > 32) { 1909 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1910 1911 //HiLo split 1912 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1913 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1914 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1915 LHS_Lo, RHS_Lo); 1916 SDValue Res[2] = { 1917 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), 1918 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) 1919 }; 1920 return DAG.getMergeValues(Res, DL); 1921 } 1922 1923 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT); 1924 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT); 1925 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); 1926 SDValue RSign = LHSign; // Remainder sign is the same as LHS 1927 1928 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); 1929 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); 1930 1931 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); 1932 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); 1933 1934 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, 
DAG.getVTList(VT, VT), LHS, RHS); 1935 SDValue Rem = Div.getValue(1); 1936 1937 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); 1938 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); 1939 1940 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); 1941 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); 1942 1943 SDValue Res[2] = { 1944 Div, 1945 Rem 1946 }; 1947 return DAG.getMergeValues(Res, DL); 1948 } 1949 1950 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y)) 1951 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const { 1952 SDLoc SL(Op); 1953 EVT VT = Op.getValueType(); 1954 SDValue X = Op.getOperand(0); 1955 SDValue Y = Op.getOperand(1); 1956 1957 // TODO: Should this propagate fast-math-flags? 1958 1959 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y); 1960 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div); 1961 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y); 1962 1963 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul); 1964 } 1965 1966 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const { 1967 SDLoc SL(Op); 1968 SDValue Src = Op.getOperand(0); 1969 1970 // result = trunc(src) 1971 // if (src > 0.0 && src != result) 1972 // result += 1.0 1973 1974 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 1975 1976 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 1977 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 1978 1979 EVT SetCCVT = 1980 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 1981 1982 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT); 1983 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 1984 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 1985 1986 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero); 1987 // TODO: Should this propagate fast-math-flags? 
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

// Extract the exponent field from the high 32 bits of an f64 and return it
// as an i32 with the IEEE-754 bias of 1023 already removed.
static SDValue extractF64Exponent(SDValue Hi, SDLoc SL, SelectionDAG &DAG) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  // The 11 exponent bits start at bit 52 of the double, i.e. bit 20 of the
  // high word, so bitfield-extract ExpBits bits from offset FractBits - 32.
  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, SL, MVT::i32),
                                DAG.getConstant(ExpBits, SL, MVT::i32));
  // Remove the exponent bias.
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, SL, MVT::i32));

  return Exp;
}

// Lower f64 trunc by masking off fraction bits below the rounding position,
// handling the |x| < 1 and already-integral cases explicitly.
SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64-bits.
  SDValue SignBit64 = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                  Zero, SignBit);
  SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64);

  SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src);
  const SDValue FractMask
    = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64);

  // Arithmetic-shifting the fraction mask right by the exponent leaves ones
  // exactly in the bit positions that must be cleared to truncate; AND with
  // the complement drops them.
  SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp);
  SDValue Not = DAG.getNOT(SL, Shr, MVT::i64);
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);

  const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32);

  // Exp < 0: |src| < 1.0, so trunc is +/-0.0 (just the sign bit).
  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  // Exp > 51: no fractional bits remain; src is already integral.
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);

  SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0);
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1);

  return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2);
}

// Lower f64 rint/frint with the classic add-and-subtract-2^52 trick, which
// rounds in the current rounding mode while |src| < 2^52.
SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  // C1 = 2^52, copied with the sign of the input so the trick works for
  // negative values too.
  APFloat C1Val(APFloat::IEEEdouble, "0x1.0p+52");
  SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64);
  SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src);

  // TODO: Should this propagate fast-math-flags?
  SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign);
  SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign);

  SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src);

  // C2 is the largest double strictly below 2^52; anything larger in
  // magnitude is already an integer and must be passed through unchanged.
  APFloat C2Val(APFloat::IEEEdouble, "0x1.fffffffffffffp+51");
  SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);
  SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT);

  return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2);
}

SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const {
  // FNEARBYINT and FRINT are the same, except in their handling of FP
  // exceptions. Those aren't really meaningful for us, and OpenCL only has
  // rint, so just treat them as equivalent.
  return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0));
}

// XXX - May require not supporting f32 denormals?
// Lower f32 round (round-half-away-from-zero) via trunc plus a conditional
// +/-1.0 when the dropped fraction is at least 0.5.
SDValue AMDGPUTargetLowering::LowerFROUND32(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);

  SDValue T = DAG.getNode(ISD::FTRUNC, SL, MVT::f32, X);

  // TODO: Should this propagate fast-math-flags?
  SDValue Diff = DAG.getNode(ISD::FSUB, SL, MVT::f32, X, T);

  SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, MVT::f32, Diff);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f32);
  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f32);
  const SDValue Half = DAG.getConstantFP(0.5, SL, MVT::f32);

  // +/-1.0 carrying the sign of the input; added when rounding away from zero.
  SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f32, One, X);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f32);

  // Round away from zero when the dropped fraction is >= 0.5.
  SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE);

  SDValue Sel = DAG.getNode(ISD::SELECT, SL, MVT::f32, Cmp, SignOne, Zero);

  return DAG.getNode(ISD::FADD, SL, MVT::f32, T, Sel);
}

// Lower f64 round by integer bit manipulation of the IEEE-754 encoding.
SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue X = Op.getOperand(0);

  SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32);
  const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32);
  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);

  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  // Mask of the f64 fraction field (low 52 bits).
  const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL,
                                       MVT::i64);

  // M: fraction bits below the rounding position. D: the 0.5-ulp bit at the
  // rounding position (fraction MSB shifted down by the exponent).
  SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp);
  SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64,
                          DAG.getConstant(INT64_C(0x0008000000000000), SL,
                                          MVT::i64),
                          Exp);

  // Tmp1: true when any fractional bits are set (value is not integral).
  SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M);
  SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT,
                              DAG.getConstant(0, SL, MVT::i64), Tmp0,
                              ISD::SETNE);
  // Add the 0.5-ulp increment only when fractional bits exist, then clear
  // the fraction below the rounding position.
  SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1,
                             D, DAG.getConstant(0, SL, MVT::i64));
  SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2);

  K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64));
  K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K);

  SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT);
  SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT);
  SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ);

  // Exponent of -1 means 0.5 <= |x| < 1, which rounds to +/-1.0; smaller
  // exponents round to +/-0.0.
  SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64,
                            ExpEqNegOne,
                            DAG.getConstantFP(1.0, SL, MVT::f64),
                            DAG.getConstantFP(0.0, SL, MVT::f64));

  SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X);

  // Exp < 0: |x| < 0.5 or [0.5, 1) — use S. Exp > 51: already integral.
  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K);
  K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K);

  return K;
}

// Dispatch fround lowering by type.
SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  if (VT == MVT::f32)
    return LowerFROUND32(Op, DAG);

  if (VT == MVT::f64)
    return LowerFROUND64(Op, DAG);

  llvm_unreachable("unhandled type");
}

// Lower f64 floor in terms of trunc plus a conditional -1.0.
SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src);
  // if (src < 0.0 && src != result)
  //   result += -1.0.
2194 2195 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2196 2197 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 2198 const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64); 2199 2200 EVT SetCCVT = 2201 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2202 2203 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT); 2204 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 2205 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 2206 2207 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero); 2208 // TODO: Should this propagate fast-math-flags? 2209 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 2210 } 2211 2212 SDValue AMDGPUTargetLowering::LowerCTLZ(SDValue Op, SelectionDAG &DAG) const { 2213 SDLoc SL(Op); 2214 SDValue Src = Op.getOperand(0); 2215 bool ZeroUndef = Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF; 2216 2217 if (ZeroUndef && Src.getValueType() == MVT::i32) 2218 return DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Src); 2219 2220 SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2221 2222 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2223 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2224 2225 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero); 2226 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One); 2227 2228 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 2229 *DAG.getContext(), MVT::i32); 2230 2231 SDValue Hi0 = DAG.getSetCC(SL, SetCCVT, Hi, Zero, ISD::SETEQ); 2232 2233 SDValue CtlzLo = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Lo); 2234 SDValue CtlzHi = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i32, Hi); 2235 2236 const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32); 2237 SDValue Add = DAG.getNode(ISD::ADD, SL, MVT::i32, CtlzLo, Bits32); 2238 2239 // ctlz(x) = hi_32(x) == 0 ? 
ctlz(lo_32(x)) + 32 : ctlz(hi_32(x)) 2240 SDValue NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0, Add, CtlzHi); 2241 2242 if (!ZeroUndef) { 2243 // Test if the full 64-bit input is zero. 2244 2245 // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32, 2246 // which we probably don't want. 2247 SDValue Lo0 = DAG.getSetCC(SL, SetCCVT, Lo, Zero, ISD::SETEQ); 2248 SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0, Hi0); 2249 2250 // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction 2251 // with the same cycles, otherwise it is slower. 2252 // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src, 2253 // DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ); 2254 2255 const SDValue Bits32 = DAG.getConstant(64, SL, MVT::i32); 2256 2257 // The instruction returns -1 for 0 input, but the defined intrinsic 2258 // behavior is to return the number of bits. 2259 NewCtlz = DAG.getNode(ISD::SELECT, SL, MVT::i32, 2260 SrcIsZero, Bits32, NewCtlz); 2261 } 2262 2263 return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewCtlz); 2264 } 2265 2266 SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG, 2267 bool Signed) const { 2268 // Unsigned 2269 // cul2f(ulong u) 2270 //{ 2271 // uint lz = clz(u); 2272 // uint e = (u != 0) ? 127U + 63U - lz : 0; 2273 // u = (u << lz) & 0x7fffffffffffffffUL; 2274 // ulong t = u & 0xffffffffffUL; 2275 // uint v = (e << 23) | (uint)(u >> 40); 2276 // uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U); 2277 // return as_float(v + r); 2278 //} 2279 // Signed 2280 // cl2f(long l) 2281 //{ 2282 // long s = l >> 63; 2283 // float r = cul2f((l + s) ^ s); 2284 // return s ? 
-r : r; 2285 //} 2286 2287 SDLoc SL(Op); 2288 SDValue Src = Op.getOperand(0); 2289 SDValue L = Src; 2290 2291 SDValue S; 2292 if (Signed) { 2293 const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64); 2294 S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit); 2295 2296 SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S); 2297 L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S); 2298 } 2299 2300 EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), 2301 *DAG.getContext(), MVT::f32); 2302 2303 2304 SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32); 2305 SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64); 2306 SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L); 2307 LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ); 2308 2309 SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32); 2310 SDValue E = DAG.getSelect(SL, MVT::i32, 2311 DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE), 2312 DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ), 2313 ZeroI32); 2314 2315 SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64, 2316 DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ), 2317 DAG.getConstant((-1ULL) >> 1, SL, MVT::i64)); 2318 2319 SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U, 2320 DAG.getConstant(0xffffffffffULL, SL, MVT::i64)); 2321 2322 SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64, 2323 U, DAG.getConstant(40, SL, MVT::i64)); 2324 2325 SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32, 2326 DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)), 2327 DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl)); 2328 2329 SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64); 2330 SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT); 2331 SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ); 2332 2333 SDValue One = DAG.getConstant(1, SL, MVT::i32); 2334 2335 SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One); 2336 2337 SDValue R = DAG.getSelect(SL, MVT::i32, 2338 RCmp, 2339 One, 2340 DAG.getSelect(SL, MVT::i32, TCmp, VTrunc1, 
                  ZeroI32));
  R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R);
  R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R);

  if (!Signed)
    return R;

  // Reapply the sign that was stripped before the unsigned conversion.
  SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R);
  return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R);
}

// Lower i64 -> f64 by converting the two 32-bit halves separately and
// combining them as (double)hi * 2^32 + (double)lo.
SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(0, SL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC,
                           DAG.getConstant(1, SL, MVT::i32));

  // Only the high half carries the sign; the low half is always unsigned.
  SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP,
                              SL, MVT::f64, Hi);

  SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo);

  // Scale the converted high half by 2^32.
  SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi,
                              DAG.getConstant(32, SL, MVT::i32));
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo);
}

// Dispatch custom i64 -> float lowering by destination type.
SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOperand(0).getValueType() == MVT::i64 &&
         "operation should be legal");

  EVT DestVT = Op.getValueType();
  if (DestVT == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, false);

  if (DestVT == MVT::f32)
    return LowerINT_TO_FP32(Op, DAG, false);

  return SDValue();
}

// Signed counterpart of LowerUINT_TO_FP.
SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op,
                                              SelectionDAG &DAG) const {
  assert(Op.getOperand(0).getValueType() == MVT::i64 &&
         "operation should be legal");

  EVT DestVT = Op.getValueType();
  if (DestVT == MVT::f32)
    return LowerINT_TO_FP32(Op, DAG, true);

  if (DestVT == MVT::f64)
    return LowerINT_TO_FP64(Op, DAG, true);

  return SDValue();
}

// Lower f64 -> i64 by converting the high and low 32-bit halves separately:
// split off the upper half with a scaled floor, convert each half to i32,
// and reassemble the pair.
SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  SDLoc SL(Op);

  SDValue Src = Op.getOperand(0);

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  // K0 = 2^-32, K1 = -2^32 (as f64 bit patterns).
  SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL,
                                 MVT::f64);
  SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL,
                                 MVT::f64);
  // TODO: Should this propagate fast-math-flags?
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0);

  SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul);


  // Fma = Trunc - FloorMul * 2^32: the value of the low 32 bits.
  SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc);

  SDValue Hi = DAG.getNode(Signed ?
                           ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL,
                           MVT::i32, FloorMul);
  // The low half never carries the sign.
  SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma);

  SDValue Result = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Lo, Hi);

  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result);
}

// Only the f64 -> i64 case needs custom handling; everything else falls back
// to the default expansion.
SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, true);

  return SDValue();
}

// Unsigned counterpart of LowerFP_TO_SINT.
SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(0);

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, false);

  return SDValue();
}

// Lower vector SIGN_EXTEND_INREG by scalarizing; scalar cases are left to
// the default handling.
SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  if (!VT.isVector())
    return SDValue();

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  // Sign-extend each scalar element in-register, then rebuild the vector.
  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getNode(ISD::BUILD_VECTOR, DL, VT, Args);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

// Return true if Op is known to fit in an unsigned 24-bit value (i.e. its
// known-zero bits cover everything above bit 23).
static bool isU24(SDValue Op, SelectionDAG &DAG) {
  APInt KnownZero, KnownOne;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, KnownZero, KnownOne);

  return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}

// Return true if Op is known to fit in a signed 24-bit value.
static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23, must
  // be a sign bit.
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
    (VT.getSizeInBits() - DAG.ComputeNumSignBits(Op)) < 24;
}

// Ask the generic simplifier to narrow Op based on only its low 24 bits
// being demanded, committing any resulting simplification.
static void simplifyI24(SDValue Op, TargetLowering::DAGCombinerInfo &DCI) {

  SelectionDAG &DAG = DCI.DAG;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  APInt KnownZero, KnownOne;
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  if (TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
    DCI.CommitTargetLoweringOpt(TLO);
}

// Constant fold a bitfield extract of Src0; the extension of the extracted
// field follows IntTy's signedness.
template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0,
                               uint32_t Offset, uint32_t Width, SDLoc DL) {
  if (Width + Offset < 32) {
    // Shift the field up against bit 31, then shift back down so the upper
    // bits are filled by IntTy's (sign or zero) extension.
    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    return DAG.getConstant(Result, DL, MVT::i32);
  }

  // Field reaches the top bit: a plain shift extracts it exactly.
  return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}

// Return true if every use of LoadVal is a plain (non-extending,
// non-indexed) store.
static bool usesAllNormalStores(SDNode *LoadVal) {
  for (SDNode::use_iterator I = LoadVal->use_begin(); !I.atEnd(); ++I) {
    if (!ISD::isNormalStore(*I))
      return false;
  }

  return true;
}

// If we have a copy of an illegal type, replace it with a load / store of an
// equivalently sized legal type. This avoids intermediate bit pack / unpack
// instructions emitted when handling extloads and truncstores. Ideally we could
// recognize the pack / unpack pattern to eliminate it.
SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  if (!DCI.isBeforeLegalize())
    return SDValue();

  StoreSDNode *SN = cast<StoreSDNode>(N);
  SDValue Value = SN->getValue();
  EVT VT = Value.getValueType();

  // Only interesting for a non-volatile store of an illegal-typed normal
  // load that is at least a byte wide.
  if (isTypeLegal(VT) || SN->isVolatile() ||
      !ISD::isNormalLoad(Value.getNode()) || VT.getSizeInBits() < 8)
    return SDValue();

  LoadSDNode *LoadVal = cast<LoadSDNode>(Value);
  if (LoadVal->isVolatile() || !usesAllNormalStores(LoadVal))
    return SDValue();

  EVT MemVT = LoadVal->getMemoryVT();

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;
  // Pick a legal type with the same store size as the illegal one.
  EVT LoadVT = getEquivalentMemType(*DAG.getContext(), MemVT);

  SDValue NewLoad = DAG.getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD,
                                LoadVT, SL,
                                LoadVal->getChain(),
                                LoadVal->getBasePtr(),
                                LoadVal->getOffset(),
                                LoadVT,
                                LoadVal->getMemOperand());

  // Redirect all users of the old load to a bitcast of the new one, and
  // store the new load's value with the original store's operands.
  SDValue CastLoad = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad.getValue(0));
  DCI.CombineTo(LoadVal, CastLoad, NewLoad.getValue(1), false);

  return DAG.getStore(SN->getChain(), SL, NewLoad,
                      SN->getBasePtr(), SN->getMemOperand());
}

// TODO: Should repeat for other bit ops.
SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i64)
    return SDValue();

  // Break up 64-bit and of a constant into two 32-bit ands. This will typically
  // happen anyway for a VALU 64-bit and. This exposes other 32-bit integer
  // combine opportunities since most 64-bit operations are decomposed this way.
  // TODO: We won't want this for SALU especially if it is an inline immediate.
  const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS)
    return SDValue();

  uint64_t Val = RHS->getZExtValue();
  if (Lo_32(Val) != 0 && Hi_32(Val) != 0 && !RHS->hasOneUse()) {
    // If either half of the constant is 0, this is really a 32-bit and, so
    // split it. If we can re-use the full materialized constant, keep it.
    return SDValue();
  }

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;

  SDValue Lo, Hi;
  std::tie(Lo, Hi) = split64BitValue(N->getOperand(0), DAG);

  SDValue LoRHS = DAG.getConstant(Lo_32(Val), SL, MVT::i32);
  SDValue HiRHS = DAG.getConstant(Hi_32(Val), SL, MVT::i32);

  SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
  SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);

  // Recombine the two 32-bit results into an i64.
  SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}

SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  if (N->getValueType(0) != MVT::i64)
    return SDValue();

  // i64 (shl x, C) -> (build_pair 0, (shl x, C -32))

  // On some subtargets, 64-bit shift is a quarter rate instruction. In the
  // common case, splitting this into a move and a 32-bit shift is faster and
  // the same code size.
2622 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2623 if (!RHS) 2624 return SDValue(); 2625 2626 unsigned RHSVal = RHS->getZExtValue(); 2627 if (RHSVal < 32) 2628 return SDValue(); 2629 2630 SDValue LHS = N->getOperand(0); 2631 2632 SDLoc SL(N); 2633 SelectionDAG &DAG = DCI.DAG; 2634 2635 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); 2636 2637 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS); 2638 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt); 2639 2640 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2641 2642 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, Zero, NewShift); 2643 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2644 } 2645 2646 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N, 2647 DAGCombinerInfo &DCI) const { 2648 if (N->getValueType(0) != MVT::i64) 2649 return SDValue(); 2650 2651 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2652 if (!RHS) 2653 return SDValue(); 2654 2655 SelectionDAG &DAG = DCI.DAG; 2656 SDLoc SL(N); 2657 unsigned RHSVal = RHS->getZExtValue(); 2658 2659 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) 2660 if (RHSVal == 32) { 2661 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2662 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2663 DAG.getConstant(31, SL, MVT::i32)); 2664 2665 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2666 Hi, NewShift); 2667 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2668 } 2669 2670 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) 2671 if (RHSVal == 63) { 2672 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2673 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2674 DAG.getConstant(31, SL, MVT::i32)); 2675 SDValue BuildVec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2676 NewShift, NewShift); 2677 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2678 } 2679 2680 return 
SDValue(); 2681 } 2682 2683 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N, 2684 DAGCombinerInfo &DCI) const { 2685 if (N->getValueType(0) != MVT::i64) 2686 return SDValue(); 2687 2688 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2689 if (!RHS) 2690 return SDValue(); 2691 2692 unsigned ShiftAmt = RHS->getZExtValue(); 2693 if (ShiftAmt < 32) 2694 return SDValue(); 2695 2696 // srl i64:x, C for C >= 32 2697 // => 2698 // build_pair (srl hi_32(x), C - 32), 0 2699 2700 SelectionDAG &DAG = DCI.DAG; 2701 SDLoc SL(N); 2702 2703 SDValue One = DAG.getConstant(1, SL, MVT::i32); 2704 SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2705 2706 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0)); 2707 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, 2708 VecOp, One); 2709 2710 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); 2711 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst); 2712 2713 SDValue BuildPair = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 2714 NewShift, Zero); 2715 2716 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair); 2717 } 2718 2719 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N, 2720 DAGCombinerInfo &DCI) const { 2721 EVT VT = N->getValueType(0); 2722 2723 if (VT.isVector() || VT.getSizeInBits() > 32) 2724 return SDValue(); 2725 2726 SelectionDAG &DAG = DCI.DAG; 2727 SDLoc DL(N); 2728 2729 SDValue N0 = N->getOperand(0); 2730 SDValue N1 = N->getOperand(1); 2731 SDValue Mul; 2732 2733 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { 2734 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 2735 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 2736 Mul = DAG.getNode(AMDGPUISD::MUL_U24, DL, MVT::i32, N0, N1); 2737 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { 2738 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 2739 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 2740 Mul = DAG.getNode(AMDGPUISD::MUL_I24, DL, 
MVT::i32, N0, N1); 2741 } else { 2742 return SDValue(); 2743 } 2744 2745 // We need to use sext even for MUL_U24, because MUL_U24 is used 2746 // for signed multiply of 8 and 16-bit types. 2747 return DAG.getSExtOrTrunc(Mul, DL, VT); 2748 } 2749 2750 static bool isNegativeOne(SDValue Val) { 2751 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) 2752 return C->isAllOnesValue(); 2753 return false; 2754 } 2755 2756 static bool isCtlzOpc(unsigned Opc) { 2757 return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF; 2758 } 2759 2760 // Get FFBH node if the incoming op may have been type legalized from a smaller 2761 // type VT. 2762 // Need to match pre-legalized type because the generic legalization inserts the 2763 // add/sub between the select and compare. 2764 static SDValue getFFBH_U32(const TargetLowering &TLI, 2765 SelectionDAG &DAG, SDLoc SL, SDValue Op) { 2766 EVT VT = Op.getValueType(); 2767 EVT LegalVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT); 2768 if (LegalVT != MVT::i32) 2769 return SDValue(); 2770 2771 if (VT != MVT::i32) 2772 Op = DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i32, Op); 2773 2774 SDValue FFBH = DAG.getNode(AMDGPUISD::FFBH_U32, SL, MVT::i32, Op); 2775 if (VT != MVT::i32) 2776 FFBH = DAG.getNode(ISD::TRUNCATE, SL, VT, FFBH); 2777 2778 return FFBH; 2779 } 2780 2781 // The native instructions return -1 on 0 input. Optimize out a select that 2782 // produces -1 on 0. 2783 // 2784 // TODO: If zero is not undef, we could also do this if the output is compared 2785 // against the bitwidth. 2786 // 2787 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly. 
// Fold select-of-ctlz against -1 into FFBH_U32. Cond is the select's
// condition; performSelectCombine only calls this when Cond is a SETCC.
SDValue AMDGPUTargetLowering::performCtlzCombine(SDLoc SL,
                                                 SDValue Cond,
                                                 SDValue LHS,
                                                 SDValue RHS,
                                                 DAGCombinerInfo &DCI) const {
  // The compare must be against constant zero.
  ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
  if (!CmpRhs || !CmpRhs->isNullValue())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  SDValue CmpLHS = Cond.getOperand(0);

  // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
  if (CCOpcode == ISD::SETEQ &&
      isCtlzOpc(RHS.getOpcode()) &&
      RHS.getOperand(0) == CmpLHS &&
      isNegativeOne(LHS)) {
    return getFFBH_U32(*this, DAG, SL, CmpLHS);
  }

  // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
  if (CCOpcode == ISD::SETNE &&
      isCtlzOpc(LHS.getOpcode()) &&
      LHS.getOperand(0) == CmpLHS &&
      isNegativeOne(RHS)) {
    return getFFBH_U32(*this, DAG, SL, CmpLHS);
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::SETCC)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue LHS = Cond.getOperand(0);
  SDValue RHS = Cond.getOperand(1);
  SDValue CC = Cond.getOperand(2);

  SDValue True = N->getOperand(1);
  SDValue False = N->getOperand(2);

  // f32 selects with a single-use condition may form min/max legacy ops.
  if (VT == MVT::f32 && Cond.hasOneUse())
    return CombineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);

  // There's no reason to not do this if the condition has other uses.
  return performCtlzCombine(SDLoc(N), Cond, True, False, DCI);
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
  default:
    break;
  case ISD::SHL: {
    // The 64-bit shift splitting combines only run after legalization, once
    // the generic combines have had their chance.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::SRL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSrlCombine(N, DCI);
  }
  case ISD::SRA: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSraCombine(N, DCI);
  }
  case ISD::AND: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performAndCombine(N, DCI);
  }
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24: {
    // Simplify demanded bits of the 24-bit multiply operands in place.
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    simplifyI24(N0, DCI);
    simplifyI24(N1, DCI);
    return SDValue();
  }
  case ISD::SELECT:
    return performSelectCombine(N, DCI);
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    // Only the low 5 bits of the width operand are significant.
    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of existing
        // DAG Combines. If not eliminated, we will match back to BFE during
        // selection.

        // TODO: The sext_inreg of extended types ends, although we could
        // handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    // Constant-fold the extract when the source is a constant.
    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    // When the field reaches the top bit, the BFE is just a plain shift.
    if ((OffsetVal + WidthVal) >= 32) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    // Otherwise, try to simplify the source based on the demanded field bits.
    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      APInt KnownZero, KnownOne;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLO.ShrinkDemandedConstant(BitsFrom, Demanded) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded,
                                   KnownZero, KnownOne, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }

  case ISD::STORE:
    return performStoreCombine(N, DCI);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

// Reconstruct the pre-lowering argument list from the (possibly split or
// promoted) InputArgs in Ins, appending the results to OrigIns.
void AMDGPUTargetLowering::getOriginalFunctionArgs(
                               SelectionDAG &DAG,
                               const Function *F,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               SmallVectorImpl<ISD::InputArg> &OrigIns) const {

  for (unsigned i = 0, e = Ins.size(); i < e; ++i) {
    // Untouched arguments pass through unchanged.
    if (Ins[i].ArgVT == Ins[i].VT) {
      OrigIns.push_back(Ins[i]);
      continue;
    }

    EVT VT;
    if (Ins[i].ArgVT.isVector() && !Ins[i].VT.isVector()) {
      // Vector has been split into scalars.
      VT = Ins[i].ArgVT.getVectorElementType();
    } else if (Ins[i].VT.isVector() && Ins[i].ArgVT.isVector() &&
               Ins[i].ArgVT.getVectorElementType() !=
               Ins[i].VT.getVectorElementType()) {
      // Vector elements have been promoted
      VT = Ins[i].ArgVT;
    } else {
      // Vector has been split into smaller vectors.
      VT = Ins[i].VT;
    }

    ISD::InputArg Arg(Ins[i].Flags, VT, VT, Ins[i].Used,
                      Ins[i].OrigArgIndex, Ins[i].PartOffset);
    OrigIns.push_back(Arg);
  }
}

// "True" for this hardware is either FP 1.0 or the all-ones integer.
bool AMDGPUTargetLowering::isHWTrueValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->isExactlyValue(1.0);
  }
  return isAllOnesConstant(Op);
}

// "False" for this hardware is either FP zero (any sign) or integer zero.
bool AMDGPUTargetLowering::isHWFalseValue(SDValue Op) const {
  if (ConstantFPSDNode * CFP = dyn_cast<ConstantFPSDNode>(Op)) {
    return CFP->getValueAPF().isZero();
  }
  return isNullConstant(Op);
}

// Return a virtual register tied to the physical live-in register Reg,
// creating and registering the live-in mapping on first use.
SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VirtualRegister;
  if (!MRI.isLiveIn(Reg)) {
    VirtualRegister = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VirtualRegister);
  } else {
    VirtualRegister = MRI.getLiveInVirtReg(Reg);
  }
  return DAG.getRegister(VirtualRegister, VT);
}

// Byte offset of an implicit kernel parameter, relative to the start of the
// ABI argument area recorded in MFI->ABIArgOffset.
uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
    const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
  uint64_t ArgOffset = MFI->ABIArgOffset;
  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    // Grid offset follows the 4-byte grid dimension.
    return ArgOffset + 4;
  }
  llvm_unreachable("unexpected implicit parameter type");
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

// Map an AMDGPU target node opcode to its printable name (nullptr for the
// sentinel FIRST_*/LAST_* values and unknown opcodes).
const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(CALL);
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(RET_FLAG);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMPED)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(FFBH_U32)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(LOAD_INPUT)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(SENDMSG)
  NODE_NAME_CASE(INTERP_MOV)
  NODE_NAME_CASE(INTERP_P1)
  NODE_NAME_CASE(INTERP_P2)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}

// Provide the hardware RSQ instruction as the f32 reciprocal-sqrt estimate;
// no refinement steps are requested.
SDValue AMDGPUTargetLowering::getRsqrtEstimate(SDValue Operand,
                                               DAGCombinerInfo &DCI,
                                               unsigned &RefinementSteps,
                                               bool &UseOneConstNR) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

// Provide the hardware RCP instruction as the f32 reciprocal estimate;
// no refinement steps are requested.
SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               DAGCombinerInfo &DCI,
                                               unsigned &RefinementSteps) const {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson step performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

// For a min/max, a result bit is known only if it is known identically in
// both operands, so intersect the known bits of Op0 and Op1.
static void computeKnownBitsForMinMax(const SDValue Op0,
                                      const SDValue Op1,
                                      APInt &KnownZero,
                                      APInt &KnownOne,
                                      const SelectionDAG &DAG,
                                      unsigned Depth) {
  APInt Op0Zero, Op0One;
  APInt Op1Zero, Op1One;
  DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
  DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);

  KnownZero = Op0Zero & Op1Zero;
  KnownOne = Op0One & Op1One;
}

void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
  const SDValue Op,
  APInt &KnownZero,
  APInt &KnownOne,
  const SelectionDAG &DAG,
  unsigned Depth) const {

  KnownZero = KnownOne = APInt(KnownOne.getBitWidth(), 0); // Don't know anything.

  APInt KnownZero2;
  APInt KnownOne2;
  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    // FIXME: The intrinsic should just use the node.
    switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
    case AMDGPUIntrinsic::AMDGPU_imax:
    case AMDGPUIntrinsic::AMDGPU_umax:
    case AMDGPUIntrinsic::AMDGPU_imin:
    case AMDGPUIntrinsic::AMDGPU_umin:
      computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
                                KnownZero, KnownOne, DAG, Depth);
      break;
    default:
      break;
    }

    break;
  }
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    // Carry/borrow results are 0 or 1: all bits but the lowest are zero.
    KnownZero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    unsigned BitWidth = 32;
    // Only the low 5 bits of the width operand are significant.
    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    // An unsigned extract zero-fills everything above the field width.
    if (Opc == AMDGPUISD::BFE_U32)
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - Width);

    break;
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &DAG,
  unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    // A signed extract of width W leaves 32 - W + 1 copies of the sign bit.
    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    // Offset 0: the source may contribute additional sign bits.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    // An unsigned extract of width W has 32 - W known-zero high bits.
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;

  default:
    return 1;
  }
}