//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#define AMDGPU_LOG2E_F 1.44269504088896340735992468100189214f
#define AMDGPU_LN2_F 0.693147180559945309417232121458176568f
#define AMDGPU_LN10_F 2.30258509299404568401799145468436421f

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  MachineFunction &MF = State.getMachineFunction();
  AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  uint64_t Offset = MFI->allocateKernArg(LocVT.getStoreSize(),
                                         ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           const TargetRegisterClass *RC,
                           unsigned NumRegs) {
  ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
  unsigned RegResult = State.AllocateReg(RegList);
  if (RegResult == AMDGPU::NoRegister)
    return false;

  State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
  return true;
}

static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32: {
    // Up to SGPR0-SGPR39
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::SGPR_64RegClass, 20);
  }
  default:
    return false;
  }
}

// Allocate up to VGPR31.
//
// TODO: Since there are no VGPR alignment requirements, would it be better to
// split into individual scalar registers?
static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_64RegClass, 31);
  }
  case MVT::v4i32:
  case MVT::v4f32:
  case MVT::v2i64:
  case MVT::v2f64: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_128RegClass, 29);
  }
  case MVT::v8i32:
  case MVT::v8f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_256RegClass, 25);
  }
  case MVT::v16i32:
  case MVT::v16f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_512RegClass, 17);
  }
  default:
    return false;
  }
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  KnownBits Known;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, Known);

  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}

AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  AMDGPUASI = AMDGPU::getAMDGPUAS(TM);
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
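  // For example:
  //   (i64 (zextload i8, addr)) -> (zext (i32 (zextload i8, addr)))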
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);

  // Expand to fneg + fadd.
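  // i.e. (fsub x, y) -> (fadd x, (fneg y)); there is no f64 fsub instruction,
  // but fneg is a free source modifier here.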
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand.
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
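  // A rotate left can always be rewritten in terms of the rotate right
  // instruction: rotl(x, n) == rotr(x, bitwidth - n).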
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);

  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::ADDC, VT, Expand);
    setOperationAction(ISD::SUBC, VT, Expand);
    setOperationAction(ISD::ADDE, VT, Expand);
    setOperationAction(ISD::SUBE, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  // This causes us to use an unrolled select operation rather than expansion
  // with bit operations.
  // This is in general better, but the alternative using BFI
  // instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For now,
  // we don't have a way of knowing during instruction selection if a condition
  // will be uniform and we always use vector compares. Assume we are using
  // vector compares until that is fixed.
  setHasMultipleConditionRegisters(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are a legal type, even though we have to split the loads
  // usually. When we can more precisely specify load legality per address
  // space, we should be able to make FindBetterChain/MergeConsecutiveStores
  // smarter so that they can figure out what to do in 2 iterations without all
  // N > 4 stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
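  // ~0u removes the store-count limit, so if one of these does reach lowering
  // it is always expanded inline rather than falling back to a libcall.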
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
    return true;
  default:
    return false;
  }
}

/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
  case AMDGPUISD::DIV_SCALE:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  default:
    return true;
  }
}

bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // a source modifier is truly free for them. If there are multiple users,
  // each of which would require switching to the VOP3 encoding, there will be
  // a code size increase. Try to avoid increasing code size unless we know it
  // will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {
  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub 32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already had to be an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  return (LScalarSize < CastScalarSize) ||
         (CastScalarSize >= 32);
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
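// Speculating here means a guarded pattern like
//   x == 0 ? 32 : cttz(x)
// can drop the branch and select a single branchless sequence, since the
// hardware count instructions produce a defined result for a zero input.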
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    return L->getMemOperand()->getAddrSpace() ==
           Subtarget->getAMDGPUAS().CONSTANT_ADDRESS_32BIT;
  }
  }
}

bool AMDGPUTargetLowering::isSDNodeSourceOfDivergence(
    const SDNode *N, FunctionLoweringInfo *FLI, DivergenceAnalysis *DA) const {
  switch (N->getOpcode()) {
  case ISD::Register:
  case ISD::CopyFromReg: {
    const RegisterSDNode *R = nullptr;
    if (N->getOpcode() == ISD::Register)
      R = dyn_cast<RegisterSDNode>(N);
    else
      R = dyn_cast<RegisterSDNode>(N->getOperand(1));

    if (R) {
      const MachineFunction *MF = FLI->MF;
      const SISubtarget &ST = MF->getSubtarget<SISubtarget>();
      const MachineRegisterInfo &MRI = MF->getRegInfo();
      const SIRegisterInfo &TRI = ST.getInstrInfo()->getRegisterInfo();
      unsigned Reg = R->getReg();
      if (TRI.isPhysicalRegister(Reg))
        return TRI.isVGPR(MRI, Reg);

      if (MRI.isLiveIn(Reg)) {
        // workitem.id.x, workitem.id.y and workitem.id.z are divergent, and
        // any VGPR formal argument is also considered divergent.
        if ((MRI.getLiveInPhysReg(Reg) == AMDGPU::T0_X) ||
            (MRI.getLiveInPhysReg(Reg) == AMDGPU::T0_Y) ||
            (MRI.getLiveInPhysReg(Reg) == AMDGPU::T0_Z) ||
            (TRI.isVGPR(MRI, Reg)))
          return true;
        // Formal arguments of non-entry functions are conservatively
        // considered divergent.
        else if (!AMDGPU::isEntryFunctionCC(FLI->Fn->getCallingConv()))
          return true;
      }
      return !DA || DA->isDivergent(FLI->getValueFromVirtualReg(Reg));
    }
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *L = cast<LoadSDNode>(N);
    if (L->getMemOperand()->getAddrSpace() ==
        Subtarget->getAMDGPUAS().PRIVATE_ADDRESS)
      return true;
    break;
  }
  case ISD::CALLSEQ_END:
    return true;
  case ISD::INTRINSIC_WO_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(0))->getZExtValue());
  case ISD::INTRINSIC_W_CHAIN:
    return AMDGPU::isIntrinsicSourceOfDivergence(
        cast<ConstantSDNode>(N->getOperand(1))->getZExtValue());
  // In some cases intrinsics that are a source of divergence have been
  // lowered to AMDGPUISD nodes, so we also need to check for those.
  case AMDGPUISD::INTERP_MOV:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
    return true;
  }
  return false;
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.
  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.
  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}

bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

// v_mad_mix* support a conversion from f16 to f32.
//
// There is one special case where this would also be OK to use with denormals
// enabled, but we do not currently handle it.
bool AMDGPUTargetLowering::isFPExtFoldable(unsigned Opcode,
                                           EVT DestVT, EVT SrcVT) const {
  return Opcode == ISD::FMAD && Subtarget->hasMadMixInsts() &&
         DestVT.getScalarType() == MVT::f32 && !Subtarget->hasFP32Denormals() &&
         SrcVT.getScalarType() == MVT::f16;
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful.
  // As currently used, this is much less general than the name suggests, and is
  // only used in places trying to reduce the sizes of loads. Shrinking loads to
  // < 32-bits is not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return CC_AMDGPU_Kernel;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return CC_AMDGPU_Kernel;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments().
///
/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
/// the argument before it was split.
/// From this, we deduce that the memory type for each individual part is i8.
/// We pass the memory type as LocVT to the calling convention analysis
/// function and the register type (Ins[x].VT) as the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(CCState &State,
    const SmallVectorImpl<ISD::InputArg> &Ins) const {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    const ISD::InputArg &In = Ins[i];
    EVT MemVT;

    unsigned NumRegs = getNumRegisters(State.getContext(), In.ArgVT);

    if (!Subtarget->isAmdHsaOS() &&
        (In.ArgVT == MVT::i16 || In.ArgVT == MVT::i8 || In.ArgVT == MVT::f16)) {
      // The ABI says the caller will extend these values to 32-bits.
      MemVT = In.ArgVT.isInteger() ? MVT::i32 : MVT::f32;
    } else if (NumRegs == 1) {
      // This argument is not split, so the IR type is the memory type.
      assert(!In.Flags.isSplit());
      if (In.ArgVT.isExtended()) {
        // We have an extended type, like i24, so we should just use the
        // register type.
        MemVT = In.VT;
      } else {
        MemVT = In.ArgVT;
      }
    } else if (In.ArgVT.isVector() && In.VT.isVector() &&
               In.ArgVT.getScalarType() == In.VT.getScalarType()) {
      assert(In.ArgVT.getVectorNumElements() > In.VT.getVectorNumElements());
      // We have a vector value which has been split into a vector with
      // the same scalar type, but fewer elements. This should handle
      // all the floating-point vector types.
      MemVT = In.VT;
    } else if (In.ArgVT.isVector() &&
               In.ArgVT.getVectorNumElements() == NumRegs) {
      // This arg has been split so that each element is stored in a separate
      // register.
      MemVT = In.ArgVT.getScalarType();
    } else if (In.ArgVT.isExtended()) {
      // We have an extended type, like i65.
      MemVT = In.VT;
    } else {
      unsigned MemoryBits = In.ArgVT.getStoreSizeInBits() / NumRegs;
      assert(In.ArgVT.getStoreSizeInBits() % NumRegs == 0);
      if (In.VT.isInteger()) {
        MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
      } else if (In.VT.isVector()) {
        assert(!In.VT.getScalarType().isFloatingPoint());
        unsigned NumElements = In.VT.getVectorNumElements();
        assert(MemoryBits % NumElements == 0);
        // This vector type has been split into another vector type with
        // a different element size.
        EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                         MemoryBits / NumElements);
        MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
      } else {
        llvm_unreachable("cannot deduce memory type.");
      }
    }

    // Convert one-element vectors to scalar.
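    // (e.g. a v1i32 part is treated as a plain i32)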
    if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
      MemVT = MemVT.getScalarType();

    if (MemVT.isExtended()) {
      // This should really only happen if we have vec3 arguments.
      assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
      MemVT = MemVT.getPow2VectorType(State.getContext());
    }

    assert(MemVT.isSimple());
    allocateKernArg(i, In.VT, MemVT.getSimpleVT(), CCValAssign::Full, In.Flags,
                    State);
  }
}

SDValue AMDGPUTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv,
    bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  //       "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument load that overlaps the
  // clobbered object.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
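  // The TokenFactor makes every overlapping stack-argument load an ordering
  // predecessor of the call sequence, so the loads complete before their
  // slots are clobbered by the outgoing arguments.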
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}

void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUASI.LOCAL_ADDRESS) {
    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// \brief Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit signed integer.
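//
// Illustrative sketch of the unsigned case below (the signed path additionally
// derives a sign-correction term 'jq' from the operand signs):
//   float fa = (float)a, fb = (float)b;          // exact for values < 2^24
//   float fq = trunc(fa * rcp(fb));              // candidate quotient
//   float fr = fabs(fma(-fq, fb, fa));           // remainder of the candidate
//   int q = (int)fq + (fr >= fabs(fb) ? 1 : 0);  // fix a possible off-by-one,
//                                                // since rcp(fb) is inexact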
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool Sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
  if (LHSSignBits < 9)
    return SDValue();

  unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
  if (RHSSignBits < 9)
    return SDValue();

  unsigned BitSize = VT.getSizeInBits();
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
  if (Sign)
    ++DivBits;

  ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (Sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
  }

  // int ia = (int)LHS;
  SDValue ia = LHS;

  // int ib = (int)RHS;
  SDValue ib = RHS;

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  unsigned OpCode = Subtarget->hasFP32Denormals() ?
                    (unsigned)AMDGPUISD::FMAD_FTZ :
                    (unsigned)ISD::FMAD;
  SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation; it's easier to recompute it.
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);

  // Truncate to number of bits this divide really is.
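  // Only the low DivBits bits of Div and Rem are meaningful; sign-extend from
  // that width for the signed case, or mask it off for the unsigned case, so
  // the high bits are well defined.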
  if (Sign) {
    SDValue InRegSize
      = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
    Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
    Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
  } else {
    SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
    Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
    Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
  }

  return DAG.getMergeValues({ Div, Rem }, DL);
}

void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &Results) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64");

  EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

  SDValue One = DAG.getConstant(1, DL, HalfVT);
  SDValue Zero = DAG.getConstant(0, DL, HalfVT);

  // Hi/Lo split
  SDValue LHS = Op.getOperand(0);
  SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
  SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One);

  SDValue RHS = Op.getOperand(1);
  SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
  SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One);

  if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) &&
      DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) {

    SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                              LHS_Lo, RHS_Lo);

    SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero});
    SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero});

    Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV));
    Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM));
    return;
  }

  if (isTypeLegal(MVT::i64)) {
    // Compute denominator reciprocal.
    unsigned FMAD = Subtarget->hasFP32Denormals() ?
                   (unsigned)AMDGPUISD::FMAD_FTZ :
                   (unsigned)ISD::FMAD;

    SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
    SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
    SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
      DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
      Cvt_Lo);
    SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
    SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
      DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
    SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
      DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
    SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
    SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
      DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
      Mul1);
    SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
    SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
    SDValue Rcp64 = DAG.getBitcast(VT,
        DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));

    SDValue Zero64 = DAG.getConstant(0, DL, VT);
    SDValue One64 = DAG.getConstant(1, DL, VT);
    SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
    SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);

    SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
    SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
    SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
    SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
                                    Zero);
    SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
                                    One);

    SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
                                  Mulhi1_Lo, Zero1);
    SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
                                  Mulhi1_Hi, Add1_Lo.getValue(1));
    SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
    SDValue Add1 = DAG.getBitcast(VT,
        DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));

    SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
    SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
    SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
                                    Zero);
    SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
                                    One);

    SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
                                  Mulhi2_Lo, Zero1);
    SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
                                   Mulhi2_Hi, Add1_Lo.getValue(1));
    SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
                                  Zero, Add2_Lo.getValue(1));
    SDValue Add2 = DAG.getBitcast(VT,
        DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
    SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);

    SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);

    SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
    SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
    SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
                                  Mul3_Lo, Zero1);
    SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
                                  Mul3_Hi, Sub1_Lo.getValue(1));
    SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
    SDValue Sub1 = DAG.getBitcast(VT,
        DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));

    SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
    SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);

    // TODO: Here and below portions of the code can be enclosed in if/endif.
    // Currently control flow is unconditional and we have 4 selects after
    // potential endif to substitute PHIs.

    // if C3 != 0 ...
    SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
                                  RHS_Lo, Zero1);
    SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
                                  RHS_Hi, Sub1_Lo.getValue(1));
    SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
                                  Zero, Sub2_Lo.getValue(1));
    SDValue Sub2 = DAG.getBitcast(VT,
        DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));

    SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);

    SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);

    // if (C6 != 0)
    SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);

    SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
                                  RHS_Lo, Zero1);
    SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
                                  RHS_Hi, Sub2_Lo.getValue(1));
    SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
                                  Zero, Sub3_Lo.getValue(1));
    SDValue Sub3 = DAG.getBitcast(VT,
        DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));

    // endif C6
    // endif C3

    SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
    SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);

    SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
    SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);

    Results.push_back(Div);
    Results.push_back(Rem);

    return;
  }

  // R600 expansion.
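  // This is a bit-serial restoring division: after speculatively handling the
  // high half, each iteration shifts the next bit of LHS_Lo into the running
  // remainder and sets the matching quotient bit whenever remainder >= RHS.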
  // Get speculative values.
  SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
  SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

  SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
  SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
  REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);

  SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
  SDValue DIV_Lo = Zero;

  const unsigned halfBitWidth = HalfVT.getSizeInBits();

  for (unsigned i = 0; i < halfBitWidth; ++i) {
    const unsigned bitPos = halfBitWidth - i - 1;
    SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
    // Get value of high bit
    SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
    HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
    HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);

    // Shift
    REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
    // Add LHS high bit
    REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);

    SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
    SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);

    DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

    // Update REM
    SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
    REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
  }

  SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
  DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
  Results.push_back(DIV);
  Results.push_back(REM);
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i64) {
    SmallVector<SDValue, 2> Results;
    LowerUDIVREM64(Op, DAG, Results);
    return DAG.getMergeValues(Results, DL);
  }

  if (VT == MVT::i32) {
    if (SDValue Res = LowerDIVREM24(Op, DAG, false))
      return Res;
  }

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, DL, VT),
                                             DAG.getConstant(0, DL, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, DL, VT),
                                              DAG.getConstant(0, DL, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ? Remainder_A_Den : Rem)
  Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Remainder_A_Den, Rem, ISD::SETEQ);
  SDValue Ops[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Ops, DL);
}

SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue NegOne = DAG.getConstant(-1, DL, VT);

  if (VT == MVT::i32) {
    if (SDValue Res = LowerDIVREM24(Op, DAG, true))
      return Res;
  }

  if (VT == MVT::i64 &&
      DAG.ComputeNumSignBits(LHS) > 32 &&
      DAG.ComputeNumSignBits(RHS) > 32) {
    EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext());

    // Hi/Lo split
    SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero);
    SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero);
    SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT),
                                 LHS_Lo, RHS_Lo);
    SDValue Res[2] = {
      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)),
      DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1))
    };
    return DAG.getMergeValues(Res, DL);
  }

  SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT);
  SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign);
  SDValue RSign = LHSign; // Remainder sign is the same as LHS.

  LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign);

  LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign);
  RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign);

  SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS);
  SDValue Rem = Div.getValue(1);

  Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign);

  Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign);
  Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign);

  SDValue Res[2] = {
    Div,
    Rem
  };
  return DAG.getMergeValues(Res, DL);
}

// (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y))
SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  EVT VT = Op.getValueType();
  SDValue X = Op.getOperand(0);
  SDValue Y = Op.getOperand(1);

  // TODO: Should this propagate fast-math-flags?
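  // Using ftrunc (round toward zero) rather than ffloor here gives the result
  // the sign of X, matching the usual fmod/frem semantics.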
2018 2019 SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y); 2020 SDValue Floor = DAG.getNode(ISD::FTRUNC, SL, VT, Div); 2021 SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Floor, Y); 2022 2023 return DAG.getNode(ISD::FSUB, SL, VT, X, Mul); 2024 } 2025 2026 SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const { 2027 SDLoc SL(Op); 2028 SDValue Src = Op.getOperand(0); 2029 2030 // result = trunc(src) 2031 // if (src > 0.0 && src != result) 2032 // result += 1.0 2033 2034 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2035 2036 const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64); 2037 const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64); 2038 2039 EVT SetCCVT = 2040 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2041 2042 SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT); 2043 SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE); 2044 SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc); 2045 2046 SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero); 2047 // TODO: Should this propagate fast-math-flags? 2048 return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add); 2049 } 2050 2051 static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL, 2052 SelectionDAG &DAG) { 2053 const unsigned FractBits = 52; 2054 const unsigned ExpBits = 11; 2055 2056 SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32, 2057 Hi, 2058 DAG.getConstant(FractBits - 32, SL, MVT::i32), 2059 DAG.getConstant(ExpBits, SL, MVT::i32)); 2060 SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart, 2061 DAG.getConstant(1023, SL, MVT::i32)); 2062 2063 return Exp; 2064 } 2065 2066 SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const { 2067 SDLoc SL(Op); 2068 SDValue Src = Op.getOperand(0); 2069 2070 assert(Op.getValueType() == MVT::f64); 2071 2072 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2073 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2074 2075 SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2076 2077 // Extract the upper half, since this is where we will find the sign and 2078 // exponent. 2079 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One); 2080 2081 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 2082 2083 const unsigned FractBits = 52; 2084 2085 // Extract the sign bit. 2086 const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32); 2087 SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask); 2088 2089 // Extend back to 64-bits. 
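  // The sign bit sits in the high word, so pair it with a zero low word.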
2090 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit}); 2091 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64); 2092 2093 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src); 2094 const SDValue FractMask 2095 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); 2096 2097 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp); 2098 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64); 2099 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not); 2100 2101 EVT SetCCVT = 2102 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2103 2104 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); 2105 2106 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2107 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2108 2109 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0); 2110 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1); 2111 2112 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2); 2113 } 2114 2115 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const { 2116 SDLoc SL(Op); 2117 SDValue Src = Op.getOperand(0); 2118 2119 assert(Op.getValueType() == MVT::f64); 2120 2121 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52"); 2122 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64); 2123 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src); 2124 2125 // TODO: Should this propagate fast-math-flags? 2126 2127 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign); 2128 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign); 2129 2130 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src); 2131 2132 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51"); 2133 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64); 2134 2135 EVT SetCCVT = 2136 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2137 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT); 2138 2139 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2); 2140 } 2141 2142 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const { 2143 // FNEARBYINT and FRINT are the same, except in their handling of FP 2144 // exceptions. Those aren't really meaningful for us, and OpenCL only has 2145 // rint, so just treat them as equivalent. 2146 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0)); 2147 } 2148 2149 // XXX - May require not supporting f32 denormals? 2150 2151 // Don't handle v2f16. The extra instructions to scalarize and repack around the 2152 // compare and vselect end up producing worse code than scalarizing the whole 2153 // operation. 2154 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const { 2155 SDLoc SL(Op); 2156 SDValue X = Op.getOperand(0); 2157 EVT VT = Op.getValueType(); 2158 2159 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X); 2160 2161 // TODO: Should this propagate fast-math-flags? 
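  // round(x) == trunc(x) + (|x - trunc(x)| >= 0.5 ? copysign(1.0, x) : 0.0),
  // i.e. round half away from zero, implemented branchlessly below.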
2162 2163 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T); 2164 2165 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff); 2166 2167 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT); 2168 const SDValue One = DAG.getConstantFP(1.0, SL, VT); 2169 const SDValue Half = DAG.getConstantFP(0.5, SL, VT); 2170 2171 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X); 2172 2173 EVT SetCCVT = 2174 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 2175 2176 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE); 2177 2178 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero); 2179 2180 return DAG.getNode(ISD::FADD, SL, VT, T, Sel); 2181 } 2182 2183 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const { 2184 SDLoc SL(Op); 2185 SDValue X = Op.getOperand(0); 2186 2187 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X); 2188 2189 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2190 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2191 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32); 2192 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32); 2193 EVT SetCCVT = 2194 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2195 2196 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2197 2198 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One); 2199 2200 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 2201 2202 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL, 2203 MVT::i64); 2204 2205 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp); 2206 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64, 2207 DAG.getConstant(INT64_C(0x0008000000000000), SL, 2208 MVT::i64), 2209 Exp); 2210 2211 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M); 2212 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT, 2213 DAG.getConstant(0, SL, MVT::i64), Tmp0, 2214 ISD::SETNE); 2215 2216 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1, 2217 D, DAG.getConstant(0, SL, MVT::i64)); 2218 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2); 2219 2220 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64)); 2221 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K); 2222 2223 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2224 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2225 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ); 2226 2227 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64, 2228 ExpEqNegOne, 2229 DAG.getConstantFP(1.0, SL, MVT::f64), 2230 DAG.getConstantFP(0.0, SL, MVT::f64)); 2231 2232 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X); 2233 2234 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K); 2235 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K); 2236 2237 return K; 2238 } 2239 2240 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { 2241 EVT VT = Op.getValueType(); 2242 2243 if (VT == MVT::f32 || VT == MVT::f16) 2244 return LowerFROUND32_16(Op, DAG); 2245 2246 if (VT == MVT::f64) 2247 return LowerFROUND64(Op, DAG); 2248 2249 llvm_unreachable("unhandled type"); 2250 } 2251 2252 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { 2253 SDLoc SL(Op); 2254 SDValue Src = Op.getOperand(0); 2255 2256 // result = trunc(src); 2257 // if (src < 0.0 && src != result) 2258 // result += -1.0. 
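  // As with FCEIL above, this is branchless: the condition selects between
  // -1.0 and 0.0, which a single fadd then applies.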
  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
                                        double Log2BaseInverted) const {
  EVT VT = Op.getValueType();

  SDLoc SL(Op);
  SDValue Operand = Op.getOperand(0);
  SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
  SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);

  return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
}

static bool isCtlzOpc(unsigned Opc) {
  return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
}

static bool isCttzOpc(unsigned Opc) {
  return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
}

SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
                   Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;

  unsigned ISDOpc, NewOpc;
  if (isCtlzOpc(Op.getOpcode())) {
    ISDOpc = ISD::CTLZ_ZERO_UNDEF;
    NewOpc = AMDGPUISD::FFBH_U32;
  } else if (isCttzOpc(Op.getOpcode())) {
    ISDOpc = ISD::CTTZ_ZERO_UNDEF;
    NewOpc = AMDGPUISD::FFBL_B32;
  } else
    llvm_unreachable("Unexpected opcode");

  if (ZeroUndef && Src.getValueType() == MVT::i32)
    return DAG.getNode(NewOpc, SL, MVT::i32, Src);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::i32);

  SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
  SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);

  SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
  SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);

  const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
  SDValue Add, NewOpr;
  if (isCtlzOpc(Op.getOpcode())) {
    Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
    // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
  } else {
    Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
    // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
  }

  if (!ZeroUndef) {
    // Test if the full 64-bit input is zero.
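    // That is, select 64 when both halves are zero; the AND below merges the
    // two per-half tests.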
    // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
    // which we probably don't want.
    SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
    SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
    SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);

    // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
    // with the same cycles, otherwise it is slower.
    // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
    //                                  DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);

    const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);

    // The instruction returns -1 for 0 input, but the defined intrinsic
    // behavior is to return the number of bits.
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         SrcIsZero, Bits64, NewOpr);
  }

  return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
}

SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  // Unsigned
  // cul2f(ulong u)
  //{
  //  uint lz = clz(u);
  //  uint e = (u != 0) ? 127U + 63U - lz : 0;
  //  u = (u << lz) & 0x7fffffffffffffffUL;
  //  ulong t = u & 0xffffffffffUL;
  //  uint v = (e << 23) | (uint)(u >> 40);
  //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
  //  return as_float(v + r);
  //}
  // Signed
  // cl2f(long l)
  //{
  //  long s = l >> 63;
  //  float r = cul2f((l + s) ^ s);
  //  return s ? -r : r;
  //}

  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  SDValue L = Src;

  SDValue S;
  if (Signed) {
    const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
    S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);

    SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
    L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
  }

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::f32);

  SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
  SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
  SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
  LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);

  SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
  SDValue E = DAG.getSelect(SL, MVT::i32,
                            DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
                            DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
                            ZeroI32);

  SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
                          DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
                          DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));

  SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
                          DAG.getConstant(0xffffffffffULL, SL, MVT::i64));

  SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
                             U, DAG.getConstant(40, SL, MVT::i64));

  SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
    DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
    DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));

  SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
  SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
  SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);

  SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);

  SDValue R = DAG.getSelect(SL, MVT::i32,
                            RCmp,
                            One,
                            DAG.getSelect(SL, MVT::i32,
TCmp, VTrunc1, ZeroI32)); 2446 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); 2447 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); 2448 2449 if (!Signed) 2450 return R; 2451 2452 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); 2453 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); 2454 } 2455 2456 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, 2457 bool Signed) const { 2458 SDLoc SL(Op); 2459 SDValue Src = Op.getOperand(0); 2460 2461 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2462 2463 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2464 DAG.getConstant(0, SL, MVT::i32)); 2465 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2466 DAG.getConstant(1, SL, MVT::i32)); 2467 2468 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP, 2469 SL, MVT::f64, Hi); 2470 2471 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); 2472 2473 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, 2474 DAG.getConstant(32, SL, MVT::i32)); 2475 // TODO: Should this propagate fast-math-flags? 2476 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); 2477 } 2478 2479 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, 2480 SelectionDAG &DAG) const { 2481 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2482 "operation should be legal"); 2483 2484 // TODO: Factor out code common with LowerSINT_TO_FP. 2485 2486 EVT DestVT = Op.getValueType(); 2487 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2488 SDLoc DL(Op); 2489 SDValue Src = Op.getOperand(0); 2490 2491 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2492 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2493 SDValue FPRound = 2494 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2495 2496 return FPRound; 2497 } 2498 2499 if (DestVT == MVT::f32) 2500 return LowerINT_TO_FP32(Op, DAG, false); 2501 2502 assert(DestVT == MVT::f64); 2503 return LowerINT_TO_FP64(Op, DAG, false); 2504 } 2505 2506 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, 2507 SelectionDAG &DAG) const { 2508 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2509 "operation should be legal"); 2510 2511 // TODO: Factor out code common with LowerUINT_TO_FP. 2512 2513 EVT DestVT = Op.getValueType(); 2514 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2515 SDLoc DL(Op); 2516 SDValue Src = Op.getOperand(0); 2517 2518 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2519 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2520 SDValue FPRound = 2521 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2522 2523 return FPRound; 2524 } 2525 2526 if (DestVT == MVT::f32) 2527 return LowerINT_TO_FP32(Op, DAG, true); 2528 2529 assert(DestVT == MVT::f64); 2530 return LowerINT_TO_FP64(Op, DAG, true); 2531 } 2532 2533 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, 2534 bool Signed) const { 2535 SDLoc SL(Op); 2536 2537 SDValue Src = Op.getOperand(0); 2538 2539 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2540 2541 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL, 2542 MVT::f64); 2543 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL, 2544 MVT::f64); 2545 // TODO: Should this propagate fast-math-flags? 
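  // K0 is 2^-32 and K1 is -2^32 (see the bit patterns above): compute
  // Hi = floor(trunc(x) * 2^-32), then Lo = fma(Hi, -2^32, trunc(x)), and
  // convert each 32-bit half separately.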
2546 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0); 2547 2548 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul); 2549 2550 2551 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc); 2552 2553 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL, 2554 MVT::i32, FloorMul); 2555 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma); 2556 2557 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}); 2558 2559 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result); 2560 } 2561 2562 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const { 2563 SDLoc DL(Op); 2564 SDValue N0 = Op.getOperand(0); 2565 2566 // Convert to target node to get known bits 2567 if (N0.getValueType() == MVT::f32) 2568 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0); 2569 2570 if (getTargetMachine().Options.UnsafeFPMath) { 2571 // There is a generic expand for FP_TO_FP16 with unsafe fast math. 2572 return SDValue(); 2573 } 2574 2575 assert(N0.getSimpleValueType() == MVT::f64); 2576 2577 // f64 -> f16 conversion using round-to-nearest-even rounding mode. 2578 const unsigned ExpMask = 0x7ff; 2579 const unsigned ExpBiasf64 = 1023; 2580 const unsigned ExpBiasf16 = 15; 2581 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2582 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2583 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0); 2584 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U, 2585 DAG.getConstant(32, DL, MVT::i64)); 2586 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32); 2587 U = DAG.getZExtOrTrunc(U, DL, MVT::i32); 2588 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2589 DAG.getConstant(20, DL, MVT::i64)); 2590 E = DAG.getNode(ISD::AND, DL, MVT::i32, E, 2591 DAG.getConstant(ExpMask, DL, MVT::i32)); 2592 // Subtract the fp64 exponent bias (1023) to get the real exponent and 2593 // add the f16 bias (15) to get the biased exponent for the f16 format. 2594 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E, 2595 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); 2596 2597 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2598 DAG.getConstant(8, DL, MVT::i32)); 2599 M = DAG.getNode(ISD::AND, DL, MVT::i32, M, 2600 DAG.getConstant(0xffe, DL, MVT::i32)); 2601 2602 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH, 2603 DAG.getConstant(0x1ff, DL, MVT::i32)); 2604 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U); 2605 2606 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ); 2607 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set); 2608 2609 // (M != 0 ? 
0x0200 : 0) | 0x7c00; 2610 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32, 2611 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32), 2612 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32)); 2613 2614 // N = M | (E << 12); 2615 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2616 DAG.getNode(ISD::SHL, DL, MVT::i32, E, 2617 DAG.getConstant(12, DL, MVT::i32))); 2618 2619 // B = clamp(1-E, 0, 13); 2620 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32, 2621 One, E); 2622 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero); 2623 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B, 2624 DAG.getConstant(13, DL, MVT::i32)); 2625 2626 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2627 DAG.getConstant(0x1000, DL, MVT::i32)); 2628 2629 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B); 2630 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B); 2631 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE); 2632 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1); 2633 2634 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT); 2635 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V, 2636 DAG.getConstant(0x7, DL, MVT::i32)); 2637 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V, 2638 DAG.getConstant(2, DL, MVT::i32)); 2639 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32), 2640 One, Zero, ISD::SETEQ); 2641 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32), 2642 One, Zero, ISD::SETGT); 2643 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1); 2644 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1); 2645 2646 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32), 2647 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT); 2648 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32), 2649 I, V, ISD::SETEQ); 2650 2651 // Extract the sign bit. 2652 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2653 DAG.getConstant(16, DL, MVT::i32)); 2654 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign, 2655 DAG.getConstant(0x8000, DL, MVT::i32)); 2656 2657 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V); 2658 return DAG.getZExtOrTrunc(V, DL, Op.getValueType()); 2659 } 2660 2661 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op, 2662 SelectionDAG &DAG) const { 2663 SDValue Src = Op.getOperand(0); 2664 2665 // TODO: Factor out code common with LowerFP_TO_UINT. 2666 2667 EVT SrcVT = Src.getValueType(); 2668 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2669 SDLoc DL(Op); 2670 2671 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2672 SDValue FpToInt32 = 2673 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2674 2675 return FpToInt32; 2676 } 2677 2678 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2679 return LowerFP64_TO_INT(Op, DAG, true); 2680 2681 return SDValue(); 2682 } 2683 2684 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op, 2685 SelectionDAG &DAG) const { 2686 SDValue Src = Op.getOperand(0); 2687 2688 // TODO: Factor out code common with LowerFP_TO_SINT. 
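  // As in LowerFP_TO_SINT, f16 sources are extended to f32 first; the
  // extension is exact since every f16 value is representable in f32.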
  EVT SrcVT = Src.getValueType();
  if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
    SDLoc DL(Op);

    SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
    SDValue FpToInt32 =
        DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);

    return FpToInt32;
  }

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, false);

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  assert(VT.isVector());

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getBuildVector(VT, DL, Args);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

static bool isU24(SDValue Op, SelectionDAG &DAG) {
  return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  // Types less than 24-bit should be treated as unsigned 24-bit values.
  return VT.getSizeInBits() >= 24 &&
         AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
}

static bool simplifyI24(SDNode *Node24, unsigned OpIdx,
                        TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = Node24->getOperand(OpIdx);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  return TLI.SimplifyDemandedBits(Node24, OpIdx, Demanded, DCI, TLO);
}

template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
                               uint32_t Width, const SDLoc &DL) {
  if (Width + Offset < 32) {
    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    return DAG.getConstant(Result, DL, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}

static bool hasVolatileUser(SDNode *Val) {
  for (SDNode *U : Val->uses()) {
    if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
      if (M->isVolatile())
        return true;
    }
  }

  return false;
}

bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
  // i32 vectors are the canonical memory type.
2787 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT)) 2788 return false; 2789 2790 if (!VT.isByteSized()) 2791 return false; 2792 2793 unsigned Size = VT.getStoreSize(); 2794 2795 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector()) 2796 return false; 2797 2798 if (Size == 3 || (Size > 4 && (Size % 4 != 0))) 2799 return false; 2800 2801 return true; 2802 } 2803 2804 // Replace load of an illegal type with a store of a bitcast to a friendlier 2805 // type. 2806 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N, 2807 DAGCombinerInfo &DCI) const { 2808 if (!DCI.isBeforeLegalize()) 2809 return SDValue(); 2810 2811 LoadSDNode *LN = cast<LoadSDNode>(N); 2812 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN)) 2813 return SDValue(); 2814 2815 SDLoc SL(N); 2816 SelectionDAG &DAG = DCI.DAG; 2817 EVT VT = LN->getMemoryVT(); 2818 2819 unsigned Size = VT.getStoreSize(); 2820 unsigned Align = LN->getAlignment(); 2821 if (Align < Size && isTypeLegal(VT)) { 2822 bool IsFast; 2823 unsigned AS = LN->getAddressSpace(); 2824 2825 // Expand unaligned loads earlier than legalization. Due to visitation order 2826 // problems during legalization, the emitted instructions to pack and unpack 2827 // the bytes again are not eliminated in the case of an unaligned copy. 2828 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) { 2829 if (VT.isVector()) 2830 return scalarizeVectorLoad(LN, DAG); 2831 2832 SDValue Ops[2]; 2833 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG); 2834 return DAG.getMergeValues(Ops, SDLoc(N)); 2835 } 2836 2837 if (!IsFast) 2838 return SDValue(); 2839 } 2840 2841 if (!shouldCombineMemoryType(VT)) 2842 return SDValue(); 2843 2844 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2845 2846 SDValue NewLoad 2847 = DAG.getLoad(NewVT, SL, LN->getChain(), 2848 LN->getBasePtr(), LN->getMemOperand()); 2849 2850 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad); 2851 DCI.CombineTo(N, BC, NewLoad.getValue(1)); 2852 return SDValue(N, 0); 2853 } 2854 2855 // Replace store of an illegal type with a store of a bitcast to a friendlier 2856 // type. 2857 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N, 2858 DAGCombinerInfo &DCI) const { 2859 if (!DCI.isBeforeLegalize()) 2860 return SDValue(); 2861 2862 StoreSDNode *SN = cast<StoreSDNode>(N); 2863 if (SN->isVolatile() || !ISD::isNormalStore(SN)) 2864 return SDValue(); 2865 2866 EVT VT = SN->getMemoryVT(); 2867 unsigned Size = VT.getStoreSize(); 2868 2869 SDLoc SL(N); 2870 SelectionDAG &DAG = DCI.DAG; 2871 unsigned Align = SN->getAlignment(); 2872 if (Align < Size && isTypeLegal(VT)) { 2873 bool IsFast; 2874 unsigned AS = SN->getAddressSpace(); 2875 2876 // Expand unaligned stores earlier than legalization. Due to visitation 2877 // order problems during legalization, the emitted instructions to pack and 2878 // unpack the bytes again are not eliminated in the case of an unaligned 2879 // copy. 
2880 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) { 2881 if (VT.isVector()) 2882 return scalarizeVectorStore(SN, DAG); 2883 2884 return expandUnalignedStore(SN, DAG); 2885 } 2886 2887 if (!IsFast) 2888 return SDValue(); 2889 } 2890 2891 if (!shouldCombineMemoryType(VT)) 2892 return SDValue(); 2893 2894 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2895 SDValue Val = SN->getValue(); 2896 2897 //DCI.AddToWorklist(Val.getNode()); 2898 2899 bool OtherUses = !Val.hasOneUse(); 2900 SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val); 2901 if (OtherUses) { 2902 SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal); 2903 DAG.ReplaceAllUsesOfValueWith(Val, CastBack); 2904 } 2905 2906 return DAG.getStore(SN->getChain(), SL, CastVal, 2907 SN->getBasePtr(), SN->getMemOperand()); 2908 } 2909 2910 SDValue AMDGPUTargetLowering::performClampCombine(SDNode *N, 2911 DAGCombinerInfo &DCI) const { 2912 ConstantFPSDNode *CSrc = dyn_cast<ConstantFPSDNode>(N->getOperand(0)); 2913 if (!CSrc) 2914 return SDValue(); 2915 2916 const APFloat &F = CSrc->getValueAPF(); 2917 APFloat Zero = APFloat::getZero(F.getSemantics()); 2918 APFloat::cmpResult Cmp0 = F.compare(Zero); 2919 if (Cmp0 == APFloat::cmpLessThan || 2920 (Cmp0 == APFloat::cmpUnordered && Subtarget->enableDX10Clamp())) { 2921 return DCI.DAG.getConstantFP(Zero, SDLoc(N), N->getValueType(0)); 2922 } 2923 2924 APFloat One(F.getSemantics(), "1.0"); 2925 APFloat::cmpResult Cmp1 = F.compare(One); 2926 if (Cmp1 == APFloat::cmpGreaterThan) 2927 return DCI.DAG.getConstantFP(One, SDLoc(N), N->getValueType(0)); 2928 2929 return SDValue(CSrc, 0); 2930 } 2931 2932 // FIXME: This should go in generic DAG combiner with an isTruncateFree check, 2933 // but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU 2934 // issues. 2935 SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N, 2936 DAGCombinerInfo &DCI) const { 2937 SelectionDAG &DAG = DCI.DAG; 2938 SDValue N0 = N->getOperand(0); 2939 2940 // (vt2 (assertzext (truncate vt0:x), vt1)) -> 2941 // (vt2 (truncate (assertzext vt0:x, vt1))) 2942 if (N0.getOpcode() == ISD::TRUNCATE) { 2943 SDValue N1 = N->getOperand(1); 2944 EVT ExtVT = cast<VTSDNode>(N1)->getVT(); 2945 SDLoc SL(N); 2946 2947 SDValue Src = N0.getOperand(0); 2948 EVT SrcVT = Src.getValueType(); 2949 if (SrcVT.bitsGE(ExtVT)) { 2950 SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1); 2951 return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg); 2952 } 2953 } 2954 2955 return SDValue(); 2956 } 2957 /// Split the 64-bit value \p LHS into two 32-bit components, and perform the 2958 /// binary operation \p Opc to it with the corresponding constant operands. 2959 SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl( 2960 DAGCombinerInfo &DCI, const SDLoc &SL, 2961 unsigned Opc, SDValue LHS, 2962 uint32_t ValLo, uint32_t ValHi) const { 2963 SelectionDAG &DAG = DCI.DAG; 2964 SDValue Lo, Hi; 2965 std::tie(Lo, Hi) = split64BitValue(LHS, DAG); 2966 2967 SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32); 2968 SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32); 2969 2970 SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS); 2971 SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS); 2972 2973 // Re-visit the ands. It's possible we eliminated one of them and it could 2974 // simplify the vector. 
  DCI.AddToWorklist(Lo.getNode());
  DCI.AddToWorklist(Hi.getNode());

  SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}

SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  unsigned RHSVal = RHS->getZExtValue();
  if (!RHSVal)
    return LHS;

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;

  switch (LHS->getOpcode()) {
  default:
    break;
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    SDValue X = LHS->getOperand(0);

    if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
        isTypeLegal(MVT::v2i16)) {
      // Prefer build_vector as the canonical form if packed types are legal.
      // (shl ([asz]ext i16:x), 16) -> (build_vector 0, x)
      SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
      return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
    }

    // shl (ext x) => zext (shl x), if shift does not overflow int
    if (VT != MVT::i64)
      break;
    KnownBits Known;
    DAG.computeKnownBits(X, Known);
    unsigned LZ = Known.countMinLeadingZeros();
    if (LZ < RHSVal)
      break;
    EVT XVT = X.getValueType();
    SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
    return DAG.getZExtOrTrunc(Shl, SL, VT);
  }
  }

  if (VT != MVT::i64)
    return SDValue();

  // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))

  // On some subtargets, 64-bit shift is a quarter rate instruction. In the
  // common case, splitting this into a move and a 32-bit shift is faster and
  // the same code size.
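  // For example, (shl i64:x, 40) becomes a zero low word paired with
  // (shl lo_32(x), 8) in the high word.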
3037 if (RHSVal < 32) 3038 return SDValue(); 3039 3040 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); 3041 3042 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS); 3043 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt); 3044 3045 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 3046 3047 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift}); 3048 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 3049 } 3050 3051 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N, 3052 DAGCombinerInfo &DCI) const { 3053 if (N->getValueType(0) != MVT::i64) 3054 return SDValue(); 3055 3056 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3057 if (!RHS) 3058 return SDValue(); 3059 3060 SelectionDAG &DAG = DCI.DAG; 3061 SDLoc SL(N); 3062 unsigned RHSVal = RHS->getZExtValue(); 3063 3064 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) 3065 if (RHSVal == 32) { 3066 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 3067 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 3068 DAG.getConstant(31, SL, MVT::i32)); 3069 3070 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift}); 3071 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 3072 } 3073 3074 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) 3075 if (RHSVal == 63) { 3076 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 3077 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 3078 DAG.getConstant(31, SL, MVT::i32)); 3079 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift}); 3080 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 3081 } 3082 3083 return SDValue(); 3084 } 3085 3086 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N, 3087 DAGCombinerInfo &DCI) const { 3088 if (N->getValueType(0) != MVT::i64) 3089 return SDValue(); 3090 3091 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3092 if (!RHS) 3093 return SDValue(); 3094 3095 unsigned ShiftAmt = RHS->getZExtValue(); 3096 if (ShiftAmt < 32) 3097 return SDValue(); 3098 3099 // srl i64:x, C for C >= 32 3100 // => 3101 // build_pair (srl hi_32(x), C - 32), 0 3102 3103 SelectionDAG &DAG = DCI.DAG; 3104 SDLoc SL(N); 3105 3106 SDValue One = DAG.getConstant(1, SL, MVT::i32); 3107 SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 3108 3109 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0)); 3110 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, 3111 VecOp, One); 3112 3113 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); 3114 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst); 3115 3116 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero}); 3117 3118 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair); 3119 } 3120 3121 // We need to specifically handle i64 mul here to avoid unnecessary conversion 3122 // instructions. If we only match on the legalized i64 mul expansion, 3123 // SimplifyDemandedBits will be unable to remove them because there will be 3124 // multiple uses due to the separate mul + mulh[su]. 3125 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL, 3126 SDValue N0, SDValue N1, unsigned Size, bool Signed) { 3127 if (Size <= 32) { 3128 unsigned MulOpc = Signed ? 
AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 3129 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1); 3130 } 3131 3132 // Because we want to eliminate extension instructions before the 3133 // operation, we need to create a single user here (i.e. not the separate 3134 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it. 3135 3136 unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24; 3137 3138 SDValue Mul = DAG.getNode(MulOpc, SL, 3139 DAG.getVTList(MVT::i32, MVT::i32), N0, N1); 3140 3141 return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64, 3142 Mul.getValue(0), Mul.getValue(1)); 3143 } 3144 3145 SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N, 3146 DAGCombinerInfo &DCI) const { 3147 EVT VT = N->getValueType(0); 3148 3149 unsigned Size = VT.getSizeInBits(); 3150 if (VT.isVector() || Size > 64) 3151 return SDValue(); 3152 3153 // There are i16 integer mul/mad. 3154 if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16)) 3155 return SDValue(); 3156 3157 SelectionDAG &DAG = DCI.DAG; 3158 SDLoc DL(N); 3159 3160 SDValue N0 = N->getOperand(0); 3161 SDValue N1 = N->getOperand(1); 3162 SDValue Mul; 3163 3164 if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) { 3165 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 3166 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 3167 Mul = getMul24(DAG, DL, N0, N1, Size, false); 3168 } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) { 3169 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 3170 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 3171 Mul = getMul24(DAG, DL, N0, N1, Size, true); 3172 } else { 3173 return SDValue(); 3174 } 3175 3176 // We need to use sext even for MUL_U24, because MUL_U24 is used 3177 // for signed multiply of 8 and 16-bit types. 3178 return DAG.getSExtOrTrunc(Mul, DL, VT); 3179 } 3180 3181 SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N, 3182 DAGCombinerInfo &DCI) const { 3183 EVT VT = N->getValueType(0); 3184 3185 if (!Subtarget->hasMulI24() || VT.isVector()) 3186 return SDValue(); 3187 3188 SelectionDAG &DAG = DCI.DAG; 3189 SDLoc DL(N); 3190 3191 SDValue N0 = N->getOperand(0); 3192 SDValue N1 = N->getOperand(1); 3193 3194 if (!isI24(N0, DAG) || !isI24(N1, DAG)) 3195 return SDValue(); 3196 3197 N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32); 3198 N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32); 3199 3200 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1); 3201 DCI.AddToWorklist(Mulhi.getNode()); 3202 return DAG.getSExtOrTrunc(Mulhi, DL, VT); 3203 } 3204 3205 SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N, 3206 DAGCombinerInfo &DCI) const { 3207 EVT VT = N->getValueType(0); 3208 3209 if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32) 3210 return SDValue(); 3211 3212 SelectionDAG &DAG = DCI.DAG; 3213 SDLoc DL(N); 3214 3215 SDValue N0 = N->getOperand(0); 3216 SDValue N1 = N->getOperand(1); 3217 3218 if (!isU24(N0, DAG) || !isU24(N1, DAG)) 3219 return SDValue(); 3220 3221 N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32); 3222 N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32); 3223 3224 SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1); 3225 DCI.AddToWorklist(Mulhi.getNode()); 3226 return DAG.getZExtOrTrunc(Mulhi, DL, VT); 3227 } 3228 3229 SDValue AMDGPUTargetLowering::performMulLoHi24Combine( 3230 SDNode *N, DAGCombinerInfo &DCI) const { 3231 SelectionDAG &DAG = DCI.DAG; 3232 3233 // Simplify demanded bits before splitting into multiple users. 
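// If either operand was simplified (simplifyI24 commits any change through
// DCI), return without splitting; the combiner will revisit the updated node
// before we expand it into separate mul / mulhi uses below.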
3234 if (simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI)) 3235 return SDValue(); 3236 3237 SDValue N0 = N->getOperand(0); 3238 SDValue N1 = N->getOperand(1); 3239 3240 bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24); 3241 3242 unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 3243 unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24; 3244 3245 SDLoc SL(N); 3246 3247 SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1); 3248 SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1); 3249 return DAG.getMergeValues({ MulLo, MulHi }, SL); 3250 } 3251 3252 static bool isNegativeOne(SDValue Val) { 3253 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val)) 3254 return C->isAllOnesValue(); 3255 return false; 3256 } 3257 3258 SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG, 3259 SDValue Op, 3260 const SDLoc &DL, 3261 unsigned Opc) const { 3262 EVT VT = Op.getValueType(); 3263 EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT); 3264 if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() && 3265 LegalVT != MVT::i16)) 3266 return SDValue(); 3267 3268 if (VT != MVT::i32) 3269 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op); 3270 3271 SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op); 3272 if (VT != MVT::i32) 3273 FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX); 3274 3275 return FFBX; 3276 } 3277 3278 // The native instructions return -1 on 0 input. Optimize out a select that 3279 // produces -1 on 0. 3280 // 3281 // TODO: If zero is not undef, we could also do this if the output is compared 3282 // against the bitwidth. 3283 // 3284 // TODO: Should probably combine against FFBH_U32 instead of ctlz directly. 3285 SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond, 3286 SDValue LHS, SDValue RHS, 3287 DAGCombinerInfo &DCI) const { 3288 ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1)); 3289 if (!CmpRhs || !CmpRhs->isNullValue()) 3290 return SDValue(); 3291 3292 SelectionDAG &DAG = DCI.DAG; 3293 ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get(); 3294 SDValue CmpLHS = Cond.getOperand(0); 3295 3296 unsigned Opc = isCttzOpc(RHS.getOpcode()) ? 
AMDGPUISD::FFBL_B32 : 3297 AMDGPUISD::FFBH_U32; 3298 3299 // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x 3300 // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_u32 x 3301 if (CCOpcode == ISD::SETEQ && 3302 (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) && 3303 RHS.getOperand(0) == CmpLHS && 3304 isNegativeOne(LHS)) { 3305 return getFFBX_U32(DAG, CmpLHS, SL, Opc); 3306 } 3307 3308 // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x 3309 // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_u32 x 3310 if (CCOpcode == ISD::SETNE && 3311 (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) && 3312 LHS.getOperand(0) == CmpLHS && 3313 isNegativeOne(RHS)) { 3314 return getFFBX_U32(DAG, CmpLHS, SL, Opc); 3315 } 3316 3317 return SDValue(); 3318 } 3319 3320 static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI, 3321 unsigned Op, 3322 const SDLoc &SL, 3323 SDValue Cond, 3324 SDValue N1, 3325 SDValue N2) { 3326 SelectionDAG &DAG = DCI.DAG; 3327 EVT VT = N1.getValueType(); 3328 3329 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond, 3330 N1.getOperand(0), N2.getOperand(0)); 3331 DCI.AddToWorklist(NewSelect.getNode()); 3332 return DAG.getNode(Op, SL, VT, NewSelect); 3333 } 3334 3335 // Pull a free FP operation out of a select so it may fold into uses. 3336 // 3337 // select c, (fneg x), (fneg y) -> fneg (select c, x, y) 3338 // select c, (fneg x), k -> fneg (select c, x, (fneg k)) 3339 // 3340 // select c, (fabs x), (fabs y) -> fabs (select c, x, y) 3341 // select c, (fabs x), +k -> fabs (select c, x, k) 3342 static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI, 3343 SDValue N) { 3344 SelectionDAG &DAG = DCI.DAG; 3345 SDValue Cond = N.getOperand(0); 3346 SDValue LHS = N.getOperand(1); 3347 SDValue RHS = N.getOperand(2); 3348 3349 EVT VT = N.getValueType(); 3350 if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) || 3351 (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) { 3352 return distributeOpThroughSelect(DCI, LHS.getOpcode(), 3353 SDLoc(N), Cond, LHS, RHS); 3354 } 3355 3356 bool Inv = false; 3357 if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) { 3358 std::swap(LHS, RHS); 3359 Inv = true; 3360 } 3361 3362 // TODO: Support vector constants. 3363 ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS); 3364 if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) { 3365 SDLoc SL(N); 3366 // If one side is an fneg/fabs and the other is a constant, we can push the 3367 // fneg/fabs down. If it's an fabs, the constant needs to be non-negative. 3368 SDValue NewLHS = LHS.getOperand(0); 3369 SDValue NewRHS = RHS; 3370 3371 // Careful: if the neg can be folded up, don't try to pull it back down.
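// For example (illustrative): in select c, (fneg (fmul a, b)), k, the fneg
// can typically fold into the fmul as a source modifier for free, so pulling
// it out through the select here would only undo that profitable fold.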
3372 bool ShouldFoldNeg = true; 3373 3374 if (NewLHS.hasOneUse()) { 3375 unsigned Opc = NewLHS.getOpcode(); 3376 if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc)) 3377 ShouldFoldNeg = false; 3378 if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL) 3379 ShouldFoldNeg = false; 3380 } 3381 3382 if (ShouldFoldNeg) { 3383 if (LHS.getOpcode() == ISD::FNEG) 3384 NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3385 else if (CRHS->isNegative()) 3386 return SDValue(); 3387 3388 if (Inv) 3389 std::swap(NewLHS, NewRHS); 3390 3391 SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, 3392 Cond, NewLHS, NewRHS); 3393 DCI.AddToWorklist(NewSelect.getNode()); 3394 return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect); 3395 } 3396 } 3397 3398 return SDValue(); 3399 } 3400 3401 3402 SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N, 3403 DAGCombinerInfo &DCI) const { 3404 if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0))) 3405 return Folded; 3406 3407 SDValue Cond = N->getOperand(0); 3408 if (Cond.getOpcode() != ISD::SETCC) 3409 return SDValue(); 3410 3411 EVT VT = N->getValueType(0); 3412 SDValue LHS = Cond.getOperand(0); 3413 SDValue RHS = Cond.getOperand(1); 3414 SDValue CC = Cond.getOperand(2); 3415 3416 SDValue True = N->getOperand(1); 3417 SDValue False = N->getOperand(2); 3418 3419 if (Cond.hasOneUse()) { // TODO: Look for multiple select uses. 3420 SelectionDAG &DAG = DCI.DAG; 3421 if (DAG.isConstantValueOfAnyType(True) && 3422 !DAG.isConstantValueOfAnyType(False)) { 3425 // Swap cmp + select pair to move constant to false input. 3426 // This will allow using VOPC cndmasks more often. 3427 // select (setcc x, y), k, x -> select (setccinv x, y), x, k 3428 3429 SDLoc SL(N); 3430 ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(), 3431 LHS.getValueType().isInteger()); 3432 3433 SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC); 3434 return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True); 3435 } 3436 3437 if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) { 3438 SDValue MinMax 3439 = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI); 3440 // Revisit this node so we can catch min3/max3/med3 patterns. 3441 //DCI.AddToWorklist(MinMax.getNode()); 3442 return MinMax; 3443 } 3444 } 3445 3446 // There's no reason not to do this if the condition has other uses. 3447 return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI); 3448 } 3449 3450 static bool isConstantFPZero(SDValue N) { 3451 if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N)) 3452 return C->isZero() && !C->isNegative(); 3453 return false; 3454 } 3455 3456 static unsigned inverseMinMax(unsigned Opc) { 3457 switch (Opc) { 3458 case ISD::FMAXNUM: 3459 return ISD::FMINNUM; 3460 case ISD::FMINNUM: 3461 return ISD::FMAXNUM; 3462 case AMDGPUISD::FMAX_LEGACY: 3463 return AMDGPUISD::FMIN_LEGACY; 3464 case AMDGPUISD::FMIN_LEGACY: 3465 return AMDGPUISD::FMAX_LEGACY; 3466 default: 3467 llvm_unreachable("invalid min/max opcode"); 3468 } 3469 } 3470 3471 SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N, 3472 DAGCombinerInfo &DCI) const { 3473 SelectionDAG &DAG = DCI.DAG; 3474 SDValue N0 = N->getOperand(0); 3475 EVT VT = N->getValueType(0); 3476 3477 unsigned Opc = N0.getOpcode(); 3478 3479 // If the input has multiple uses and we can either fold the negate down, or 3480 // the other uses cannot, give up.
This both prevents unprofitable 3481 // transformations and infinite loops: we won't repeatedly try to fold around 3482 // a negate that has no 'good' form. 3483 if (N0.hasOneUse()) { 3484 // This may be able to fold into the source, but at a code size cost. Don't 3485 // fold if the fold into the user is free. 3486 if (allUsesHaveSourceMods(N, 0)) 3487 return SDValue(); 3488 } else { 3489 if (fnegFoldsIntoOp(Opc) && 3490 (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode()))) 3491 return SDValue(); 3492 } 3493 3494 SDLoc SL(N); 3495 switch (Opc) { 3496 case ISD::FADD: { 3497 if (!mayIgnoreSignedZero(N0)) 3498 return SDValue(); 3499 3500 // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y)) 3501 SDValue LHS = N0.getOperand(0); 3502 SDValue RHS = N0.getOperand(1); 3503 3504 if (LHS.getOpcode() != ISD::FNEG) 3505 LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); 3506 else 3507 LHS = LHS.getOperand(0); 3508 3509 if (RHS.getOpcode() != ISD::FNEG) 3510 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3511 else 3512 RHS = RHS.getOperand(0); 3513 3514 SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags()); 3515 if (!N0.hasOneUse()) 3516 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3517 return Res; 3518 } 3519 case ISD::FMUL: 3520 case AMDGPUISD::FMUL_LEGACY: { 3521 // (fneg (fmul x, y)) -> (fmul x, (fneg y)) 3522 // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y)) 3523 SDValue LHS = N0.getOperand(0); 3524 SDValue RHS = N0.getOperand(1); 3525 3526 if (LHS.getOpcode() == ISD::FNEG) 3527 LHS = LHS.getOperand(0); 3528 else if (RHS.getOpcode() == ISD::FNEG) 3529 RHS = RHS.getOperand(0); 3530 else 3531 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3532 3533 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags()); 3534 if (!N0.hasOneUse()) 3535 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3536 return Res; 3537 } 3538 case ISD::FMA: 3539 case ISD::FMAD: { 3540 if (!mayIgnoreSignedZero(N0)) 3541 return SDValue(); 3542 3543 // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z)) 3544 SDValue LHS = N0.getOperand(0); 3545 SDValue MHS = N0.getOperand(1); 3546 SDValue RHS = N0.getOperand(2); 3547 3548 if (LHS.getOpcode() == ISD::FNEG) 3549 LHS = LHS.getOperand(0); 3550 else if (MHS.getOpcode() == ISD::FNEG) 3551 MHS = MHS.getOperand(0); 3552 else 3553 MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS); 3554 3555 if (RHS.getOpcode() != ISD::FNEG) 3556 RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3557 else 3558 RHS = RHS.getOperand(0); 3559 3560 SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS); 3561 if (!N0.hasOneUse()) 3562 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3563 return Res; 3564 } 3565 case ISD::FMAXNUM: 3566 case ISD::FMINNUM: 3567 case AMDGPUISD::FMAX_LEGACY: 3568 case AMDGPUISD::FMIN_LEGACY: { 3569 // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y) 3570 // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y) 3571 // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y) 3572 // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y) 3573 3574 SDValue LHS = N0.getOperand(0); 3575 SDValue RHS = N0.getOperand(1); 3576 3577 // 0 doesn't have a negated inline immediate. 3578 // TODO: Shouldn't fold 1/2pi either, and should be generalized to other 3579 // operations. 
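// For example (an illustrative case): fneg (fmaxnum x, 0.0) would become
// fminnum (fneg x), -0.0, and -0.0 has to be materialized as a literal
// rather than an inline immediate.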
3580 if (isConstantFPZero(RHS)) 3581 return SDValue(); 3582 3583 SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS); 3584 SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS); 3585 unsigned Opposite = inverseMinMax(Opc); 3586 3587 SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags()); 3588 if (!N0.hasOneUse()) 3589 DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res)); 3590 return Res; 3591 } 3592 case ISD::FP_EXTEND: 3593 case ISD::FTRUNC: 3594 case ISD::FRINT: 3595 case ISD::FNEARBYINT: // XXX - Should fround be handled? 3596 case ISD::FSIN: 3597 case AMDGPUISD::RCP: 3598 case AMDGPUISD::RCP_LEGACY: 3599 case AMDGPUISD::SIN_HW: { 3600 SDValue CvtSrc = N0.getOperand(0); 3601 if (CvtSrc.getOpcode() == ISD::FNEG) { 3602 // (fneg (fp_extend (fneg x))) -> (fp_extend x) 3603 // (fneg (rcp (fneg x))) -> (rcp x) 3604 return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0)); 3605 } 3606 3607 if (!N0.hasOneUse()) 3608 return SDValue(); 3609 3610 // (fneg (fp_extend x)) -> (fp_extend (fneg x)) 3611 // (fneg (rcp x)) -> (rcp (fneg x)) 3612 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc); 3613 return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags()); 3614 } 3615 case ISD::FP_ROUND: { 3616 SDValue CvtSrc = N0.getOperand(0); 3617 3618 if (CvtSrc.getOpcode() == ISD::FNEG) { 3619 // (fneg (fp_round (fneg x))) -> (fp_round x) 3620 return DAG.getNode(ISD::FP_ROUND, SL, VT, 3621 CvtSrc.getOperand(0), N0.getOperand(1)); 3622 } 3623 3624 if (!N0.hasOneUse()) 3625 return SDValue(); 3626 3627 // (fneg (fp_round x)) -> (fp_round (fneg x)) 3628 SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc); 3629 return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1)); 3630 } 3631 case ISD::FP16_TO_FP: { 3632 // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal 3633 // f16, but legalization of f16 fneg ends up pulling it out of the source. 3634 // Put the fneg back as a legal source operation that can be matched later. 3635 SDLoc SL(N); 3636 3637 SDValue Src = N0.getOperand(0); 3638 EVT SrcVT = Src.getValueType(); 3639 3640 // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000) 3641 SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src, 3642 DAG.getConstant(0x8000, SL, SrcVT)); 3643 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg); 3644 } 3645 default: 3646 return SDValue(); 3647 } 3648 } 3649 3650 SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N, 3651 DAGCombinerInfo &DCI) const { 3652 SelectionDAG &DAG = DCI.DAG; 3653 SDValue N0 = N->getOperand(0); 3654 3655 if (!N0.hasOneUse()) 3656 return SDValue(); 3657 3658 switch (N0.getOpcode()) { 3659 case ISD::FP16_TO_FP: { 3660 assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal"); 3661 SDLoc SL(N); 3662 SDValue Src = N0.getOperand(0); 3663 EVT SrcVT = Src.getValueType(); 3664 3665 // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff) 3666 SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src, 3667 DAG.getConstant(0x7fff, SL, SrcVT)); 3668 return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs); 3669 } 3670 default: 3671 return SDValue(); 3672 } 3673 } 3674 3675 SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N, 3676 DAGCombinerInfo &DCI) const { 3677 SelectionDAG &DAG = DCI.DAG; 3678 SDLoc DL(N); 3679 3680 switch(N->getOpcode()) { 3681 default: 3682 break; 3683 case ISD::BITCAST: { 3684 EVT DestVT = N->getValueType(0); 3685 3686 // Push casts through vector builds. 
This helps avoid emitting a large 3687 // number of copies when materializing floating point vector constants. 3688 // 3689 // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) => 3690 // vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y)) 3691 if (DestVT.isVector()) { 3692 SDValue Src = N->getOperand(0); 3693 if (Src.getOpcode() == ISD::BUILD_VECTOR) { 3694 EVT SrcVT = Src.getValueType(); 3695 unsigned NElts = DestVT.getVectorNumElements(); 3696 3697 if (SrcVT.getVectorNumElements() == NElts) { 3698 EVT DestEltVT = DestVT.getVectorElementType(); 3699 3700 SmallVector<SDValue, 8> CastedElts; 3701 SDLoc SL(N); 3702 for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) { 3703 SDValue Elt = Src.getOperand(I); 3704 CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt)); 3705 } 3706 3707 return DAG.getBuildVector(DestVT, SL, CastedElts); 3708 } 3709 } 3710 } 3711 3712 if (DestVT.getSizeInBits() != 64 && !DestVT.isVector()) 3713 break; 3714 3715 // Fold bitcasts of constants. 3716 // 3717 // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k) 3718 // TODO: Generalize and move to DAGCombiner 3719 SDValue Src = N->getOperand(0); 3720 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) { 3721 assert(Src.getValueType() == MVT::i64); 3722 SDLoc SL(N); 3723 uint64_t CVal = C->getZExtValue(); 3724 return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT, 3725 DAG.getConstant(Lo_32(CVal), SL, MVT::i32), 3726 DAG.getConstant(Hi_32(CVal), SL, MVT::i32)); 3727 } 3728 3729 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) { 3730 const APInt &Val = C->getValueAPF().bitcastToAPInt(); 3731 SDLoc SL(N); 3732 uint64_t CVal = Val.getZExtValue(); 3733 SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, 3734 DAG.getConstant(Lo_32(CVal), SL, MVT::i32), 3735 DAG.getConstant(Hi_32(CVal), SL, MVT::i32)); 3736 3737 return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec); 3738 } 3739 3740 break; 3741 } 3742 case ISD::SHL: { 3743 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 3744 break; 3745 3746 return performShlCombine(N, DCI); 3747 } 3748 case ISD::SRL: { 3749 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 3750 break; 3751 3752 return performSrlCombine(N, DCI); 3753 } 3754 case ISD::SRA: { 3755 if (DCI.getDAGCombineLevel() < AfterLegalizeDAG) 3756 break; 3757 3758 return performSraCombine(N, DCI); 3759 } 3760 case ISD::MUL: 3761 return performMulCombine(N, DCI); 3762 case ISD::MULHS: 3763 return performMulhsCombine(N, DCI); 3764 case ISD::MULHU: 3765 return performMulhuCombine(N, DCI); 3766 case AMDGPUISD::MUL_I24: 3767 case AMDGPUISD::MUL_U24: 3768 case AMDGPUISD::MULHI_I24: 3769 case AMDGPUISD::MULHI_U24: { 3770 // If the first call to simplify is successful, then N may end up being 3771 // deleted, so we shouldn't call simplifyI24 again.
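// Note the short-circuiting || below: the second simplifyI24 call is only
// made if the first changed nothing, i.e. only while N is known to still be
// alive.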
3772 simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI); 3773 return SDValue(); 3774 } 3775 case AMDGPUISD::MUL_LOHI_I24: 3776 case AMDGPUISD::MUL_LOHI_U24: 3777 return performMulLoHi24Combine(N, DCI); 3778 case ISD::SELECT: 3779 return performSelectCombine(N, DCI); 3780 case ISD::FNEG: 3781 return performFNegCombine(N, DCI); 3782 case ISD::FABS: 3783 return performFAbsCombine(N, DCI); 3784 case AMDGPUISD::BFE_I32: 3785 case AMDGPUISD::BFE_U32: { 3786 assert(!N->getValueType(0).isVector() && 3787 "Vector handling of BFE not implemented"); 3788 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2)); 3789 if (!Width) 3790 break; 3791 3792 uint32_t WidthVal = Width->getZExtValue() & 0x1f; 3793 if (WidthVal == 0) 3794 return DAG.getConstant(0, DL, MVT::i32); 3795 3796 ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3797 if (!Offset) 3798 break; 3799 3800 SDValue BitsFrom = N->getOperand(0); 3801 uint32_t OffsetVal = Offset->getZExtValue() & 0x1f; 3802 3803 bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32; 3804 3805 if (OffsetVal == 0) { 3806 // This is already sign / zero extended, so try to fold away extra BFEs. 3807 unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal); 3808 3809 unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom); 3810 if (OpSignBits >= SignBits) 3811 return BitsFrom; 3812 3813 EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal); 3814 if (Signed) { 3815 // This is a sign_extend_inreg. Replace it to take advantage of existing 3816 // DAG Combines. If not eliminated, we will match back to BFE during 3817 // selection. 3818 3819 // TODO: The sext_inreg of extended types ends up expanded, although we 3820 // could handle them in a single BFE. 3821 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom, 3822 DAG.getValueType(SmallVT)); 3823 } 3824 3825 return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT); 3826 } 3827 3828 if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) { 3829 if (Signed) { 3830 return constantFoldBFE<int32_t>(DAG, 3831 CVal->getSExtValue(), 3832 OffsetVal, 3833 WidthVal, 3834 DL); 3835 } 3836 3837 return constantFoldBFE<uint32_t>(DAG, 3838 CVal->getZExtValue(), 3839 OffsetVal, 3840 WidthVal, 3841 DL); 3842 } 3843 3844 if ((OffsetVal + WidthVal) >= 32 && 3845 !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) { 3846 SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32); 3847 return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32, 3848 BitsFrom, ShiftVal); 3849 } 3850 3851 if (BitsFrom.hasOneUse()) { 3852 APInt Demanded = APInt::getBitsSet(32, 3853 OffsetVal, 3854 OffsetVal + WidthVal); 3855 3856 KnownBits Known; 3857 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 3858 !DCI.isBeforeLegalizeOps()); 3859 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 3860 if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) || 3861 TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) { 3862 DCI.CommitTargetLoweringOpt(TLO); 3863 } 3864 } 3865 3866 break; 3867 } 3868 case ISD::LOAD: 3869 return performLoadCombine(N, DCI); 3870 case ISD::STORE: 3871 return performStoreCombine(N, DCI); 3872 case AMDGPUISD::CLAMP: 3873 return performClampCombine(N, DCI); 3874 case AMDGPUISD::RCP: { 3875 if (const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0))) { 3876 // XXX - Should this flush denormals?
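// For example (illustrative): (rcp 2.0) constant-folds to 0.5 here; no
// denormal flushing is applied to the result.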
3877 const APFloat &Val = CFP->getValueAPF(); 3878 APFloat One(Val.getSemantics(), "1.0"); 3879 return DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0)); 3880 } 3881 3882 break; 3883 } 3884 case ISD::AssertZext: 3885 case ISD::AssertSext: 3886 return performAssertSZExtCombine(N, DCI); 3887 } 3888 return SDValue(); 3889 } 3890 3891 //===----------------------------------------------------------------------===// 3892 // Helper functions 3893 //===----------------------------------------------------------------------===// 3894 3895 SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG, 3896 const TargetRegisterClass *RC, 3897 unsigned Reg, EVT VT, 3898 const SDLoc &SL, 3899 bool RawReg) const { 3900 MachineFunction &MF = DAG.getMachineFunction(); 3901 MachineRegisterInfo &MRI = MF.getRegInfo(); 3902 unsigned VReg; 3903 3904 if (!MRI.isLiveIn(Reg)) { 3905 VReg = MRI.createVirtualRegister(RC); 3906 MRI.addLiveIn(Reg, VReg); 3907 } else { 3908 VReg = MRI.getLiveInVirtReg(Reg); 3909 } 3910 3911 if (RawReg) 3912 return DAG.getRegister(VReg, VT); 3913 3914 return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT); 3915 } 3916 3917 SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG, 3918 EVT VT, 3919 const SDLoc &SL, 3920 int64_t Offset) const { 3921 MachineFunction &MF = DAG.getMachineFunction(); 3922 MachineFrameInfo &MFI = MF.getFrameInfo(); 3923 3924 int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true); 3925 auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset); 3926 SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32); 3927 3928 return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4, 3929 MachineMemOperand::MODereferenceable | 3930 MachineMemOperand::MOInvariant); 3931 } 3932 3933 SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG, 3934 const SDLoc &SL, 3935 SDValue Chain, 3936 SDValue StackPtr, 3937 SDValue ArgVal, 3938 int64_t Offset) const { 3939 MachineFunction &MF = DAG.getMachineFunction(); 3940 MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset); 3941 3942 SDValue Ptr = DAG.getObjectPtrOffset(SL, StackPtr, Offset); 3943 SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4, 3944 MachineMemOperand::MODereferenceable); 3945 return Store; 3946 } 3947 3948 SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG, 3949 const TargetRegisterClass *RC, 3950 EVT VT, const SDLoc &SL, 3951 const ArgDescriptor &Arg) const { 3952 assert(Arg && "Attempting to load missing argument"); 3953 3954 if (Arg.isRegister()) 3955 return CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL); 3956 return loadStackInputValue(DAG, VT, SL, Arg.getStackOffset()); 3957 } 3958 3959 uint32_t AMDGPUTargetLowering::getImplicitParameterOffset( 3960 const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const { 3961 unsigned Alignment = Subtarget->getAlignmentForImplicitArgPtr(); 3962 uint64_t ArgOffset = alignTo(MFI->getABIArgOffset(), Alignment); 3963 switch (Param) { 3964 case GRID_DIM: 3965 return ArgOffset; 3966 case GRID_OFFSET: 3967 return ArgOffset + 4; 3968 } 3969 llvm_unreachable("unexpected implicit parameter type"); 3970 } 3971 3972 #define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node; 3973 3974 const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const { 3975 switch ((AMDGPUISD::NodeType)Opcode) { 3976 case AMDGPUISD::FIRST_NUMBER: break; 3977 // AMDIL DAG nodes 3978 NODE_NAME_CASE(UMUL); 3979 NODE_NAME_CASE(BRANCH_COND); 3980 3981 // AMDGPU DAG nodes 3982 
NODE_NAME_CASE(IF) 3983 NODE_NAME_CASE(ELSE) 3984 NODE_NAME_CASE(LOOP) 3985 NODE_NAME_CASE(CALL) 3986 NODE_NAME_CASE(TC_RETURN) 3987 NODE_NAME_CASE(TRAP) 3988 NODE_NAME_CASE(RET_FLAG) 3989 NODE_NAME_CASE(RETURN_TO_EPILOG) 3990 NODE_NAME_CASE(ENDPGM) 3991 NODE_NAME_CASE(DWORDADDR) 3992 NODE_NAME_CASE(FRACT) 3993 NODE_NAME_CASE(SETCC) 3994 NODE_NAME_CASE(SETREG) 3995 NODE_NAME_CASE(FMA_W_CHAIN) 3996 NODE_NAME_CASE(FMUL_W_CHAIN) 3997 NODE_NAME_CASE(CLAMP) 3998 NODE_NAME_CASE(COS_HW) 3999 NODE_NAME_CASE(SIN_HW) 4000 NODE_NAME_CASE(FMAX_LEGACY) 4001 NODE_NAME_CASE(FMIN_LEGACY) 4002 NODE_NAME_CASE(FMAX3) 4003 NODE_NAME_CASE(SMAX3) 4004 NODE_NAME_CASE(UMAX3) 4005 NODE_NAME_CASE(FMIN3) 4006 NODE_NAME_CASE(SMIN3) 4007 NODE_NAME_CASE(UMIN3) 4008 NODE_NAME_CASE(FMED3) 4009 NODE_NAME_CASE(SMED3) 4010 NODE_NAME_CASE(UMED3) 4011 NODE_NAME_CASE(URECIP) 4012 NODE_NAME_CASE(DIV_SCALE) 4013 NODE_NAME_CASE(DIV_FMAS) 4014 NODE_NAME_CASE(DIV_FIXUP) 4015 NODE_NAME_CASE(FMAD_FTZ) 4016 NODE_NAME_CASE(TRIG_PREOP) 4017 NODE_NAME_CASE(RCP) 4018 NODE_NAME_CASE(RSQ) 4019 NODE_NAME_CASE(RCP_LEGACY) 4020 NODE_NAME_CASE(RSQ_LEGACY) 4021 NODE_NAME_CASE(FMUL_LEGACY) 4022 NODE_NAME_CASE(RSQ_CLAMP) 4023 NODE_NAME_CASE(LDEXP) 4024 NODE_NAME_CASE(FP_CLASS) 4025 NODE_NAME_CASE(DOT4) 4026 NODE_NAME_CASE(CARRY) 4027 NODE_NAME_CASE(BORROW) 4028 NODE_NAME_CASE(BFE_U32) 4029 NODE_NAME_CASE(BFE_I32) 4030 NODE_NAME_CASE(BFI) 4031 NODE_NAME_CASE(BFM) 4032 NODE_NAME_CASE(FFBH_U32) 4033 NODE_NAME_CASE(FFBH_I32) 4034 NODE_NAME_CASE(FFBL_B32) 4035 NODE_NAME_CASE(MUL_U24) 4036 NODE_NAME_CASE(MUL_I24) 4037 NODE_NAME_CASE(MULHI_U24) 4038 NODE_NAME_CASE(MULHI_I24) 4039 NODE_NAME_CASE(MUL_LOHI_U24) 4040 NODE_NAME_CASE(MUL_LOHI_I24) 4041 NODE_NAME_CASE(MAD_U24) 4042 NODE_NAME_CASE(MAD_I24) 4043 NODE_NAME_CASE(MAD_I64_I32) 4044 NODE_NAME_CASE(MAD_U64_U32) 4045 NODE_NAME_CASE(TEXTURE_FETCH) 4046 NODE_NAME_CASE(EXPORT) 4047 NODE_NAME_CASE(EXPORT_DONE) 4048 NODE_NAME_CASE(R600_EXPORT) 4049 NODE_NAME_CASE(CONST_ADDRESS) 4050 NODE_NAME_CASE(REGISTER_LOAD) 4051 NODE_NAME_CASE(REGISTER_STORE) 4052 NODE_NAME_CASE(SAMPLE) 4053 NODE_NAME_CASE(SAMPLEB) 4054 NODE_NAME_CASE(SAMPLED) 4055 NODE_NAME_CASE(SAMPLEL) 4056 NODE_NAME_CASE(CVT_F32_UBYTE0) 4057 NODE_NAME_CASE(CVT_F32_UBYTE1) 4058 NODE_NAME_CASE(CVT_F32_UBYTE2) 4059 NODE_NAME_CASE(CVT_F32_UBYTE3) 4060 NODE_NAME_CASE(CVT_PKRTZ_F16_F32) 4061 NODE_NAME_CASE(CVT_PKNORM_I16_F32) 4062 NODE_NAME_CASE(CVT_PKNORM_U16_F32) 4063 NODE_NAME_CASE(CVT_PK_I16_I32) 4064 NODE_NAME_CASE(CVT_PK_U16_U32) 4065 NODE_NAME_CASE(FP_TO_FP16) 4066 NODE_NAME_CASE(FP16_ZEXT) 4067 NODE_NAME_CASE(BUILD_VERTICAL_VECTOR) 4068 NODE_NAME_CASE(CONST_DATA_PTR) 4069 NODE_NAME_CASE(PC_ADD_REL_OFFSET) 4070 NODE_NAME_CASE(KILL) 4071 NODE_NAME_CASE(DUMMY_CHAIN) 4072 case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break; 4073 NODE_NAME_CASE(INIT_EXEC) 4074 NODE_NAME_CASE(INIT_EXEC_FROM_INPUT) 4075 NODE_NAME_CASE(SENDMSG) 4076 NODE_NAME_CASE(SENDMSGHALT) 4077 NODE_NAME_CASE(INTERP_MOV) 4078 NODE_NAME_CASE(INTERP_P1) 4079 NODE_NAME_CASE(INTERP_P2) 4080 NODE_NAME_CASE(STORE_MSKOR) 4081 NODE_NAME_CASE(LOAD_CONSTANT) 4082 NODE_NAME_CASE(TBUFFER_STORE_FORMAT) 4083 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_X3) 4084 NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16) 4085 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT) 4086 NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16) 4087 NODE_NAME_CASE(ATOMIC_CMP_SWAP) 4088 NODE_NAME_CASE(ATOMIC_INC) 4089 NODE_NAME_CASE(ATOMIC_DEC) 4090 NODE_NAME_CASE(ATOMIC_LOAD_FADD) 4091 NODE_NAME_CASE(ATOMIC_LOAD_FMIN) 4092 NODE_NAME_CASE(ATOMIC_LOAD_FMAX) 4093 
NODE_NAME_CASE(BUFFER_LOAD) 4094 NODE_NAME_CASE(BUFFER_LOAD_FORMAT) 4095 NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16) 4096 NODE_NAME_CASE(BUFFER_STORE) 4097 NODE_NAME_CASE(BUFFER_STORE_FORMAT) 4098 NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16) 4099 NODE_NAME_CASE(BUFFER_ATOMIC_SWAP) 4100 NODE_NAME_CASE(BUFFER_ATOMIC_ADD) 4101 NODE_NAME_CASE(BUFFER_ATOMIC_SUB) 4102 NODE_NAME_CASE(BUFFER_ATOMIC_SMIN) 4103 NODE_NAME_CASE(BUFFER_ATOMIC_UMIN) 4104 NODE_NAME_CASE(BUFFER_ATOMIC_SMAX) 4105 NODE_NAME_CASE(BUFFER_ATOMIC_UMAX) 4106 NODE_NAME_CASE(BUFFER_ATOMIC_AND) 4107 NODE_NAME_CASE(BUFFER_ATOMIC_OR) 4108 NODE_NAME_CASE(BUFFER_ATOMIC_XOR) 4109 NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP) 4110 NODE_NAME_CASE(IMAGE_LOAD) 4111 NODE_NAME_CASE(IMAGE_LOAD_MIP) 4112 NODE_NAME_CASE(IMAGE_STORE) 4113 NODE_NAME_CASE(IMAGE_STORE_MIP) 4114 // Basic sample. 4115 NODE_NAME_CASE(IMAGE_SAMPLE) 4116 NODE_NAME_CASE(IMAGE_SAMPLE_CL) 4117 NODE_NAME_CASE(IMAGE_SAMPLE_D) 4118 NODE_NAME_CASE(IMAGE_SAMPLE_D_CL) 4119 NODE_NAME_CASE(IMAGE_SAMPLE_L) 4120 NODE_NAME_CASE(IMAGE_SAMPLE_B) 4121 NODE_NAME_CASE(IMAGE_SAMPLE_B_CL) 4122 NODE_NAME_CASE(IMAGE_SAMPLE_LZ) 4123 NODE_NAME_CASE(IMAGE_SAMPLE_CD) 4124 NODE_NAME_CASE(IMAGE_SAMPLE_CD_CL) 4125 // Sample with comparison. 4126 NODE_NAME_CASE(IMAGE_SAMPLE_C) 4127 NODE_NAME_CASE(IMAGE_SAMPLE_C_CL) 4128 NODE_NAME_CASE(IMAGE_SAMPLE_C_D) 4129 NODE_NAME_CASE(IMAGE_SAMPLE_C_D_CL) 4130 NODE_NAME_CASE(IMAGE_SAMPLE_C_L) 4131 NODE_NAME_CASE(IMAGE_SAMPLE_C_B) 4132 NODE_NAME_CASE(IMAGE_SAMPLE_C_B_CL) 4133 NODE_NAME_CASE(IMAGE_SAMPLE_C_LZ) 4134 NODE_NAME_CASE(IMAGE_SAMPLE_C_CD) 4135 NODE_NAME_CASE(IMAGE_SAMPLE_C_CD_CL) 4136 // Sample with offsets. 4137 NODE_NAME_CASE(IMAGE_SAMPLE_O) 4138 NODE_NAME_CASE(IMAGE_SAMPLE_CL_O) 4139 NODE_NAME_CASE(IMAGE_SAMPLE_D_O) 4140 NODE_NAME_CASE(IMAGE_SAMPLE_D_CL_O) 4141 NODE_NAME_CASE(IMAGE_SAMPLE_L_O) 4142 NODE_NAME_CASE(IMAGE_SAMPLE_B_O) 4143 NODE_NAME_CASE(IMAGE_SAMPLE_B_CL_O) 4144 NODE_NAME_CASE(IMAGE_SAMPLE_LZ_O) 4145 NODE_NAME_CASE(IMAGE_SAMPLE_CD_O) 4146 NODE_NAME_CASE(IMAGE_SAMPLE_CD_CL_O) 4147 // Sample with comparison and offsets. 4148 NODE_NAME_CASE(IMAGE_SAMPLE_C_O) 4149 NODE_NAME_CASE(IMAGE_SAMPLE_C_CL_O) 4150 NODE_NAME_CASE(IMAGE_SAMPLE_C_D_O) 4151 NODE_NAME_CASE(IMAGE_SAMPLE_C_D_CL_O) 4152 NODE_NAME_CASE(IMAGE_SAMPLE_C_L_O) 4153 NODE_NAME_CASE(IMAGE_SAMPLE_C_B_O) 4154 NODE_NAME_CASE(IMAGE_SAMPLE_C_B_CL_O) 4155 NODE_NAME_CASE(IMAGE_SAMPLE_C_LZ_O) 4156 NODE_NAME_CASE(IMAGE_SAMPLE_C_CD_O) 4157 NODE_NAME_CASE(IMAGE_SAMPLE_C_CD_CL_O) 4158 // Basic gather4. 4159 NODE_NAME_CASE(IMAGE_GATHER4) 4160 NODE_NAME_CASE(IMAGE_GATHER4_CL) 4161 NODE_NAME_CASE(IMAGE_GATHER4_L) 4162 NODE_NAME_CASE(IMAGE_GATHER4_B) 4163 NODE_NAME_CASE(IMAGE_GATHER4_B_CL) 4164 NODE_NAME_CASE(IMAGE_GATHER4_LZ) 4165 // Gather4 with comparison. 4166 NODE_NAME_CASE(IMAGE_GATHER4_C) 4167 NODE_NAME_CASE(IMAGE_GATHER4_C_CL) 4168 NODE_NAME_CASE(IMAGE_GATHER4_C_L) 4169 NODE_NAME_CASE(IMAGE_GATHER4_C_B) 4170 NODE_NAME_CASE(IMAGE_GATHER4_C_B_CL) 4171 NODE_NAME_CASE(IMAGE_GATHER4_C_LZ) 4172 // Gather4 with offsets. 4173 NODE_NAME_CASE(IMAGE_GATHER4_O) 4174 NODE_NAME_CASE(IMAGE_GATHER4_CL_O) 4175 NODE_NAME_CASE(IMAGE_GATHER4_L_O) 4176 NODE_NAME_CASE(IMAGE_GATHER4_B_O) 4177 NODE_NAME_CASE(IMAGE_GATHER4_B_CL_O) 4178 NODE_NAME_CASE(IMAGE_GATHER4_LZ_O) 4179 // Gather4 with comparison and offsets. 
4180 NODE_NAME_CASE(IMAGE_GATHER4_C_O) 4181 NODE_NAME_CASE(IMAGE_GATHER4_C_CL_O) 4182 NODE_NAME_CASE(IMAGE_GATHER4_C_L_O) 4183 NODE_NAME_CASE(IMAGE_GATHER4_C_B_O) 4184 NODE_NAME_CASE(IMAGE_GATHER4_C_B_CL_O) 4185 NODE_NAME_CASE(IMAGE_GATHER4_C_LZ_O) 4186 4187 case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break; 4188 } 4189 return nullptr; 4190 } 4191 4192 SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand, 4193 SelectionDAG &DAG, int Enabled, 4194 int &RefinementSteps, 4195 bool &UseOneConstNR, 4196 bool Reciprocal) const { 4197 EVT VT = Operand.getValueType(); 4198 4199 if (VT == MVT::f32) { 4200 RefinementSteps = 0; 4201 return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand); 4202 } 4203 4204 // TODO: There is also an f64 rsq instruction, but the documentation is less 4205 // clear on its precision. 4206 4207 return SDValue(); 4208 } 4209 4210 SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand, 4211 SelectionDAG &DAG, int Enabled, 4212 int &RefinementSteps) const { 4213 EVT VT = Operand.getValueType(); 4214 4215 if (VT == MVT::f32) { 4216 // Reciprocal, < 1 ulp error. 4217 // 4218 // This reciprocal approximation converges to < 0.5 ulp error with one 4219 // Newton-Raphson iteration performed with two fused multiply-adds (FMAs). 4220 4221 RefinementSteps = 0; 4222 return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand); 4223 } 4224 4225 // TODO: There is also an f64 rcp instruction, but the documentation is less 4226 // clear on its precision. 4227 4228 return SDValue(); 4229 } 4230 4231 void AMDGPUTargetLowering::computeKnownBitsForTargetNode( 4232 const SDValue Op, KnownBits &Known, 4233 const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const { 4234 4235 Known.resetAll(); // Don't know anything. 4236 4237 unsigned Opc = Op.getOpcode(); 4238 4239 switch (Opc) { 4240 default: 4241 break; 4242 case AMDGPUISD::CARRY: 4243 case AMDGPUISD::BORROW: { 4244 Known.Zero = APInt::getHighBitsSet(32, 31); 4245 break; 4246 } 4247 4248 case AMDGPUISD::BFE_I32: 4249 case AMDGPUISD::BFE_U32: { 4250 ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4251 if (!CWidth) 4252 return; 4253 4254 uint32_t Width = CWidth->getZExtValue() & 0x1f; 4255 4256 if (Opc == AMDGPUISD::BFE_U32) 4257 Known.Zero = APInt::getHighBitsSet(32, 32 - Width); 4258 4259 break; 4260 } 4261 case AMDGPUISD::FP_TO_FP16: 4262 case AMDGPUISD::FP16_ZEXT: { 4263 unsigned BitWidth = Known.getBitWidth(); 4264 4265 // High bits are zero.
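// For example (illustrative): for an i32 result only the low 16 bits can be
// nonzero, so Known.Zero ends up as 0xffff0000.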
4266 Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16); 4267 break; 4268 } 4269 case AMDGPUISD::MUL_U24: 4270 case AMDGPUISD::MUL_I24: { 4271 KnownBits LHSKnown, RHSKnown; 4272 DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1); 4273 DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1); 4274 4275 unsigned TrailZ = LHSKnown.countMinTrailingZeros() + 4276 RHSKnown.countMinTrailingZeros(); 4277 Known.Zero.setLowBits(std::min(TrailZ, 32u)); 4278 4279 unsigned LHSValBits = 32 - std::max(LHSKnown.countMinSignBits(), 8u); 4280 unsigned RHSValBits = 32 - std::max(RHSKnown.countMinSignBits(), 8u); 4281 unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u); 4282 if (MaxValBits >= 32) 4283 break; 4284 bool Negative = false; 4285 if (Opc == AMDGPUISD::MUL_I24) { 4286 bool LHSNegative = !!(LHSKnown.One & (1 << 23)); 4287 bool LHSPositive = !!(LHSKnown.Zero & (1 << 23)); 4288 bool RHSNegative = !!(RHSKnown.One & (1 << 23)); 4289 bool RHSPositive = !!(RHSKnown.Zero & (1 << 23)); 4290 if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive)) 4291 break; 4292 Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative); 4293 } 4294 if (Negative) 4295 Known.One.setHighBits(32 - MaxValBits); 4296 else 4297 Known.Zero.setHighBits(32 - MaxValBits); 4298 break; 4299 } 4300 case ISD::INTRINSIC_WO_CHAIN: { 4301 unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue(); 4302 switch (IID) { 4303 case Intrinsic::amdgcn_mbcnt_lo: 4304 case Intrinsic::amdgcn_mbcnt_hi: { 4305 // These return at most the wavefront size - 1. 4306 unsigned Size = Op.getValueType().getSizeInBits(); 4307 Known.Zero.setHighBits(Size - Subtarget->getWavefrontSizeLog2()); 4308 break; 4309 } 4310 default: 4311 break; 4312 } 4313 } 4314 } 4315 } 4316 4317 unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode( 4318 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG, 4319 unsigned Depth) const { 4320 switch (Op.getOpcode()) { 4321 case AMDGPUISD::BFE_I32: { 4322 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4323 if (!Width) 4324 return 1; 4325 4326 unsigned SignBits = 32 - Width->getZExtValue() + 1; 4327 if (!isNullConstant(Op.getOperand(1))) 4328 return SignBits; 4329 4330 // TODO: Could probably figure something out with non-0 offsets. 4331 unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1); 4332 return std::max(SignBits, Op0SignBits); 4333 } 4334 4335 case AMDGPUISD::BFE_U32: { 4336 ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 4337 return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1; 4338 } 4339 4340 case AMDGPUISD::CARRY: 4341 case AMDGPUISD::BORROW: 4342 return 31; 4343 case AMDGPUISD::FP_TO_FP16: 4344 case AMDGPUISD::FP16_ZEXT: 4345 return 16; 4346 default: 4347 return 1; 4348 } 4349 } 4350