//===-- AMDGPUISelLowering.cpp - AMDGPU Common DAG lowering functions -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This is the parent TargetLowering class for hardware code gen
/// targets.
//
//===----------------------------------------------------------------------===//

#define AMDGPU_LOG2E_F 1.44269504088896340735992468100189214f
#define AMDGPU_LN2_F 0.693147180559945309417232121458176568f
#define AMDGPU_LN10_F 2.30258509299404568401799145468436421f

#include "AMDGPUISelLowering.h"
#include "AMDGPU.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUFrameLowering.h"
#include "AMDGPUIntrinsicInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "R600MachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/KnownBits.h"
using namespace llvm;

static bool allocateKernArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  MachineFunction &MF = State.getMachineFunction();
  AMDGPUMachineFunction *MFI = MF.getInfo<AMDGPUMachineFunction>();

  uint64_t Offset = MFI->allocateKernArg(LocVT.getStoreSize(),
                                         ArgFlags.getOrigAlign());
  State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return true;
}

static bool allocateCCRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
                           CCValAssign::LocInfo LocInfo,
                           ISD::ArgFlagsTy ArgFlags, CCState &State,
                           const TargetRegisterClass *RC,
                           unsigned NumRegs) {
  ArrayRef<MCPhysReg> RegList = makeArrayRef(RC->begin(), NumRegs);
  unsigned RegResult = State.AllocateReg(RegList);
  if (RegResult == AMDGPU::NoRegister)
    return false;

  State.addLoc(CCValAssign::getReg(ValNo, ValVT, RegResult, LocVT, LocInfo));
  return true;
}

static bool allocateSGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    // Up to SGPR0-SGPR39
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::SGPR_64RegClass, 20);
  }
  default:
    return false;
  }
}
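
// Illustrative note: these allocate* helpers are used as custom handlers by
// the generated calling-convention tables (AMDGPUGenCallingConv.inc, included
// below) or called directly, as allocateKernArg is from
// analyzeFormalArgumentsCompute later in this file. For an i64 or v2f32
// argument, allocateSGPRTuple scans the first 20 entries of SGPR_64RegClass,
// roughly the pairs covering SGPR0 through SGPR39, and assigns the first one
// still free.
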
// Allocate up to VGPR31.
//
// TODO: Since there are no VGPR alignment requirements would it be better to
// split into individual scalar registers?
static bool allocateVGPRTuple(unsigned ValNo, MVT ValVT, MVT LocVT,
                              CCValAssign::LocInfo LocInfo,
                              ISD::ArgFlagsTy ArgFlags, CCState &State) {
  switch (LocVT.SimpleTy) {
  case MVT::i64:
  case MVT::f64:
  case MVT::v2i32:
  case MVT::v2f32:
  case MVT::v4i16:
  case MVT::v4f16: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_64RegClass, 31);
  }
  case MVT::v4i32:
  case MVT::v4f32:
  case MVT::v2i64:
  case MVT::v2f64: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_128RegClass, 29);
  }
  case MVT::v8i32:
  case MVT::v8f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_256RegClass, 25);
  }
  case MVT::v16i32:
  case MVT::v16f32: {
    return allocateCCRegs(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State,
                          &AMDGPU::VReg_512RegClass, 17);
  }
  default:
    return false;
  }
}

#include "AMDGPUGenCallingConv.inc"

// Find a larger type to do a load / store of a vector with.
EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
  unsigned StoreSize = VT.getStoreSizeInBits();
  if (StoreSize <= 32)
    return EVT::getIntegerVT(Ctx, StoreSize);

  assert(StoreSize % 32 == 0 && "Store size not a multiple of 32");
  return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}

unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
  KnownBits Known;
  EVT VT = Op.getValueType();
  DAG.computeKnownBits(Op, Known);

  return VT.getSizeInBits() - Known.countMinLeadingZeros();
}

unsigned AMDGPUTargetLowering::numBitsSigned(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  // In order for this to be a signed 24-bit value, bit 23 must
  // be a sign bit.
  return VT.getSizeInBits() - DAG.ComputeNumSignBits(Op);
}
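
// Worked examples for the helpers above: getEquivalentMemType maps v4f16
// (64-bit store size) to v2i32 and v8i32 to itself (256 bits -> 8 x i32).
// numBitsSigned counts value bits excluding redundant sign bits: an i32 known
// to lie in [-2^23, 2^23) has at least 9 sign bits, so it reports at most 23,
// matching the "fits in 24 bits" checks LowerDIVREM24 makes later in this
// file.
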
AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
                                           const AMDGPUSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  AMDGPUASI = AMDGPU::getAMDGPUAS(TM);
  // Lower floating point store/load to integer store/load to reduce the number
  // of patterns in tablegen.
  setOperationAction(ISD::LOAD, MVT::f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f32, MVT::i32);

  setOperationAction(ISD::LOAD, MVT::v2f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v4f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::v8f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::LOAD, MVT::v16f32, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::LOAD, MVT::i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2i64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::LOAD, MVT::f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::LOAD, MVT::v2f64, Promote);
  AddPromotedToType(ISD::LOAD, MVT::v2f64, MVT::v4i32);

  // There are no 64-bit extloads. These should be done as a 32-bit extload and
  // an extension to 64-bit.
  for (MVT VT : MVT::integer_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::SEXTLOAD, MVT::i64, VT, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, VT, Expand);
  }

  for (MVT VT : MVT::integer_valuetypes()) {
    if (VT == MVT::i64)
      continue;

    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i32, Expand);

    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i8, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i16, Legal);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::i32, Expand);
  }

  for (MVT VT : MVT::integer_vector_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i8, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Expand);
    setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v4i16, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f32, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f32, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f32, MVT::v8f16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f32, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f32, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v2f64, MVT::v2f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v4f64, MVT::v4f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::f32, MVT::i32);

  setOperationAction(ISD::STORE, MVT::v2f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v4f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::v8f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v8f32, MVT::v8i32);

  setOperationAction(ISD::STORE, MVT::v16f32, Promote);
  AddPromotedToType(ISD::STORE, MVT::v16f32, MVT::v16i32);

  setOperationAction(ISD::STORE, MVT::i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2i64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2i64, MVT::v4i32);

  setOperationAction(ISD::STORE, MVT::f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::f64, MVT::v2i32);

  setOperationAction(ISD::STORE, MVT::v2f64, Promote);
  AddPromotedToType(ISD::STORE, MVT::v2f64, MVT::v4i32);

  setTruncStoreAction(MVT::i64, MVT::i1, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);

  setTruncStoreAction(MVT::v2i64, MVT::v2i1, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i8, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i16, Expand);
  setTruncStoreAction(MVT::v2i64, MVT::v2i32, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::v2f32, MVT::v2f16, Expand);
  setTruncStoreAction(MVT::v4f32, MVT::v4f16, Expand);
  setTruncStoreAction(MVT::v8f32, MVT::v8f16, Expand);

  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  setTruncStoreAction(MVT::v2f64, MVT::v2f32, Expand);
  setTruncStoreAction(MVT::v2f64, MVT::v2f16, Expand);

  setTruncStoreAction(MVT::v4f64, MVT::v4f32, Expand);
  setTruncStoreAction(MVT::v4f64, MVT::v4f16, Expand);

  setTruncStoreAction(MVT::v8f64, MVT::v8f32, Expand);
  setTruncStoreAction(MVT::v8f64, MVT::v8f16, Expand);

  setOperationAction(ISD::Constant, MVT::i32, Legal);
  setOperationAction(ISD::Constant, MVT::i64, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
  setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRIND, MVT::Other, Expand);

  // This is totally unsupported, just custom lower to produce an error.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);

  // Library functions. These default to Expand, but we have instructions
  // for them.
  setOperationAction(ISD::FCEIL, MVT::f32, Legal);
  setOperationAction(ISD::FEXP2, MVT::f32, Legal);
  setOperationAction(ISD::FPOW, MVT::f32, Legal);
  setOperationAction(ISD::FLOG2, MVT::f32, Legal);
  setOperationAction(ISD::FABS, MVT::f32, Legal);
  setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
  setOperationAction(ISD::FRINT, MVT::f32, Legal);
  setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
  setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
  setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);

  setOperationAction(ISD::FROUND, MVT::f32, Custom);
  setOperationAction(ISD::FROUND, MVT::f64, Custom);

  setOperationAction(ISD::FLOG, MVT::f32, Custom);
  setOperationAction(ISD::FLOG10, MVT::f32, Custom);

  if (Subtarget->has16BitInsts()) {
    setOperationAction(ISD::FLOG, MVT::f16, Custom);
    setOperationAction(ISD::FLOG10, MVT::f16, Custom);
  }

  setOperationAction(ISD::FNEARBYINT, MVT::f32, Custom);
  setOperationAction(ISD::FNEARBYINT, MVT::f64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Custom);
  setOperationAction(ISD::FREM, MVT::f64, Custom);

  // v_mad_f32 does not support denormals according to some sources.
  if (!Subtarget->hasFP32Denormals())
    setOperationAction(ISD::FMAD, MVT::f32, Legal);
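
  // A side note, inferred from the FMAD_FTZ uses later in this file (e.g. in
  // LowerDIVREM24 and LowerUDIVREM64): when FP32 denormals are enabled, FMAD
  // stays non-Legal and those expansions pick AMDGPUISD::FMAD_FTZ, which
  // flushes denormal results, instead of ISD::FMAD.
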
  // Expand to fneg + fadd.
  setOperationAction(ISD::FSUB, MVT::f64, Expand);

  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i32, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v2i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v4i32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8f32, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, MVT::v8i32, Custom);

  if (Subtarget->getGeneration() < AMDGPUSubtarget::SEA_ISLANDS) {
    setOperationAction(ISD::FCEIL, MVT::f64, Custom);
    setOperationAction(ISD::FTRUNC, MVT::f64, Custom);
    setOperationAction(ISD::FRINT, MVT::f64, Custom);
    setOperationAction(ISD::FFLOOR, MVT::f64, Custom);
  }

  if (!Subtarget->hasBFI()) {
    // fcopysign can be done in a single instruction with BFI.
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  }

  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Custom);
  setOperationAction(ISD::FP_TO_FP16, MVT::f32, Custom);

  const MVT ScalarIntVTs[] = { MVT::i32, MVT::i64 };
  for (MVT VT : ScalarIntVTs) {
    // These should use [SU]DIVREM, so set them to expand
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // GPU does not have divrem function for signed or unsigned.
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Custom);

    // GPU does not have [S|U]MUL_LOHI functions as a single instruction.
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);

    // AMDGPU uses ADDC/SUBC/ADDE/SUBE
    setOperationAction(ISD::ADDC, VT, Legal);
    setOperationAction(ISD::SUBC, VT, Legal);
    setOperationAction(ISD::ADDE, VT, Legal);
    setOperationAction(ISD::SUBE, VT, Legal);
  }

  if (!Subtarget->hasBCNT(32))
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);

  if (!Subtarget->hasBCNT(64))
    setOperationAction(ISD::CTPOP, MVT::i64, Expand);

  // The hardware supports 32-bit ROTR, but not ROTL.
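  // Illustration: a rotate left is a rotate right by the complementary
  // amount, rotl(x, n) == rotr(x, 32 - n) for i32, so only ROTR needs native
  // support. ROTL, and both 64-bit rotates (which have no instruction), are
  // expanded by the legalizer.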
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  setOperationAction(ISD::ROTR, MVT::i64, Expand);

  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i64, Expand);
  setOperationAction(ISD::MULHS, MVT::i64, Expand);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  setOperationAction(ISD::SMIN, MVT::i32, Legal);
  setOperationAction(ISD::UMIN, MVT::i32, Legal);
  setOperationAction(ISD::SMAX, MVT::i32, Legal);
  setOperationAction(ISD::UMAX, MVT::i32, Legal);

  if (Subtarget->hasFFBH())
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);

  if (Subtarget->hasFFBL())
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);

  setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ, MVT::i64, Custom);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);

  // We only really have 32-bit BFE instructions (and 16-bit on VI).
  //
  // On SI+ there are 64-bit BFEs, but they are scalar only and there isn't any
  // effort to match them now. We want this to be false for i64 cases when the
  // extraction isn't restricted to the upper or lower half. Ideally we would
  // have some pass reduce 64-bit extracts to 32-bit if possible. Extracts that
  // span the midpoint are probably relatively rare, so don't worry about them
  // for now.
  if (Subtarget->hasBFE())
    setHasExtractBitsInsn(true);
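
  // Rough illustration of what setHasExtractBitsInsn advertises: with BFE, a
  // pattern like (x >> 4) & 0xff can be selected as a single bitfield
  // extract, v_bfe_u32 dst, x, 4, 8, instead of a shift plus a mask.
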
  static const MVT::SimpleValueType VectorIntTypes[] = {
    MVT::v2i32, MVT::v4i32
  };

  for (MVT VT : VectorIntTypes) {
    // Expand the following operations for the current type by default.
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::AND, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::OR, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Custom);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SELECT, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::XOR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  static const MVT::SimpleValueType FloatVectorTypes[] = {
    MVT::v2f32, MVT::v4f32
  };

  for (MVT VT : FloatVectorTypes) {
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FMINNUM, VT, Expand);
    setOperationAction(ISD::FMAXNUM, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
  }

  // This causes us to use an unrolled select operation rather than expansion
  // with bit operations. This is in general better, but the alternative using
  // BFI instructions may be better if the select sources are SGPRs.
  setOperationAction(ISD::SELECT, MVT::v2f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v2f32, MVT::v2i32);

  setOperationAction(ISD::SELECT, MVT::v4f32, Promote);
  AddPromotedToType(ISD::SELECT, MVT::v4f32, MVT::v4i32);

  // There are no libcalls of any kind.
  for (int I = 0; I < RTLIB::UNKNOWN_LIBCALL; ++I)
    setLibcallName(static_cast<RTLIB::Libcall>(I), nullptr);

  setBooleanContents(ZeroOrNegativeOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  setSchedulingPreference(Sched::RegPressure);
  setJumpIsExpensive(true);

  // FIXME: This is only partially true. If we have to do vector compares, any
  // SGPR pair can be a condition register. If we have a uniform condition, we
  // are better off doing SALU operations, where there is only one SCC. For
  // now, we don't have a way of knowing during instruction selection whether a
  // condition will be uniform, so assume we are using vector compares until
  // that is fixed.
  setHasMultipleConditionRegisters(true);

  // SI at least has hardware support for floating point exceptions, but no way
  // of using or handling them is implemented. They are also optional in OpenCL
  // (Section 7.3).
  setHasFloatingPointExceptions(Subtarget->hasFPExceptions());

  PredictableSelectIsExpensive = false;

  // We want to find all load dependencies for long chains of stores to enable
  // merging into very wide vectors. The problem is with vectors with > 4
  // elements. MergeConsecutiveStores will attempt to merge these because x8/x16
  // vectors are legal types, even though we usually have to split the loads.
  // When we can more precisely specify load legality per address space, we
  // should be able to make FindBetterChain/MergeConsecutiveStores smarter so
  // that they can figure out what to do in 2 iterations without all N > 4
  // stores on the same chain.
  GatherAllAliasesMaxDepth = 16;

  // memcpy/memmove/memset are expanded in the IR, so we shouldn't need to worry
  // about these during lowering.
  MaxStoresPerMemcpy = 0xffffffff;
  MaxStoresPerMemmove = 0xffffffff;
  MaxStoresPerMemset = 0xffffffff;

  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::SHL);
  setTargetDAGCombine(ISD::SRA);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::TRUNCATE);
  setTargetDAGCombine(ISD::MUL);
  setTargetDAGCombine(ISD::MULHU);
  setTargetDAGCombine(ISD::MULHS);
  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::SELECT_CC);
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::FADD);
  setTargetDAGCombine(ISD::FSUB);
  setTargetDAGCombine(ISD::FNEG);
  setTargetDAGCombine(ISD::FABS);
  setTargetDAGCombine(ISD::AssertZext);
  setTargetDAGCombine(ISD::AssertSext);
}

//===----------------------------------------------------------------------===//
// Target Information
//===----------------------------------------------------------------------===//

LLVM_READNONE
static bool fnegFoldsIntoOp(unsigned Opc) {
  switch (Opc) {
  case ISD::FADD:
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FMA:
  case ISD::FMAD:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FSIN:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW:
  case AMDGPUISD::FMUL_LEGACY:
  case AMDGPUISD::FMIN_LEGACY:
  case AMDGPUISD::FMAX_LEGACY:
    return true;
  default:
    return false;
  }
}

/// \returns true if the operation will definitely need to use a 64-bit
/// encoding, and thus will use a VOP3 encoding regardless of the source
/// modifiers.
LLVM_READONLY
static bool opMustUseVOP3Encoding(const SDNode *N, MVT VT) {
  return N->getNumOperands() > 2 || VT == MVT::f64;
}

// Most FP instructions support source modifiers, but this could be refined
// slightly.
LLVM_READONLY
static bool hasSourceMods(const SDNode *N) {
  if (isa<MemSDNode>(N))
    return false;

  switch (N->getOpcode()) {
  case ISD::CopyToReg:
  case ISD::SELECT:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::INLINEASM:
  case AMDGPUISD::INTERP_P1:
  case AMDGPUISD::INTERP_P2:
  case AMDGPUISD::DIV_SCALE:

  // TODO: Should really be looking at the users of the bitcast. These are
  // problematic because bitcasts are used to legalize all stores to integer
  // types.
  case ISD::BITCAST:
    return false;
  default:
    return true;
  }
}
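
// For context: a "source modifier" lets a VALU instruction negate or take
// the absolute value of an operand for free, e.g. v_mul_f32 v0, -|v1|, v2
// folds both an fneg and an fabs into the multiply. The opcodes singled out
// above cannot fold modifiers, so feeding them a modified value would cost
// an explicit instruction.
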
bool AMDGPUTargetLowering::allUsesHaveSourceMods(const SDNode *N,
                                                 unsigned CostThreshold) {
  // Some users (such as 3-operand FMA/MAD) must use a VOP3 encoding, and thus
  // it is truly free to use a source modifier in all cases. If there are
  // multiple users, but using a source modifier for each one would force the
  // VOP3 encoding, there will be a code size increase. Try to avoid increasing
  // code size unless we know it will save on the instruction count.
  unsigned NumMayIncreaseSize = 0;
  MVT VT = N->getValueType(0).getScalarType().getSimpleVT();

  // XXX - Should this limit number of uses to check?
  for (const SDNode *U : N->uses()) {
    if (!hasSourceMods(U))
      return false;

    if (!opMustUseVOP3Encoding(U, VT)) {
      if (++NumMayIncreaseSize > CostThreshold)
        return false;
    }
  }

  return true;
}

MVT AMDGPUTargetLowering::getVectorIdxTy(const DataLayout &) const {
  return MVT::i32;
}

bool AMDGPUTargetLowering::isSelectSupported(SelectSupportKind SelType) const {
  return true;
}

// The backend supports 32 and 64 bit floating point immediates.
// FIXME: Why are we reporting vectors of FP immediates as legal?
bool AMDGPUTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT == MVT::f32 || ScalarVT == MVT::f64 ||
          (ScalarVT == MVT::f16 && Subtarget->has16BitInsts()));
}

// We don't want to shrink f64 / f32 constants.
bool AMDGPUTargetLowering::ShouldShrinkFPConstant(EVT VT) const {
  EVT ScalarVT = VT.getScalarType();
  return (ScalarVT != MVT::f32 && ScalarVT != MVT::f64);
}

bool AMDGPUTargetLowering::shouldReduceLoadWidth(SDNode *N,
                                                 ISD::LoadExtType,
                                                 EVT NewVT) const {
  unsigned NewSize = NewVT.getStoreSizeInBits();

  // If we are reducing to a 32-bit load, this is always better.
  if (NewSize == 32)
    return true;

  EVT OldVT = N->getValueType(0);
  unsigned OldSize = OldVT.getStoreSizeInBits();

  // Don't produce extloads from sub-32-bit types. SI doesn't have scalar
  // extloads, so doing one requires using a buffer_load. In cases where we
  // still couldn't use a scalar load, using the wider load shouldn't really
  // hurt anything.

  // If the old size already required an extload, there's no harm in continuing
  // to reduce the width.
  return (OldSize < 32);
}

bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy,
                                                   EVT CastTy) const {
  assert(LoadTy.getSizeInBits() == CastTy.getSizeInBits());

  if (LoadTy.getScalarType() == MVT::i32)
    return false;

  unsigned LScalarSize = LoadTy.getScalarSizeInBits();
  unsigned CastScalarSize = CastTy.getScalarSizeInBits();

  return (LScalarSize < CastScalarSize) ||
         (CastScalarSize >= 32);
}

// SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also
// profitable with the expansion for 64-bit since it's generally good to
// speculate things.
// FIXME: These should really have the size as a parameter.
bool AMDGPUTargetLowering::isCheapToSpeculateCttz() const {
  return true;
}

bool AMDGPUTargetLowering::isCheapToSpeculateCtlz() const {
  return true;
}

bool AMDGPUTargetLowering::isSDNodeAlwaysUniform(const SDNode *N) const {
  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::EntryToken:
  case ISD::TokenFactor:
    return true;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    switch (IntrID) {
    default:
      return false;
    case Intrinsic::amdgcn_readfirstlane:
    case Intrinsic::amdgcn_readlane:
      return true;
    }
  }
  case ISD::LOAD: {
    // The opcode guarantees this is a LoadSDNode, so use cast<> instead of an
    // unchecked dyn_cast<>.
    const LoadSDNode *L = cast<LoadSDNode>(N);
    return L->getMemOperand()->getAddrSpace() ==
           Subtarget->getAMDGPUAS().CONSTANT_ADDRESS_32BIT;
  }
  }
}

//===---------------------------------------------------------------------===//
// Target Properties
//===---------------------------------------------------------------------===//

bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
  assert(VT.isFloatingPoint());

  // Packed operations do not have a fabs modifier.
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16);
}

bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
  assert(VT.isFloatingPoint());
  return VT == MVT::f32 || VT == MVT::f64 ||
         (Subtarget->has16BitInsts() && VT == MVT::f16) ||
         (Subtarget->hasVOP3PInsts() && VT == MVT::v2f16);
}

bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
                                                        unsigned NumElem,
                                                        unsigned AS) const {
  return true;
}

bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
  // There are few operations which truly have vector input operands. Any vector
  // operation is going to involve operations on each component, and a
  // build_vector will be a copy per element, so it always makes sense to use a
  // build_vector input in place of the extracted element to avoid a copy into a
  // super register.
  //
  // We should probably only do this if all users are extracts only, but this
  // should be the common case.
  return true;
}

bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source.getSizeInBits();
  unsigned DestSize = Dest.getSizeInBits();

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
  // Truncate is just accessing a subregister.

  unsigned SrcSize = Source->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (DestSize == 16 && Subtarget->has16BitInsts())
    return SrcSize >= 32;

  return DestSize < SrcSize && DestSize % 32 == 0;
}

bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
  unsigned SrcSize = Src->getScalarSizeInBits();
  unsigned DestSize = Dest->getScalarSizeInBits();

  if (SrcSize == 16 && Subtarget->has16BitInsts())
    return DestSize >= 32;

  return SrcSize == 32 && DestSize == 64;
}
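
// Illustration of the 16-bit cases above: with 16-bit instructions, an i16
// value lives in the low half of a 32-bit register, so widening it to i32
// (or i64) needs no extra instruction, and truncating i32 to i16 is likewise
// just a matter of using the low half.
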
bool AMDGPUTargetLowering::isZExtFree(EVT Src, EVT Dest) const {
  // Any register load of a 64-bit value really requires 2 32-bit moves. For all
  // practical purposes, the extra mov 0 to load a 64-bit is free. As used,
  // this will enable reducing 64-bit operations to 32-bit, which is always
  // good.

  if (Src == MVT::i16)
    return Dest == MVT::i32 || Dest == MVT::i64;

  return Src == MVT::i32 && Dest == MVT::i64;
}

bool AMDGPUTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  return isZExtFree(Val.getValueType(), VT2);
}

bool AMDGPUTargetLowering::isNarrowingProfitable(EVT SrcVT, EVT DestVT) const {
  // There aren't really 64-bit registers, but pairs of 32-bit ones and only a
  // limited number of native 64-bit operations. Shrinking an operation to fit
  // in a single 32-bit register should always be helpful. As currently used,
  // this is much less general than the name suggests, and is only used in
  // places trying to reduce the sizes of loads. Shrinking loads to < 32-bits is
  // not profitable, and may actually be harmful.
  return SrcVT.getSizeInBits() > 32 && DestVT.getSizeInBits() == 32;
}

//===---------------------------------------------------------------------===//
// TargetLowering Callbacks
//===---------------------------------------------------------------------===//

CCAssignFn *AMDGPUCallLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                  bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return CC_AMDGPU_Kernel;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return CC_AMDGPU;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return CC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

CCAssignFn *AMDGPUCallLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                    bool IsVarArg) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return CC_AMDGPU_Kernel;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_LS:
    return RetCC_SI_Shader;
  case CallingConv::C:
  case CallingConv::Fast:
  case CallingConv::Cold:
    return RetCC_AMDGPU_Func;
  default:
    report_fatal_error("Unsupported calling convention.");
  }
}

/// The SelectionDAGBuilder will automatically promote function arguments
/// with illegal types. However, this does not work for the AMDGPU targets
/// since the function arguments are stored in memory as these illegal types.
/// In order to handle this properly we need to get the original type sizes
/// from the LLVM IR Function and fixup the ISD::InputArg values before
/// passing them to AnalyzeFormalArguments()

/// When the SelectionDAGBuilder computes the Ins, it takes care of splitting
/// input values across multiple registers. Each item in the Ins array
/// represents a single value that will be stored in registers. Ins[x].VT is
/// the value type of the value that will be stored in the register, so
/// whatever SDNode we lower the argument to needs to be this type.
///
/// In order to correctly lower the arguments we need to know the size of each
/// argument. Since Ins[x].VT gives us the size of the register that will
/// hold the value, we need to look at Ins[x].ArgVT to see the 'real' type
/// for the original function argument so that we can deduce the correct memory
/// type to use for Ins[x]. In most cases the correct memory type will be
/// Ins[x].ArgVT. However, this will not always be the case. If, for example,
/// we have a kernel argument of type v8i8, this argument will be split into
/// 8 parts and each part will be represented by its own item in the Ins array.
/// For each part the Ins[x].ArgVT will be the v8i8, which is the full type of
/// the argument before it was split. From this, we deduce that the memory type
/// for each individual part is i8. We pass the memory type as LocVT to the
/// calling convention analysis function and the register type (Ins[x].VT) as
/// the ValVT.
void AMDGPUTargetLowering::analyzeFormalArgumentsCompute(CCState &State,
                              const SmallVectorImpl<ISD::InputArg> &Ins) const {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    const ISD::InputArg &In = Ins[i];
    EVT MemVT;

    unsigned NumRegs = getNumRegisters(State.getContext(), In.ArgVT);

    if (!Subtarget->isAmdHsaOS() &&
        (In.ArgVT == MVT::i16 || In.ArgVT == MVT::i8 || In.ArgVT == MVT::f16)) {
      // The ABI says the caller will extend these values to 32-bits.
      MemVT = In.ArgVT.isInteger() ? MVT::i32 : MVT::f32;
    } else if (NumRegs == 1) {
      // This argument is not split, so the IR type is the memory type.
      assert(!In.Flags.isSplit());
      if (In.ArgVT.isExtended()) {
        // We have an extended type, like i24, so we should just use the
        // register type.
        MemVT = In.VT;
      } else {
        MemVT = In.ArgVT;
      }
    } else if (In.ArgVT.isVector() && In.VT.isVector() &&
               In.ArgVT.getScalarType() == In.VT.getScalarType()) {
      assert(In.ArgVT.getVectorNumElements() > In.VT.getVectorNumElements());
      // We have a vector value which has been split into a vector with
      // the same scalar type, but fewer elements. This should handle
      // all the floating-point vector types.
      MemVT = In.VT;
    } else if (In.ArgVT.isVector() &&
               In.ArgVT.getVectorNumElements() == NumRegs) {
      // This arg has been split so that each element is stored in a separate
      // register.
      MemVT = In.ArgVT.getScalarType();
    } else if (In.ArgVT.isExtended()) {
      // We have an extended type, like i65.
      MemVT = In.VT;
    } else {
      unsigned MemoryBits = In.ArgVT.getStoreSizeInBits() / NumRegs;
      assert(In.ArgVT.getStoreSizeInBits() % NumRegs == 0);
      if (In.VT.isInteger()) {
        MemVT = EVT::getIntegerVT(State.getContext(), MemoryBits);
      } else if (In.VT.isVector()) {
        assert(!In.VT.getScalarType().isFloatingPoint());
        unsigned NumElements = In.VT.getVectorNumElements();
        assert(MemoryBits % NumElements == 0);
        // This vector type has been split into another vector type with
        // a different elements size.
        EVT ScalarVT = EVT::getIntegerVT(State.getContext(),
                                         MemoryBits / NumElements);
        MemVT = EVT::getVectorVT(State.getContext(), ScalarVT, NumElements);
      } else {
        llvm_unreachable("cannot deduce memory type.");
      }
    }

    // Convert one element vectors to scalar.
    if (MemVT.isVector() && MemVT.getVectorNumElements() == 1)
      MemVT = MemVT.getScalarType();

    if (MemVT.isExtended()) {
      // This should really only happen if we have vec3 arguments
      assert(MemVT.isVector() && MemVT.getVectorNumElements() == 3);
      MemVT = MemVT.getPow2VectorType(State.getContext());
    }

    assert(MemVT.isSimple());
    allocateKernArg(i, In.VT, MemVT.getSimpleVT(), CCValAssign::Full, In.Flags,
                    State);
  }
}
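
// Two small examples of the normalization above: an i24 argument is not
// split (NumRegs == 1), but i24 is an extended type, so the register type
// i32 becomes the memory type; and a MemVT of v1i32 is converted to plain
// i32 by the one-element-vector rule before the vec3 pow2 round-up check.
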
SDValue AMDGPUTargetLowering::LowerReturn(
  SDValue Chain, CallingConv::ID CallConv,
  bool isVarArg,
  const SmallVectorImpl<ISD::OutputArg> &Outs,
  const SmallVectorImpl<SDValue> &OutVals,
  const SDLoc &DL, SelectionDAG &DAG) const {
  // FIXME: Fails for r600 tests
  //assert(!isVarArg && Outs.empty() && OutVals.empty() &&
  // "wave terminate should not have return values");
  return DAG.getNode(AMDGPUISD::ENDPGM, DL, MVT::Other, Chain);
}

//===---------------------------------------------------------------------===//
// Target specific lowering
//===---------------------------------------------------------------------===//

/// Selects the correct CCAssignFn for a given CallingConvention value.
CCAssignFn *AMDGPUTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
                                                    bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForCall(CC, IsVarArg);
}

CCAssignFn *AMDGPUTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
                                                      bool IsVarArg) {
  return AMDGPUCallLowering::CCAssignFnForReturn(CC, IsVarArg);
}

SDValue AMDGPUTargetLowering::addTokenForArgument(SDValue Chain,
                                                  SelectionDAG &DAG,
                                                  MachineFrameInfo &MFI,
                                                  int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps the legalizer find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack argument that overlaps the clobbered
  // frame index.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U) {
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U)) {
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr())) {
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }
      }
    }
  }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}
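
// The interval test above is the usual closed-interval overlap check: the
// byte ranges [FirstByte, LastByte] and [InFirstByte, InLastByte] intersect
// iff one range's start lies inside the other, e.g. [8, 15] and [12, 19]
// overlap because 8 <= 12 <= 15.
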
SDValue AMDGPUTargetLowering::lowerUnhandledCall(CallLoweringInfo &CLI,
                                                 SmallVectorImpl<SDValue> &InVals,
                                                 StringRef Reason) const {
  SDValue Callee = CLI.Callee;
  SelectionDAG &DAG = CLI.DAG;

  const Function &Fn = DAG.getMachineFunction().getFunction();

  StringRef FuncName("<unknown>");

  if (const ExternalSymbolSDNode *G = dyn_cast<ExternalSymbolSDNode>(Callee))
    FuncName = G->getSymbol();
  else if (const GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    FuncName = G->getGlobal()->getName();

  DiagnosticInfoUnsupported NoCalls(
      Fn, Reason + FuncName, CLI.DL.getDebugLoc());
  DAG.getContext()->diagnose(NoCalls);

  if (!CLI.IsTailCall) {
    for (unsigned I = 0, E = CLI.Ins.size(); I != E; ++I)
      InVals.push_back(DAG.getUNDEF(CLI.Ins[I].VT));
  }

  return DAG.getEntryNode();
}

SDValue AMDGPUTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                        SmallVectorImpl<SDValue> &InVals) const {
  return lowerUnhandledCall(CLI, InVals, "unsupported call to function ");
}

SDValue AMDGPUTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                                      SelectionDAG &DAG) const {
  const Function &Fn = DAG.getMachineFunction().getFunction();

  DiagnosticInfoUnsupported NoDynamicAlloca(Fn, "unsupported dynamic alloca",
                                            SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(NoDynamicAlloca);
  auto Ops = {DAG.getConstant(0, SDLoc(), Op.getValueType()), Op.getOperand(0)};
  return DAG.getMergeValues(Ops, SDLoc());
}

SDValue AMDGPUTargetLowering::LowerOperation(SDValue Op,
                                             SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    Op->print(errs(), &DAG);
    llvm_unreachable("Custom lowering code for this "
                     "instruction is not implemented yet!");
    break;
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG);
  case ISD::UDIVREM: return LowerUDIVREM(Op, DAG);
  case ISD::SDIVREM: return LowerSDIVREM(Op, DAG);
  case ISD::FREM: return LowerFREM(Op, DAG);
  case ISD::FCEIL: return LowerFCEIL(Op, DAG);
  case ISD::FTRUNC: return LowerFTRUNC(Op, DAG);
  case ISD::FRINT: return LowerFRINT(Op, DAG);
  case ISD::FNEARBYINT: return LowerFNEARBYINT(Op, DAG);
  case ISD::FROUND: return LowerFROUND(Op, DAG);
  case ISD::FFLOOR: return LowerFFLOOR(Op, DAG);
  case ISD::FLOG:
    return LowerFLOG(Op, DAG, 1 / AMDGPU_LOG2E_F);
  case ISD::FLOG10:
    return LowerFLOG(Op, DAG, AMDGPU_LN2_F / AMDGPU_LN10_F);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::CTTZ:
  case ISD::CTTZ_ZERO_UNDEF:
  case ISD::CTLZ:
  case ISD::CTLZ_ZERO_UNDEF:
    return LowerCTLZ_CTTZ(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  }
  return Op;
}
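
// A note on the FLOG/FLOG10 scale factors above: the hardware only provides
// log2, so LowerFLOG lowers to a log2 scaled by the given constant, relying
// on the identities ln(x) = log2(x) * (1 / log2(e)) = log2(x) * ln(2) and
// log10(x) = log2(x) * (ln(2) / ln(10)) = log2(x) * log10(2).
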
void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
                                              SmallVectorImpl<SDValue> &Results,
                                              SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Different parts of legalization seem to interpret which type of
    // sign_extend_inreg is the one to check for custom lowering. The extended
    // from type is what really matters, but some places check for custom
    // lowering of the result type. This results in trying to use
    // ReplaceNodeResults to sext_in_reg to an illegal type, so we'll just do
    // nothing here and let the illegal result integer be handled normally.
    return;
  default:
    return;
  }
}

static bool hasDefinedInitializer(const GlobalValue *GV) {
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar || !GVar->hasInitializer())
    return false;

  return !isa<UndefValue>(GVar->getInitializer());
}

SDValue AMDGPUTargetLowering::LowerGlobalAddress(AMDGPUMachineFunction* MFI,
                                                 SDValue Op,
                                                 SelectionDAG &DAG) const {
  const DataLayout &DL = DAG.getDataLayout();
  GlobalAddressSDNode *G = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = G->getGlobal();

  if (G->getAddressSpace() == AMDGPUASI.LOCAL_ADDRESS ||
      G->getAddressSpace() == AMDGPUASI.REGION_ADDRESS) {
    if (!MFI->isEntryFunction()) {
      const Function &Fn = DAG.getMachineFunction().getFunction();
      DiagnosticInfoUnsupported BadLDSDecl(
          Fn, "local memory global used by non-kernel function",
          SDLoc(Op).getDebugLoc());
      DAG.getContext()->diagnose(BadLDSDecl);
    }

    // XXX: What does the value of G->getOffset() mean?
    assert(G->getOffset() == 0 &&
           "Do not know what to do with a non-zero offset");

    // TODO: We could emit code to handle the initialization somewhere.
    if (!hasDefinedInitializer(GV)) {
      unsigned Offset = MFI->allocateLDSGlobal(DL, *GV);
      return DAG.getConstant(Offset, SDLoc(Op), Op.getValueType());
    }
  }

  const Function &Fn = DAG.getMachineFunction().getFunction();
  DiagnosticInfoUnsupported BadInit(
      Fn, "unsupported initializer for address space", SDLoc(Op).getDebugLoc());
  DAG.getContext()->diagnose(BadInit);
  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerCONCAT_VECTORS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;

  EVT VT = Op.getValueType();
  if (VT == MVT::v4i16 || VT == MVT::v4f16) {
    SDLoc SL(Op);
    SDValue Lo = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(0));
    SDValue Hi = DAG.getNode(ISD::BITCAST, SL, MVT::i32, Op.getOperand(1));

    SDValue BV = DAG.getBuildVector(MVT::v2i32, SL, { Lo, Hi });
    return DAG.getNode(ISD::BITCAST, SL, VT, BV);
  }

  for (const SDUse &U : Op->ops())
    DAG.ExtractVectorElements(U.get(), Args);

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

SDValue AMDGPUTargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SmallVector<SDValue, 8> Args;
  unsigned Start = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  EVT VT = Op.getValueType();
  DAG.ExtractVectorElements(Op.getOperand(0), Args, Start,
                            VT.getVectorNumElements());

  return DAG.getBuildVector(Op.getValueType(), SDLoc(Op), Args);
}

/// Generate Min/Max node
SDValue AMDGPUTargetLowering::combineFMinMaxLegacy(const SDLoc &DL, EVT VT,
                                                   SDValue LHS, SDValue RHS,
                                                   SDValue True, SDValue False,
                                                   SDValue CC,
                                                   DAGCombinerInfo &DCI) const {
  if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(CC)->get();
  switch (CCOpcode) {
  case ISD::SETOEQ:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETNE:
  case ISD::SETUEQ:
  case ISD::SETEQ:
  case ISD::SETFALSE:
  case ISD::SETFALSE2:
  case ISD::SETTRUE:
  case ISD::SETTRUE2:
  case ISD::SETUO:
  case ISD::SETO:
    break;
  case ISD::SETULE:
  case ISD::SETULT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETOLE:
  case ISD::SETOLT:
  case ISD::SETLE:
  case ISD::SETLT: {
    // Ordered. Assume ordered for undefined.

    // Only do this after legalization to avoid interfering with other combines
    // which might occur.
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    // We need to permute the operands to get the correct NaN behavior. The
    // selected operand is the second one based on the failing compare with NaN,
    // so permute it based on the compare type the hardware uses.
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETUGE:
  case ISD::SETUGT: {
    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, RHS, LHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, LHS, RHS);
  }
  case ISD::SETGT:
  case ISD::SETGE:
  case ISD::SETOGE:
  case ISD::SETOGT: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG &&
        !DCI.isCalledByLegalizer())
      return SDValue();

    if (LHS == True)
      return DAG.getNode(AMDGPUISD::FMAX_LEGACY, DL, VT, LHS, RHS);
    return DAG.getNode(AMDGPUISD::FMIN_LEGACY, DL, VT, RHS, LHS);
  }
  case ISD::SETCC_INVALID:
    llvm_unreachable("Invalid setcc condcode!");
  }
  return SDValue();
}

std::pair<SDValue, SDValue>
AMDGPUTargetLowering::split64BitValue(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  return std::make_pair(Lo, Hi);
}

SDValue AMDGPUTargetLowering::getLoHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
}

SDValue AMDGPUTargetLowering::getHiHalf64(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Op);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);
}

SDValue AMDGPUTargetLowering::SplitVectorLoad(const SDValue Op,
                                              SelectionDAG &DAG) const {
  LoadSDNode *Load = cast<LoadSDNode>(Op);
  EVT VT = Op.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorLoad(Load, DAG);

  SDValue BasePtr = Load->getBasePtr();
  EVT MemVT = Load->getMemoryVT();
  SDLoc SL(Op);

  const MachinePointerInfo &SrcValue = Load->getMemOperand()->getPointerInfo();

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Op, SL, LoVT, HiVT);

  unsigned Size = LoMemVT.getStoreSize();
  unsigned BaseAlign = Load->getAlignment();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoLoad = DAG.getExtLoad(Load->getExtensionType(), SL, LoVT,
                                  Load->getChain(), BasePtr, SrcValue, LoMemVT,
                                  BaseAlign, Load->getMemOperand()->getFlags());
  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, Size);
  SDValue HiLoad =
      DAG.getExtLoad(Load->getExtensionType(), SL, HiVT, Load->getChain(),
                     HiPtr, SrcValue.getWithOffset(LoMemVT.getStoreSize()),
                     HiMemVT, HiAlign, Load->getMemOperand()->getFlags());

  SDValue Ops[] = {
    DAG.getNode(ISD::CONCAT_VECTORS, SL, VT, LoLoad, HiLoad),
    DAG.getNode(ISD::TokenFactor, SL, MVT::Other,
                LoLoad.getValue(1), HiLoad.getValue(1))
  };

  return DAG.getMergeValues(Ops, SL);
}

SDValue AMDGPUTargetLowering::SplitVectorStore(SDValue Op,
                                               SelectionDAG &DAG) const {
  StoreSDNode *Store = cast<StoreSDNode>(Op);
  SDValue Val = Store->getValue();
  EVT VT = Val.getValueType();

  // If this is a 2 element vector, we really want to scalarize and not create
  // weird 1 element vectors.
  if (VT.getVectorNumElements() == 2)
    return scalarizeVectorStore(Store, DAG);

  EVT MemVT = Store->getMemoryVT();
  SDValue Chain = Store->getChain();
  SDValue BasePtr = Store->getBasePtr();
  SDLoc SL(Op);

  EVT LoVT, HiVT;
  EVT LoMemVT, HiMemVT;
  SDValue Lo, Hi;

  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  std::tie(LoMemVT, HiMemVT) = DAG.GetSplitDestVTs(MemVT);
  std::tie(Lo, Hi) = DAG.SplitVector(Val, SL, LoVT, HiVT);

  SDValue HiPtr = DAG.getObjectPtrOffset(SL, BasePtr, LoMemVT.getStoreSize());

  const MachinePointerInfo &SrcValue = Store->getMemOperand()->getPointerInfo();
  unsigned BaseAlign = Store->getAlignment();
  unsigned Size = LoMemVT.getStoreSize();
  unsigned HiAlign = MinAlign(BaseAlign, Size);

  SDValue LoStore =
      DAG.getTruncStore(Chain, SL, Lo, BasePtr, SrcValue, LoMemVT, BaseAlign,
                        Store->getMemOperand()->getFlags());
  SDValue HiStore =
      DAG.getTruncStore(Chain, SL, Hi, HiPtr, SrcValue.getWithOffset(Size),
                        HiMemVT, HiAlign, Store->getMemOperand()->getFlags());

  return DAG.getNode(ISD::TokenFactor, SL, MVT::Other, LoStore, HiStore);
}

// This is a shortcut for integer division because we have fast i32<->f32
// conversions, and fast f32 reciprocal instructions. The fractional part of a
// float is enough to accurately represent up to a 24-bit signed integer.
SDValue AMDGPUTargetLowering::LowerDIVREM24(SDValue Op, SelectionDAG &DAG,
                                            bool Sign) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  MVT IntVT = MVT::i32;
  MVT FltVT = MVT::f32;

  unsigned LHSSignBits = DAG.ComputeNumSignBits(LHS);
  if (LHSSignBits < 9)
    return SDValue();

  unsigned RHSSignBits = DAG.ComputeNumSignBits(RHS);
  if (RHSSignBits < 9)
    return SDValue();

  unsigned BitSize = VT.getSizeInBits();
  unsigned SignBits = std::min(LHSSignBits, RHSSignBits);
  unsigned DivBits = BitSize - SignBits;
  if (Sign)
    ++DivBits;

  ISD::NodeType ToFp = Sign ? ISD::SINT_TO_FP : ISD::UINT_TO_FP;
  ISD::NodeType ToInt = Sign ? ISD::FP_TO_SINT : ISD::FP_TO_UINT;

  SDValue jq = DAG.getConstant(1, DL, IntVT);

  if (Sign) {
    // char|short jq = ia ^ ib;
    jq = DAG.getNode(ISD::XOR, DL, VT, LHS, RHS);

    // jq = jq >> (bitsize - 2)
    jq = DAG.getNode(ISD::SRA, DL, VT, jq,
                     DAG.getConstant(BitSize - 2, DL, VT));

    // jq = jq | 0x1
    jq = DAG.getNode(ISD::OR, DL, VT, jq, DAG.getConstant(1, DL, VT));
  }

  // int ia = (int)LHS;
  SDValue ia = LHS;

  // int ib = (int)RHS;
  SDValue ib = RHS;

  // float fa = (float)ia;
  SDValue fa = DAG.getNode(ToFp, DL, FltVT, ia);

  // float fb = (float)ib;
  SDValue fb = DAG.getNode(ToFp, DL, FltVT, ib);

  SDValue fq = DAG.getNode(ISD::FMUL, DL, FltVT,
                           fa, DAG.getNode(AMDGPUISD::RCP, DL, FltVT, fb));

  // fq = trunc(fq);
  fq = DAG.getNode(ISD::FTRUNC, DL, FltVT, fq);

  // float fqneg = -fq;
  SDValue fqneg = DAG.getNode(ISD::FNEG, DL, FltVT, fq);

  // float fr = mad(fqneg, fb, fa);
  unsigned OpCode = Subtarget->hasFP32Denormals() ?
                    (unsigned)AMDGPUISD::FMAD_FTZ :
                    (unsigned)ISD::FMAD;
  SDValue fr = DAG.getNode(OpCode, DL, FltVT, fqneg, fb, fa);

  // int iq = (int)fq;
  SDValue iq = DAG.getNode(ToInt, DL, IntVT, fq);

  // fr = fabs(fr);
  fr = DAG.getNode(ISD::FABS, DL, FltVT, fr);

  // fb = fabs(fb);
  fb = DAG.getNode(ISD::FABS, DL, FltVT, fb);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT);

  // int cv = fr >= fb;
  SDValue cv = DAG.getSetCC(DL, SetCCVT, fr, fb, ISD::SETOGE);

  // jq = (cv ? jq : 0);
  jq = DAG.getNode(ISD::SELECT, DL, VT, cv, jq, DAG.getConstant(0, DL, VT));

  // dst = iq + jq;
  SDValue Div = DAG.getNode(ISD::ADD, DL, VT, iq, jq);

  // Rem needs compensation; it's easier to recompute it.
  SDValue Rem = DAG.getNode(ISD::MUL, DL, VT, Div, RHS);
  Rem = DAG.getNode(ISD::SUB, DL, VT, LHS, Rem);

  // Truncate to the number of bits this divide really is.
  if (Sign) {
    SDValue InRegSize
      = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits));
    Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize);
    Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize);
  } else {
    SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT);
    Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask);
    Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask);
  }

  return DAG.getMergeValues({ Div, Rem }, DL);
}
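
// Unsigned walk-through of the sequence above: for 100 / 7, fq =
// trunc(100.0 * rcp(7.0)) = 14.0, fr = |mad(-14.0, 7.0, 100.0)| = 2.0, and
// since 2.0 < 7.0 the correction jq is dropped, giving Div = 14 and
// Rem = 100 - 14 * 7 = 2. If the reciprocal had rounded fq down to 13.0,
// fr would be 9.0 >= 7.0 and jq = 1 would bump the quotient back to 14.
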
1558 if (Sign) { 1559 SDValue InRegSize 1560 = DAG.getValueType(EVT::getIntegerVT(*DAG.getContext(), DivBits)); 1561 Div = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Div, InRegSize); 1562 Rem = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Rem, InRegSize); 1563 } else { 1564 SDValue TruncMask = DAG.getConstant((UINT64_C(1) << DivBits) - 1, DL, VT); 1565 Div = DAG.getNode(ISD::AND, DL, VT, Div, TruncMask); 1566 Rem = DAG.getNode(ISD::AND, DL, VT, Rem, TruncMask); 1567 } 1568 1569 return DAG.getMergeValues({ Div, Rem }, DL); 1570 } 1571 1572 void AMDGPUTargetLowering::LowerUDIVREM64(SDValue Op, 1573 SelectionDAG &DAG, 1574 SmallVectorImpl<SDValue> &Results) const { 1575 SDLoc DL(Op); 1576 EVT VT = Op.getValueType(); 1577 1578 assert(VT == MVT::i64 && "LowerUDIVREM64 expects an i64"); 1579 1580 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1581 1582 SDValue One = DAG.getConstant(1, DL, HalfVT); 1583 SDValue Zero = DAG.getConstant(0, DL, HalfVT); 1584 1585 //HiLo split 1586 SDValue LHS = Op.getOperand(0); 1587 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1588 SDValue LHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, One); 1589 1590 SDValue RHS = Op.getOperand(1); 1591 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1592 SDValue RHS_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, One); 1593 1594 if (DAG.MaskedValueIsZero(RHS, APInt::getHighBitsSet(64, 32)) && 1595 DAG.MaskedValueIsZero(LHS, APInt::getHighBitsSet(64, 32))) { 1596 1597 SDValue Res = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1598 LHS_Lo, RHS_Lo); 1599 1600 SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(0), Zero}); 1601 SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {Res.getValue(1), Zero}); 1602 1603 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV)); 1604 Results.push_back(DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM)); 1605 return; 1606 } 1607 1608 if (isTypeLegal(MVT::i64)) { 1609 // Compute denominator reciprocal. 1610 unsigned FMAD = Subtarget->hasFP32Denormals() ? 
                    (unsigned)AMDGPUISD::FMAD_FTZ :
                    (unsigned)ISD::FMAD;

    SDValue Cvt_Lo = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Lo);
    SDValue Cvt_Hi = DAG.getNode(ISD::UINT_TO_FP, DL, MVT::f32, RHS_Hi);
    // 0x4f800000 is 2^32 as an f32, so Mad1 approximates the full 64-bit
    // denominator as Cvt_Hi * 2^32 + Cvt_Lo.
    SDValue Mad1 = DAG.getNode(FMAD, DL, MVT::f32, Cvt_Hi,
      DAG.getConstantFP(APInt(32, 0x4f800000).bitsToFloat(), DL, MVT::f32),
      Cvt_Lo);
    SDValue Rcp = DAG.getNode(AMDGPUISD::RCP, DL, MVT::f32, Mad1);
    // Scale the reciprocal by just under 2^64 (0x5f7ffffc) to form a 64-bit
    // fixed-point approximation of 2^64 / RHS; 0x2f800000 is 2^-32 and
    // 0xcf800000 is -2^32, which split that value into its high and low
    // 32-bit halves (Rcp_Hi and Rcp_Lo).
    SDValue Mul1 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Rcp,
      DAG.getConstantFP(APInt(32, 0x5f7ffffc).bitsToFloat(), DL, MVT::f32));
    SDValue Mul2 = DAG.getNode(ISD::FMUL, DL, MVT::f32, Mul1,
      DAG.getConstantFP(APInt(32, 0x2f800000).bitsToFloat(), DL, MVT::f32));
    SDValue Trunc = DAG.getNode(ISD::FTRUNC, DL, MVT::f32, Mul2);
    SDValue Mad2 = DAG.getNode(FMAD, DL, MVT::f32, Trunc,
      DAG.getConstantFP(APInt(32, 0xcf800000).bitsToFloat(), DL, MVT::f32),
      Mul1);
    SDValue Rcp_Lo = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Mad2);
    SDValue Rcp_Hi = DAG.getNode(ISD::FP_TO_UINT, DL, HalfVT, Trunc);
    SDValue Rcp64 = DAG.getBitcast(VT,
                        DAG.getBuildVector(MVT::v2i32, DL, {Rcp_Lo, Rcp_Hi}));

    SDValue Zero64 = DAG.getConstant(0, DL, VT);
    SDValue One64 = DAG.getConstant(1, DL, VT);
    SDValue Zero1 = DAG.getConstant(0, DL, MVT::i1);
    SDVTList HalfCarryVT = DAG.getVTList(HalfVT, MVT::i1);

    // Refine the estimate: Mullo1 = -RHS * Rcp64 is the fixed-point error
    // term 2^64 - RHS * Rcp64, and the wide multiply-add below folds it back
    // into the reciprocal (a Newton-Raphson style step, done twice).
    SDValue Neg_RHS = DAG.getNode(ISD::SUB, DL, VT, Zero64, RHS);
    SDValue Mullo1 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Rcp64);
    SDValue Mulhi1 = DAG.getNode(ISD::MULHU, DL, VT, Rcp64, Mullo1);
    SDValue Mulhi1_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
                                    Zero);
    SDValue Mulhi1_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi1,
                                    One);

    SDValue Add1_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Lo,
                                  Mulhi1_Lo, Zero1);
    SDValue Add1_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Rcp_Hi,
                                  Mulhi1_Hi, Add1_Lo.getValue(1));
    SDValue Add1_HiNc = DAG.getNode(ISD::ADD, DL, HalfVT, Rcp_Hi, Mulhi1_Hi);
    SDValue Add1 = DAG.getBitcast(VT,
                       DAG.getBuildVector(MVT::v2i32, DL, {Add1_Lo, Add1_Hi}));

    SDValue Mullo2 = DAG.getNode(ISD::MUL, DL, VT, Neg_RHS, Add1);
    SDValue Mulhi2 = DAG.getNode(ISD::MULHU, DL, VT, Add1, Mullo2);
    SDValue Mulhi2_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
                                    Zero);
    SDValue Mulhi2_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mulhi2,
                                    One);

    SDValue Add2_Lo = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_Lo,
                                  Mulhi2_Lo, Zero1);
    SDValue Add2_HiC = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add1_HiNc,
                                   Mulhi2_Hi, Add1_Lo.getValue(1));
    SDValue Add2_Hi = DAG.getNode(ISD::ADDCARRY, DL, HalfCarryVT, Add2_HiC,
                                  Zero, Add2_Lo.getValue(1));
    SDValue Add2 = DAG.getBitcast(VT,
                       DAG.getBuildVector(MVT::v2i32, DL, {Add2_Lo, Add2_Hi}));
    SDValue Mulhi3 = DAG.getNode(ISD::MULHU, DL, VT, LHS, Add2);

    SDValue Mul3 = DAG.getNode(ISD::MUL, DL, VT, RHS, Mulhi3);

    SDValue Mul3_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, Zero);
    SDValue Mul3_Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, Mul3, One);
    SDValue Sub1_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Lo,
                                  Mul3_Lo, Zero1);
    SDValue Sub1_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, LHS_Hi,
                                  Mul3_Hi, Sub1_Lo.getValue(1));
    SDValue Sub1_Mi = DAG.getNode(ISD::SUB, DL, HalfVT, LHS_Hi, Mul3_Hi);
    SDValue Sub1 = DAG.getBitcast(VT,
                       DAG.getBuildVector(MVT::v2i32, DL, {Sub1_Lo, Sub1_Hi}));

    SDValue MinusOne = DAG.getConstant(0xffffffffu, DL, HalfVT);
    SDValue C1 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C2 = DAG.getSelectCC(DL, Sub1_Lo, RHS_Lo, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C3 = DAG.getSelectCC(DL, Sub1_Hi, RHS_Hi, C2, C1, ISD::SETEQ);

    // TODO: Here and below portions of the code can be enclosed in if/endif.
    // Currently control flow is unconditional and we have 4 selects after
    // potential endif to substitute PHIs.

    // if C3 != 0 ...
    SDValue Sub2_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Lo,
                                  RHS_Lo, Zero1);
    SDValue Sub2_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub1_Mi,
                                  RHS_Hi, Sub1_Lo.getValue(1));
    SDValue Sub2_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
                                  Zero, Sub2_Lo.getValue(1));
    SDValue Sub2 = DAG.getBitcast(VT,
                       DAG.getBuildVector(MVT::v2i32, DL, {Sub2_Lo, Sub2_Hi}));

    SDValue Add3 = DAG.getNode(ISD::ADD, DL, VT, Mulhi3, One64);

    SDValue C4 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C5 = DAG.getSelectCC(DL, Sub2_Lo, RHS_Lo, MinusOne, Zero,
                                 ISD::SETUGE);
    SDValue C6 = DAG.getSelectCC(DL, Sub2_Hi, RHS_Hi, C5, C4, ISD::SETEQ);

    // if (C6 != 0)
    SDValue Add4 = DAG.getNode(ISD::ADD, DL, VT, Add3, One64);

    SDValue Sub3_Lo = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Lo,
                                  RHS_Lo, Zero1);
    SDValue Sub3_Mi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub2_Mi,
                                  RHS_Hi, Sub2_Lo.getValue(1));
    SDValue Sub3_Hi = DAG.getNode(ISD::SUBCARRY, DL, HalfCarryVT, Sub3_Mi,
                                  Zero, Sub3_Lo.getValue(1));
    SDValue Sub3 = DAG.getBitcast(VT,
                       DAG.getBuildVector(MVT::v2i32, DL, {Sub3_Lo, Sub3_Hi}));

    // endif C6
    // endif C3

    SDValue Sel1 = DAG.getSelectCC(DL, C6, Zero, Add4, Add3, ISD::SETNE);
    SDValue Div = DAG.getSelectCC(DL, C3, Zero, Sel1, Mulhi3, ISD::SETNE);

    SDValue Sel2 = DAG.getSelectCC(DL, C6, Zero, Sub3, Sub2, ISD::SETNE);
    SDValue Rem = DAG.getSelectCC(DL, C3, Zero, Sel2, Sub1, ISD::SETNE);

    Results.push_back(Div);
    Results.push_back(Rem);

    return;
  }

  // r600 expansion.
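  // Restoring long division: seed REM with the (speculative) remainder of the
  // high half, then for each bit of LHS_Lo from the top down, shift REM left,
  // bring that bit in, and subtract RHS (setting the matching quotient bit)
  // whenever REM >= RHS.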
  // Get speculative values.
  SDValue DIV_Part = DAG.getNode(ISD::UDIV, DL, HalfVT, LHS_Hi, RHS_Lo);
  SDValue REM_Part = DAG.getNode(ISD::UREM, DL, HalfVT, LHS_Hi, RHS_Lo);

  SDValue REM_Lo = DAG.getSelectCC(DL, RHS_Hi, Zero, REM_Part, LHS_Hi, ISD::SETEQ);
  SDValue REM = DAG.getBuildVector(MVT::v2i32, DL, {REM_Lo, Zero});
  REM = DAG.getNode(ISD::BITCAST, DL, MVT::i64, REM);

  SDValue DIV_Hi = DAG.getSelectCC(DL, RHS_Hi, Zero, DIV_Part, Zero, ISD::SETEQ);
  SDValue DIV_Lo = Zero;

  const unsigned halfBitWidth = HalfVT.getSizeInBits();

  for (unsigned i = 0; i < halfBitWidth; ++i) {
    const unsigned bitPos = halfBitWidth - i - 1;
    SDValue POS = DAG.getConstant(bitPos, DL, HalfVT);
    // Get value of high bit
    SDValue HBit = DAG.getNode(ISD::SRL, DL, HalfVT, LHS_Lo, POS);
    HBit = DAG.getNode(ISD::AND, DL, HalfVT, HBit, One);
    HBit = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, HBit);

    // Shift
    REM = DAG.getNode(ISD::SHL, DL, VT, REM, DAG.getConstant(1, DL, VT));
    // Add LHS high bit
    REM = DAG.getNode(ISD::OR, DL, VT, REM, HBit);

    SDValue BIT = DAG.getConstant(1ULL << bitPos, DL, HalfVT);
    SDValue realBIT = DAG.getSelectCC(DL, REM, RHS, BIT, Zero, ISD::SETUGE);

    DIV_Lo = DAG.getNode(ISD::OR, DL, HalfVT, DIV_Lo, realBIT);

    // Update REM
    SDValue REM_sub = DAG.getNode(ISD::SUB, DL, VT, REM, RHS);
    REM = DAG.getSelectCC(DL, REM, RHS, REM_sub, REM, ISD::SETUGE);
  }

  SDValue DIV = DAG.getBuildVector(MVT::v2i32, DL, {DIV_Lo, DIV_Hi});
  DIV = DAG.getNode(ISD::BITCAST, DL, MVT::i64, DIV);
  Results.push_back(DIV);
  Results.push_back(REM);
}

SDValue AMDGPUTargetLowering::LowerUDIVREM(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i64) {
    SmallVector<SDValue, 2> Results;
    LowerUDIVREM64(Op, DAG, Results);
    return DAG.getMergeValues(Results, DL);
  }

  if (VT == MVT::i32) {
    if (SDValue Res = LowerDIVREM24(Op, DAG, false))
      return Res;
  }

  SDValue Num = Op.getOperand(0);
  SDValue Den = Op.getOperand(1);

  // RCP = URECIP(Den) = 2^32 / Den + e
  // e is the rounding error.
  SDValue RCP = DAG.getNode(AMDGPUISD::URECIP, DL, VT, Den);

  // RCP_LO = mul(RCP, Den)
  SDValue RCP_LO = DAG.getNode(ISD::MUL, DL, VT, RCP, Den);

  // RCP_HI = mulhu(RCP, Den)
  SDValue RCP_HI = DAG.getNode(ISD::MULHU, DL, VT, RCP, Den);

  // NEG_RCP_LO = -RCP_LO
  SDValue NEG_RCP_LO = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                   RCP_LO);

  // ABS_RCP_LO = (RCP_HI == 0 ? NEG_RCP_LO : RCP_LO)
  SDValue ABS_RCP_LO = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                       NEG_RCP_LO, RCP_LO,
                                       ISD::SETEQ);
  // Calculate the rounding error from the URECIP instruction:
  // E = mulhu(ABS_RCP_LO, RCP)
  SDValue E = DAG.getNode(ISD::MULHU, DL, VT, ABS_RCP_LO, RCP);

  // RCP_A_E = RCP + E
  SDValue RCP_A_E = DAG.getNode(ISD::ADD, DL, VT, RCP, E);

  // RCP_S_E = RCP - E
  SDValue RCP_S_E = DAG.getNode(ISD::SUB, DL, VT, RCP, E);

  // Tmp0 = (RCP_HI == 0 ? RCP_A_E : RCP_S_E)
  SDValue Tmp0 = DAG.getSelectCC(DL, RCP_HI, DAG.getConstant(0, DL, VT),
                                 RCP_A_E, RCP_S_E,
                                 ISD::SETEQ);
  // Quotient = mulhu(Tmp0, Num)
  SDValue Quotient = DAG.getNode(ISD::MULHU, DL, VT, Tmp0, Num);

  // Num_S_Remainder = Quotient * Den
  SDValue Num_S_Remainder = DAG.getNode(ISD::MUL, DL, VT, Quotient, Den);

  // Remainder = Num - Num_S_Remainder
  SDValue Remainder = DAG.getNode(ISD::SUB, DL, VT, Num, Num_S_Remainder);

  // Remainder_GE_Den = (Remainder >= Den ? -1 : 0)
  SDValue Remainder_GE_Den = DAG.getSelectCC(DL, Remainder, Den,
                                             DAG.getConstant(-1, DL, VT),
                                             DAG.getConstant(0, DL, VT),
                                             ISD::SETUGE);
  // Remainder_GE_Zero = (Num >= Num_S_Remainder ? -1 : 0)
  SDValue Remainder_GE_Zero = DAG.getSelectCC(DL, Num,
                                              Num_S_Remainder,
                                              DAG.getConstant(-1, DL, VT),
                                              DAG.getConstant(0, DL, VT),
                                              ISD::SETUGE);
  // Tmp1 = Remainder_GE_Den & Remainder_GE_Zero
  SDValue Tmp1 = DAG.getNode(ISD::AND, DL, VT, Remainder_GE_Den,
                             Remainder_GE_Zero);

  // Calculate Division result:

  // Quotient_A_One = Quotient + 1
  SDValue Quotient_A_One = DAG.getNode(ISD::ADD, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Quotient_S_One = Quotient - 1
  SDValue Quotient_S_One = DAG.getNode(ISD::SUB, DL, VT, Quotient,
                                       DAG.getConstant(1, DL, VT));

  // Div = (Tmp1 == 0 ? Quotient : Quotient_A_One)
  SDValue Div = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Quotient, Quotient_A_One, ISD::SETEQ);

  // Div = (Remainder_GE_Zero == 0 ? Quotient_S_One : Div)
  Div = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT),
                        Quotient_S_One, Div, ISD::SETEQ);

  // Calculate Rem result:

  // Remainder_S_Den = Remainder - Den
  SDValue Remainder_S_Den = DAG.getNode(ISD::SUB, DL, VT, Remainder, Den);

  // Remainder_A_Den = Remainder + Den
  SDValue Remainder_A_Den = DAG.getNode(ISD::ADD, DL, VT, Remainder, Den);

  // Rem = (Tmp1 == 0 ? Remainder : Remainder_S_Den)
  SDValue Rem = DAG.getSelectCC(DL, Tmp1, DAG.getConstant(0, DL, VT),
                                Remainder, Remainder_S_Den, ISD::SETEQ);

  // Rem = (Remainder_GE_Zero == 0 ?
Remainder_A_Den : Rem) 1888 Rem = DAG.getSelectCC(DL, Remainder_GE_Zero, DAG.getConstant(0, DL, VT), 1889 Remainder_A_Den, Rem, ISD::SETEQ); 1890 SDValue Ops[2] = { 1891 Div, 1892 Rem 1893 }; 1894 return DAG.getMergeValues(Ops, DL); 1895 } 1896 1897 SDValue AMDGPUTargetLowering::LowerSDIVREM(SDValue Op, 1898 SelectionDAG &DAG) const { 1899 SDLoc DL(Op); 1900 EVT VT = Op.getValueType(); 1901 1902 SDValue LHS = Op.getOperand(0); 1903 SDValue RHS = Op.getOperand(1); 1904 1905 SDValue Zero = DAG.getConstant(0, DL, VT); 1906 SDValue NegOne = DAG.getConstant(-1, DL, VT); 1907 1908 if (VT == MVT::i32) { 1909 if (SDValue Res = LowerDIVREM24(Op, DAG, true)) 1910 return Res; 1911 } 1912 1913 if (VT == MVT::i64 && 1914 DAG.ComputeNumSignBits(LHS) > 32 && 1915 DAG.ComputeNumSignBits(RHS) > 32) { 1916 EVT HalfVT = VT.getHalfSizedIntegerVT(*DAG.getContext()); 1917 1918 //HiLo split 1919 SDValue LHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, LHS, Zero); 1920 SDValue RHS_Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, HalfVT, RHS, Zero); 1921 SDValue DIVREM = DAG.getNode(ISD::SDIVREM, DL, DAG.getVTList(HalfVT, HalfVT), 1922 LHS_Lo, RHS_Lo); 1923 SDValue Res[2] = { 1924 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(0)), 1925 DAG.getNode(ISD::SIGN_EXTEND, DL, VT, DIVREM.getValue(1)) 1926 }; 1927 return DAG.getMergeValues(Res, DL); 1928 } 1929 1930 SDValue LHSign = DAG.getSelectCC(DL, LHS, Zero, NegOne, Zero, ISD::SETLT); 1931 SDValue RHSign = DAG.getSelectCC(DL, RHS, Zero, NegOne, Zero, ISD::SETLT); 1932 SDValue DSign = DAG.getNode(ISD::XOR, DL, VT, LHSign, RHSign); 1933 SDValue RSign = LHSign; // Remainder sign is the same as LHS 1934 1935 LHS = DAG.getNode(ISD::ADD, DL, VT, LHS, LHSign); 1936 RHS = DAG.getNode(ISD::ADD, DL, VT, RHS, RHSign); 1937 1938 LHS = DAG.getNode(ISD::XOR, DL, VT, LHS, LHSign); 1939 RHS = DAG.getNode(ISD::XOR, DL, VT, RHS, RHSign); 1940 1941 SDValue Div = DAG.getNode(ISD::UDIVREM, DL, DAG.getVTList(VT, VT), LHS, RHS); 1942 SDValue Rem = Div.getValue(1); 1943 1944 Div = DAG.getNode(ISD::XOR, DL, VT, Div, DSign); 1945 Rem = DAG.getNode(ISD::XOR, DL, VT, Rem, RSign); 1946 1947 Div = DAG.getNode(ISD::SUB, DL, VT, Div, DSign); 1948 Rem = DAG.getNode(ISD::SUB, DL, VT, Rem, RSign); 1949 1950 SDValue Res[2] = { 1951 Div, 1952 Rem 1953 }; 1954 return DAG.getMergeValues(Res, DL); 1955 } 1956 1957 // (frem x, y) -> (fsub x, (fmul (ftrunc (fdiv x, y)), y)) 1958 SDValue AMDGPUTargetLowering::LowerFREM(SDValue Op, SelectionDAG &DAG) const { 1959 SDLoc SL(Op); 1960 EVT VT = Op.getValueType(); 1961 SDValue X = Op.getOperand(0); 1962 SDValue Y = Op.getOperand(1); 1963 1964 // TODO: Should this propagate fast-math-flags? 
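  // For example, frem(5.5, 2.0): fdiv gives 2.75, ftrunc gives 2.0, fmul
  // gives 4.0, and fsub yields 1.5. Truncating (rather than flooring) the
  // quotient makes the result take the sign of the dividend, as frem
  // requires: frem(-5.5, 2.0) = -1.5.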

  SDValue Div = DAG.getNode(ISD::FDIV, SL, VT, X, Y);
  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, VT, Div);
  SDValue Mul = DAG.getNode(ISD::FMUL, SL, VT, Trunc, Y);

  return DAG.getNode(ISD::FSUB, SL, VT, X, Mul);
}

SDValue AMDGPUTargetLowering::LowerFCEIL(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  // result = trunc(src)
  // if (src > 0.0 && src != result)
  //   result += 1.0

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue One = DAG.getConstantFP(1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Gt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOGT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Gt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, One, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

static SDValue extractF64Exponent(SDValue Hi, const SDLoc &SL,
                                  SelectionDAG &DAG) {
  const unsigned FractBits = 52;
  const unsigned ExpBits = 11;

  SDValue ExpPart = DAG.getNode(AMDGPUISD::BFE_U32, SL, MVT::i32,
                                Hi,
                                DAG.getConstant(FractBits - 32, SL, MVT::i32),
                                DAG.getConstant(ExpBits, SL, MVT::i32));
  SDValue Exp = DAG.getNode(ISD::SUB, SL, MVT::i32, ExpPart,
                            DAG.getConstant(1023, SL, MVT::i32));

  return Exp;
}

SDValue AMDGPUTargetLowering::LowerFTRUNC(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);

  assert(Op.getValueType() == MVT::f64);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VecSrc = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  // Extract the upper half, since this is where we will find the sign and
  // exponent.
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, VecSrc, One);

  SDValue Exp = extractF64Exponent(Hi, SL, DAG);

  const unsigned FractBits = 52;

  // Extract the sign bit.
  const SDValue SignBitMask = DAG.getConstant(UINT32_C(1) << 31, SL, MVT::i32);
  SDValue SignBit = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, SignBitMask);

  // Extend back to 64-bits.
2037 SDValue SignBit64 = DAG.getBuildVector(MVT::v2i32, SL, {Zero, SignBit}); 2038 SignBit64 = DAG.getNode(ISD::BITCAST, SL, MVT::i64, SignBit64); 2039 2040 SDValue BcInt = DAG.getNode(ISD::BITCAST, SL, MVT::i64, Src); 2041 const SDValue FractMask 2042 = DAG.getConstant((UINT64_C(1) << FractBits) - 1, SL, MVT::i64); 2043 2044 SDValue Shr = DAG.getNode(ISD::SRA, SL, MVT::i64, FractMask, Exp); 2045 SDValue Not = DAG.getNOT(SL, Shr, MVT::i64); 2046 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, BcInt, Not); 2047 2048 EVT SetCCVT = 2049 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2050 2051 const SDValue FiftyOne = DAG.getConstant(FractBits - 1, SL, MVT::i32); 2052 2053 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2054 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2055 2056 SDValue Tmp1 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpLt0, SignBit64, Tmp0); 2057 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, ExpGt51, BcInt, Tmp1); 2058 2059 return DAG.getNode(ISD::BITCAST, SL, MVT::f64, Tmp2); 2060 } 2061 2062 SDValue AMDGPUTargetLowering::LowerFRINT(SDValue Op, SelectionDAG &DAG) const { 2063 SDLoc SL(Op); 2064 SDValue Src = Op.getOperand(0); 2065 2066 assert(Op.getValueType() == MVT::f64); 2067 2068 APFloat C1Val(APFloat::IEEEdouble(), "0x1.0p+52"); 2069 SDValue C1 = DAG.getConstantFP(C1Val, SL, MVT::f64); 2070 SDValue CopySign = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, C1, Src); 2071 2072 // TODO: Should this propagate fast-math-flags? 2073 2074 SDValue Tmp1 = DAG.getNode(ISD::FADD, SL, MVT::f64, Src, CopySign); 2075 SDValue Tmp2 = DAG.getNode(ISD::FSUB, SL, MVT::f64, Tmp1, CopySign); 2076 2077 SDValue Fabs = DAG.getNode(ISD::FABS, SL, MVT::f64, Src); 2078 2079 APFloat C2Val(APFloat::IEEEdouble(), "0x1.fffffffffffffp+51"); 2080 SDValue C2 = DAG.getConstantFP(C2Val, SL, MVT::f64); 2081 2082 EVT SetCCVT = 2083 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64); 2084 SDValue Cond = DAG.getSetCC(SL, SetCCVT, Fabs, C2, ISD::SETOGT); 2085 2086 return DAG.getSelect(SL, MVT::f64, Cond, Src, Tmp2); 2087 } 2088 2089 SDValue AMDGPUTargetLowering::LowerFNEARBYINT(SDValue Op, SelectionDAG &DAG) const { 2090 // FNEARBYINT and FRINT are the same, except in their handling of FP 2091 // exceptions. Those aren't really meaningful for us, and OpenCL only has 2092 // rint, so just treat them as equivalent. 2093 return DAG.getNode(ISD::FRINT, SDLoc(Op), Op.getValueType(), Op.getOperand(0)); 2094 } 2095 2096 // XXX - May require not supporting f32 denormals? 2097 2098 // Don't handle v2f16. The extra instructions to scalarize and repack around the 2099 // compare and vselect end up producing worse code than scalarizing the whole 2100 // operation. 2101 SDValue AMDGPUTargetLowering::LowerFROUND32_16(SDValue Op, SelectionDAG &DAG) const { 2102 SDLoc SL(Op); 2103 SDValue X = Op.getOperand(0); 2104 EVT VT = Op.getValueType(); 2105 2106 SDValue T = DAG.getNode(ISD::FTRUNC, SL, VT, X); 2107 2108 // TODO: Should this propagate fast-math-flags? 
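  // For example, round(2.3): T = 2.0 and |2.3 - 2.0| < 0.5, so nothing is
  // added. round(-2.5): T = -2.0 and |-0.5| >= 0.5, so copysign(1.0, x) =
  // -1.0 is added, giving -3.0; ties round away from zero.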
2109 2110 SDValue Diff = DAG.getNode(ISD::FSUB, SL, VT, X, T); 2111 2112 SDValue AbsDiff = DAG.getNode(ISD::FABS, SL, VT, Diff); 2113 2114 const SDValue Zero = DAG.getConstantFP(0.0, SL, VT); 2115 const SDValue One = DAG.getConstantFP(1.0, SL, VT); 2116 const SDValue Half = DAG.getConstantFP(0.5, SL, VT); 2117 2118 SDValue SignOne = DAG.getNode(ISD::FCOPYSIGN, SL, VT, One, X); 2119 2120 EVT SetCCVT = 2121 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT); 2122 2123 SDValue Cmp = DAG.getSetCC(SL, SetCCVT, AbsDiff, Half, ISD::SETOGE); 2124 2125 SDValue Sel = DAG.getNode(ISD::SELECT, SL, VT, Cmp, SignOne, Zero); 2126 2127 return DAG.getNode(ISD::FADD, SL, VT, T, Sel); 2128 } 2129 2130 SDValue AMDGPUTargetLowering::LowerFROUND64(SDValue Op, SelectionDAG &DAG) const { 2131 SDLoc SL(Op); 2132 SDValue X = Op.getOperand(0); 2133 2134 SDValue L = DAG.getNode(ISD::BITCAST, SL, MVT::i64, X); 2135 2136 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2137 const SDValue One = DAG.getConstant(1, SL, MVT::i32); 2138 const SDValue NegOne = DAG.getConstant(-1, SL, MVT::i32); 2139 const SDValue FiftyOne = DAG.getConstant(51, SL, MVT::i32); 2140 EVT SetCCVT = 2141 getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i32); 2142 2143 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X); 2144 2145 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, One); 2146 2147 SDValue Exp = extractF64Exponent(Hi, SL, DAG); 2148 2149 const SDValue Mask = DAG.getConstant(INT64_C(0x000fffffffffffff), SL, 2150 MVT::i64); 2151 2152 SDValue M = DAG.getNode(ISD::SRA, SL, MVT::i64, Mask, Exp); 2153 SDValue D = DAG.getNode(ISD::SRA, SL, MVT::i64, 2154 DAG.getConstant(INT64_C(0x0008000000000000), SL, 2155 MVT::i64), 2156 Exp); 2157 2158 SDValue Tmp0 = DAG.getNode(ISD::AND, SL, MVT::i64, L, M); 2159 SDValue Tmp1 = DAG.getSetCC(SL, SetCCVT, 2160 DAG.getConstant(0, SL, MVT::i64), Tmp0, 2161 ISD::SETNE); 2162 2163 SDValue Tmp2 = DAG.getNode(ISD::SELECT, SL, MVT::i64, Tmp1, 2164 D, DAG.getConstant(0, SL, MVT::i64)); 2165 SDValue K = DAG.getNode(ISD::ADD, SL, MVT::i64, L, Tmp2); 2166 2167 K = DAG.getNode(ISD::AND, SL, MVT::i64, K, DAG.getNOT(SL, M, MVT::i64)); 2168 K = DAG.getNode(ISD::BITCAST, SL, MVT::f64, K); 2169 2170 SDValue ExpLt0 = DAG.getSetCC(SL, SetCCVT, Exp, Zero, ISD::SETLT); 2171 SDValue ExpGt51 = DAG.getSetCC(SL, SetCCVT, Exp, FiftyOne, ISD::SETGT); 2172 SDValue ExpEqNegOne = DAG.getSetCC(SL, SetCCVT, NegOne, Exp, ISD::SETEQ); 2173 2174 SDValue Mag = DAG.getNode(ISD::SELECT, SL, MVT::f64, 2175 ExpEqNegOne, 2176 DAG.getConstantFP(1.0, SL, MVT::f64), 2177 DAG.getConstantFP(0.0, SL, MVT::f64)); 2178 2179 SDValue S = DAG.getNode(ISD::FCOPYSIGN, SL, MVT::f64, Mag, X); 2180 2181 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpLt0, S, K); 2182 K = DAG.getNode(ISD::SELECT, SL, MVT::f64, ExpGt51, X, K); 2183 2184 return K; 2185 } 2186 2187 SDValue AMDGPUTargetLowering::LowerFROUND(SDValue Op, SelectionDAG &DAG) const { 2188 EVT VT = Op.getValueType(); 2189 2190 if (VT == MVT::f32 || VT == MVT::f16) 2191 return LowerFROUND32_16(Op, DAG); 2192 2193 if (VT == MVT::f64) 2194 return LowerFROUND64(Op, DAG); 2195 2196 llvm_unreachable("unhandled type"); 2197 } 2198 2199 SDValue AMDGPUTargetLowering::LowerFFLOOR(SDValue Op, SelectionDAG &DAG) const { 2200 SDLoc SL(Op); 2201 SDValue Src = Op.getOperand(0); 2202 2203 // result = trunc(src); 2204 // if (src < 0.0 && src != result) 2205 // result += -1.0. 
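  // For example, floor(-2.5): trunc gives -2.0; since the source is negative
  // and differs from its truncation, -1.0 is added, giving -3.0.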

  SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src);

  const SDValue Zero = DAG.getConstantFP(0.0, SL, MVT::f64);
  const SDValue NegOne = DAG.getConstantFP(-1.0, SL, MVT::f64);

  EVT SetCCVT =
      getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::f64);

  SDValue Lt0 = DAG.getSetCC(SL, SetCCVT, Src, Zero, ISD::SETOLT);
  SDValue NeTrunc = DAG.getSetCC(SL, SetCCVT, Src, Trunc, ISD::SETONE);
  SDValue And = DAG.getNode(ISD::AND, SL, SetCCVT, Lt0, NeTrunc);

  SDValue Add = DAG.getNode(ISD::SELECT, SL, MVT::f64, And, NegOne, Zero);
  // TODO: Should this propagate fast-math-flags?
  return DAG.getNode(ISD::FADD, SL, MVT::f64, Trunc, Add);
}

SDValue AMDGPUTargetLowering::LowerFLOG(SDValue Op, SelectionDAG &DAG,
                                        double Log2BaseInverted) const {
  EVT VT = Op.getValueType();

  SDLoc SL(Op);
  SDValue Operand = Op.getOperand(0);
  SDValue Log2Operand = DAG.getNode(ISD::FLOG2, SL, VT, Operand);
  SDValue Log2BaseInvertedOperand = DAG.getConstantFP(Log2BaseInverted, SL, VT);

  return DAG.getNode(ISD::FMUL, SL, VT, Log2Operand, Log2BaseInvertedOperand);
}

static bool isCtlzOpc(unsigned Opc) {
  return Opc == ISD::CTLZ || Opc == ISD::CTLZ_ZERO_UNDEF;
}

static bool isCttzOpc(unsigned Opc) {
  return Opc == ISD::CTTZ || Opc == ISD::CTTZ_ZERO_UNDEF;
}

SDValue AMDGPUTargetLowering::LowerCTLZ_CTTZ(SDValue Op, SelectionDAG &DAG) const {
  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  bool ZeroUndef = Op.getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
                   Op.getOpcode() == ISD::CTLZ_ZERO_UNDEF;

  unsigned ISDOpc, NewOpc;
  if (isCtlzOpc(Op.getOpcode())) {
    ISDOpc = ISD::CTLZ_ZERO_UNDEF;
    NewOpc = AMDGPUISD::FFBH_U32;
  } else if (isCttzOpc(Op.getOpcode())) {
    ISDOpc = ISD::CTTZ_ZERO_UNDEF;
    NewOpc = AMDGPUISD::FFBL_B32;
  } else
    llvm_unreachable("Unexpected opcode");

  if (ZeroUndef && Src.getValueType() == MVT::i32)
    return DAG.getNode(NewOpc, SL, MVT::i32, Src);

  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src);

  const SDValue Zero = DAG.getConstant(0, SL, MVT::i32);
  const SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, Zero);
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec, One);

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::i32);

  SDValue HiOrLo = isCtlzOpc(Op.getOpcode()) ? Hi : Lo;
  SDValue Hi0orLo0 = DAG.getSetCC(SL, SetCCVT, HiOrLo, Zero, ISD::SETEQ);

  SDValue OprLo = DAG.getNode(ISDOpc, SL, MVT::i32, Lo);
  SDValue OprHi = DAG.getNode(ISDOpc, SL, MVT::i32, Hi);

  const SDValue Bits32 = DAG.getConstant(32, SL, MVT::i32);
  SDValue Add, NewOpr;
  if (isCtlzOpc(Op.getOpcode())) {
    Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprLo, Bits32);
    // ctlz(x) = hi_32(x) == 0 ? ctlz(lo_32(x)) + 32 : ctlz(hi_32(x))
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprHi);
  } else {
    Add = DAG.getNode(ISD::ADD, SL, MVT::i32, OprHi, Bits32);
    // cttz(x) = lo_32(x) == 0 ? cttz(hi_32(x)) + 32 : cttz(lo_32(x))
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32, Hi0orLo0, Add, OprLo);
  }

  if (!ZeroUndef) {
    // Test if the full 64-bit input is zero.

    // FIXME: DAG combines turn what should be an s_and_b64 into a v_or_b32,
    // which we probably don't want.
    SDValue LoOrHi = isCtlzOpc(Op.getOpcode()) ? Lo : Hi;
    SDValue Lo0OrHi0 = DAG.getSetCC(SL, SetCCVT, LoOrHi, Zero, ISD::SETEQ);
    SDValue SrcIsZero = DAG.getNode(ISD::AND, SL, SetCCVT, Lo0OrHi0, Hi0orLo0);

    // TODO: If i64 setcc is half rate, it can result in 1 fewer instruction
    // with the same cycles, otherwise it is slower.
    // SDValue SrcIsZero = DAG.getSetCC(SL, SetCCVT, Src,
    //                    DAG.getConstant(0, SL, MVT::i64), ISD::SETEQ);

    const SDValue Bits64 = DAG.getConstant(64, SL, MVT::i32);

    // The instruction returns -1 for 0 input, but the defined intrinsic
    // behavior is to return the number of bits.
    NewOpr = DAG.getNode(ISD::SELECT, SL, MVT::i32,
                         SrcIsZero, Bits64, NewOpr);
  }

  return DAG.getNode(ISD::ZERO_EXTEND, SL, MVT::i64, NewOpr);
}

SDValue AMDGPUTargetLowering::LowerINT_TO_FP32(SDValue Op, SelectionDAG &DAG,
                                               bool Signed) const {
  // Unsigned
  // cul2f(ulong u)
  //{
  //  uint lz = clz(u);
  //  uint e = (u != 0) ? 127U + 63U - lz : 0;
  //  u = (u << lz) & 0x7fffffffffffffffUL;
  //  ulong t = u & 0xffffffffffUL;
  //  uint v = (e << 23) | (uint)(u >> 40);
  //  uint r = t > 0x8000000000UL ? 1U : (t == 0x8000000000UL ? v & 1U : 0U);
  //  return as_float(v + r);
  //}
  // Signed
  // cl2f(long l)
  //{
  //  long s = l >> 63;
  //  float r = cul2f((l + s) ^ s);
  //  return s ? -r : r;
  //}

  SDLoc SL(Op);
  SDValue Src = Op.getOperand(0);
  SDValue L = Src;

  SDValue S;
  if (Signed) {
    const SDValue SignBit = DAG.getConstant(63, SL, MVT::i64);
    S = DAG.getNode(ISD::SRA, SL, MVT::i64, L, SignBit);

    SDValue LPlusS = DAG.getNode(ISD::ADD, SL, MVT::i64, L, S);
    L = DAG.getNode(ISD::XOR, SL, MVT::i64, LPlusS, S);
  }

  EVT SetCCVT = getSetCCResultType(DAG.getDataLayout(),
                                   *DAG.getContext(), MVT::f32);

  SDValue ZeroI32 = DAG.getConstant(0, SL, MVT::i32);
  SDValue ZeroI64 = DAG.getConstant(0, SL, MVT::i64);
  SDValue LZ = DAG.getNode(ISD::CTLZ_ZERO_UNDEF, SL, MVT::i64, L);
  LZ = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LZ);

  SDValue K = DAG.getConstant(127U + 63U, SL, MVT::i32);
  SDValue E = DAG.getSelect(SL, MVT::i32,
    DAG.getSetCC(SL, SetCCVT, L, ZeroI64, ISD::SETNE),
    DAG.getNode(ISD::SUB, SL, MVT::i32, K, LZ),
    ZeroI32);

  SDValue U = DAG.getNode(ISD::AND, SL, MVT::i64,
                          DAG.getNode(ISD::SHL, SL, MVT::i64, L, LZ),
                          DAG.getConstant((-1ULL) >> 1, SL, MVT::i64));

  SDValue T = DAG.getNode(ISD::AND, SL, MVT::i64, U,
                          DAG.getConstant(0xffffffffffULL, SL, MVT::i64));

  SDValue UShl = DAG.getNode(ISD::SRL, SL, MVT::i64,
                             U, DAG.getConstant(40, SL, MVT::i64));

  SDValue V = DAG.getNode(ISD::OR, SL, MVT::i32,
      DAG.getNode(ISD::SHL, SL, MVT::i32, E, DAG.getConstant(23, SL, MVT::i32)),
      DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, UShl));

  SDValue C = DAG.getConstant(0x8000000000ULL, SL, MVT::i64);
  SDValue RCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETUGT);
  SDValue TCmp = DAG.getSetCC(SL, SetCCVT, T, C, ISD::SETEQ);

  SDValue One = DAG.getConstant(1, SL, MVT::i32);

  SDValue VTrunc1 = DAG.getNode(ISD::AND, SL, MVT::i32, V, One);

  SDValue R = DAG.getSelect(SL, MVT::i32,
                            RCmp,
                            One,
                            DAG.getSelect(SL, MVT::i32,
TCmp, VTrunc1, ZeroI32)); 2393 R = DAG.getNode(ISD::ADD, SL, MVT::i32, V, R); 2394 R = DAG.getNode(ISD::BITCAST, SL, MVT::f32, R); 2395 2396 if (!Signed) 2397 return R; 2398 2399 SDValue RNeg = DAG.getNode(ISD::FNEG, SL, MVT::f32, R); 2400 return DAG.getSelect(SL, MVT::f32, DAG.getSExtOrTrunc(S, SL, SetCCVT), RNeg, R); 2401 } 2402 2403 SDValue AMDGPUTargetLowering::LowerINT_TO_FP64(SDValue Op, SelectionDAG &DAG, 2404 bool Signed) const { 2405 SDLoc SL(Op); 2406 SDValue Src = Op.getOperand(0); 2407 2408 SDValue BC = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, Src); 2409 2410 SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2411 DAG.getConstant(0, SL, MVT::i32)); 2412 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, BC, 2413 DAG.getConstant(1, SL, MVT::i32)); 2414 2415 SDValue CvtHi = DAG.getNode(Signed ? ISD::SINT_TO_FP : ISD::UINT_TO_FP, 2416 SL, MVT::f64, Hi); 2417 2418 SDValue CvtLo = DAG.getNode(ISD::UINT_TO_FP, SL, MVT::f64, Lo); 2419 2420 SDValue LdExp = DAG.getNode(AMDGPUISD::LDEXP, SL, MVT::f64, CvtHi, 2421 DAG.getConstant(32, SL, MVT::i32)); 2422 // TODO: Should this propagate fast-math-flags? 2423 return DAG.getNode(ISD::FADD, SL, MVT::f64, LdExp, CvtLo); 2424 } 2425 2426 SDValue AMDGPUTargetLowering::LowerUINT_TO_FP(SDValue Op, 2427 SelectionDAG &DAG) const { 2428 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2429 "operation should be legal"); 2430 2431 // TODO: Factor out code common with LowerSINT_TO_FP. 2432 2433 EVT DestVT = Op.getValueType(); 2434 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2435 SDLoc DL(Op); 2436 SDValue Src = Op.getOperand(0); 2437 2438 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2439 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2440 SDValue FPRound = 2441 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2442 2443 return FPRound; 2444 } 2445 2446 if (DestVT == MVT::f32) 2447 return LowerINT_TO_FP32(Op, DAG, false); 2448 2449 assert(DestVT == MVT::f64); 2450 return LowerINT_TO_FP64(Op, DAG, false); 2451 } 2452 2453 SDValue AMDGPUTargetLowering::LowerSINT_TO_FP(SDValue Op, 2454 SelectionDAG &DAG) const { 2455 assert(Op.getOperand(0).getValueType() == MVT::i64 && 2456 "operation should be legal"); 2457 2458 // TODO: Factor out code common with LowerUINT_TO_FP. 2459 2460 EVT DestVT = Op.getValueType(); 2461 if (Subtarget->has16BitInsts() && DestVT == MVT::f16) { 2462 SDLoc DL(Op); 2463 SDValue Src = Op.getOperand(0); 2464 2465 SDValue IntToFp32 = DAG.getNode(Op.getOpcode(), DL, MVT::f32, Src); 2466 SDValue FPRoundFlag = DAG.getIntPtrConstant(0, SDLoc(Op)); 2467 SDValue FPRound = 2468 DAG.getNode(ISD::FP_ROUND, DL, MVT::f16, IntToFp32, FPRoundFlag); 2469 2470 return FPRound; 2471 } 2472 2473 if (DestVT == MVT::f32) 2474 return LowerINT_TO_FP32(Op, DAG, true); 2475 2476 assert(DestVT == MVT::f64); 2477 return LowerINT_TO_FP64(Op, DAG, true); 2478 } 2479 2480 SDValue AMDGPUTargetLowering::LowerFP64_TO_INT(SDValue Op, SelectionDAG &DAG, 2481 bool Signed) const { 2482 SDLoc SL(Op); 2483 2484 SDValue Src = Op.getOperand(0); 2485 2486 SDValue Trunc = DAG.getNode(ISD::FTRUNC, SL, MVT::f64, Src); 2487 2488 SDValue K0 = DAG.getConstantFP(BitsToDouble(UINT64_C(0x3df0000000000000)), SL, 2489 MVT::f64); 2490 SDValue K1 = DAG.getConstantFP(BitsToDouble(UINT64_C(0xc1f0000000000000)), SL, 2491 MVT::f64); 2492 // TODO: Should this propagate fast-math-flags? 
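  // By their bit patterns, K0 is 2^-32 and K1 is -2^32, so FloorMul below
  // extracts the upper 32 bits of the truncated value and the fma recovers
  // the remaining lower 32 bits as Trunc - FloorMul * 2^32.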
2493 SDValue Mul = DAG.getNode(ISD::FMUL, SL, MVT::f64, Trunc, K0); 2494 2495 SDValue FloorMul = DAG.getNode(ISD::FFLOOR, SL, MVT::f64, Mul); 2496 2497 2498 SDValue Fma = DAG.getNode(ISD::FMA, SL, MVT::f64, FloorMul, K1, Trunc); 2499 2500 SDValue Hi = DAG.getNode(Signed ? ISD::FP_TO_SINT : ISD::FP_TO_UINT, SL, 2501 MVT::i32, FloorMul); 2502 SDValue Lo = DAG.getNode(ISD::FP_TO_UINT, SL, MVT::i32, Fma); 2503 2504 SDValue Result = DAG.getBuildVector(MVT::v2i32, SL, {Lo, Hi}); 2505 2506 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Result); 2507 } 2508 2509 SDValue AMDGPUTargetLowering::LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) const { 2510 SDLoc DL(Op); 2511 SDValue N0 = Op.getOperand(0); 2512 2513 // Convert to target node to get known bits 2514 if (N0.getValueType() == MVT::f32) 2515 return DAG.getNode(AMDGPUISD::FP_TO_FP16, DL, Op.getValueType(), N0); 2516 2517 if (getTargetMachine().Options.UnsafeFPMath) { 2518 // There is a generic expand for FP_TO_FP16 with unsafe fast math. 2519 return SDValue(); 2520 } 2521 2522 assert(N0.getSimpleValueType() == MVT::f64); 2523 2524 // f64 -> f16 conversion using round-to-nearest-even rounding mode. 2525 const unsigned ExpMask = 0x7ff; 2526 const unsigned ExpBiasf64 = 1023; 2527 const unsigned ExpBiasf16 = 15; 2528 SDValue Zero = DAG.getConstant(0, DL, MVT::i32); 2529 SDValue One = DAG.getConstant(1, DL, MVT::i32); 2530 SDValue U = DAG.getNode(ISD::BITCAST, DL, MVT::i64, N0); 2531 SDValue UH = DAG.getNode(ISD::SRL, DL, MVT::i64, U, 2532 DAG.getConstant(32, DL, MVT::i64)); 2533 UH = DAG.getZExtOrTrunc(UH, DL, MVT::i32); 2534 U = DAG.getZExtOrTrunc(U, DL, MVT::i32); 2535 SDValue E = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2536 DAG.getConstant(20, DL, MVT::i64)); 2537 E = DAG.getNode(ISD::AND, DL, MVT::i32, E, 2538 DAG.getConstant(ExpMask, DL, MVT::i32)); 2539 // Subtract the fp64 exponent bias (1023) to get the real exponent and 2540 // add the f16 bias (15) to get the biased exponent for the f16 format. 2541 E = DAG.getNode(ISD::ADD, DL, MVT::i32, E, 2542 DAG.getConstant(-ExpBiasf64 + ExpBiasf16, DL, MVT::i32)); 2543 2544 SDValue M = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2545 DAG.getConstant(8, DL, MVT::i32)); 2546 M = DAG.getNode(ISD::AND, DL, MVT::i32, M, 2547 DAG.getConstant(0xffe, DL, MVT::i32)); 2548 2549 SDValue MaskedSig = DAG.getNode(ISD::AND, DL, MVT::i32, UH, 2550 DAG.getConstant(0x1ff, DL, MVT::i32)); 2551 MaskedSig = DAG.getNode(ISD::OR, DL, MVT::i32, MaskedSig, U); 2552 2553 SDValue Lo40Set = DAG.getSelectCC(DL, MaskedSig, Zero, Zero, One, ISD::SETEQ); 2554 M = DAG.getNode(ISD::OR, DL, MVT::i32, M, Lo40Set); 2555 2556 // (M != 0 ? 
0x0200 : 0) | 0x7c00; 2557 SDValue I = DAG.getNode(ISD::OR, DL, MVT::i32, 2558 DAG.getSelectCC(DL, M, Zero, DAG.getConstant(0x0200, DL, MVT::i32), 2559 Zero, ISD::SETNE), DAG.getConstant(0x7c00, DL, MVT::i32)); 2560 2561 // N = M | (E << 12); 2562 SDValue N = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2563 DAG.getNode(ISD::SHL, DL, MVT::i32, E, 2564 DAG.getConstant(12, DL, MVT::i32))); 2565 2566 // B = clamp(1-E, 0, 13); 2567 SDValue OneSubExp = DAG.getNode(ISD::SUB, DL, MVT::i32, 2568 One, E); 2569 SDValue B = DAG.getNode(ISD::SMAX, DL, MVT::i32, OneSubExp, Zero); 2570 B = DAG.getNode(ISD::SMIN, DL, MVT::i32, B, 2571 DAG.getConstant(13, DL, MVT::i32)); 2572 2573 SDValue SigSetHigh = DAG.getNode(ISD::OR, DL, MVT::i32, M, 2574 DAG.getConstant(0x1000, DL, MVT::i32)); 2575 2576 SDValue D = DAG.getNode(ISD::SRL, DL, MVT::i32, SigSetHigh, B); 2577 SDValue D0 = DAG.getNode(ISD::SHL, DL, MVT::i32, D, B); 2578 SDValue D1 = DAG.getSelectCC(DL, D0, SigSetHigh, One, Zero, ISD::SETNE); 2579 D = DAG.getNode(ISD::OR, DL, MVT::i32, D, D1); 2580 2581 SDValue V = DAG.getSelectCC(DL, E, One, D, N, ISD::SETLT); 2582 SDValue VLow3 = DAG.getNode(ISD::AND, DL, MVT::i32, V, 2583 DAG.getConstant(0x7, DL, MVT::i32)); 2584 V = DAG.getNode(ISD::SRL, DL, MVT::i32, V, 2585 DAG.getConstant(2, DL, MVT::i32)); 2586 SDValue V0 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(3, DL, MVT::i32), 2587 One, Zero, ISD::SETEQ); 2588 SDValue V1 = DAG.getSelectCC(DL, VLow3, DAG.getConstant(5, DL, MVT::i32), 2589 One, Zero, ISD::SETGT); 2590 V1 = DAG.getNode(ISD::OR, DL, MVT::i32, V0, V1); 2591 V = DAG.getNode(ISD::ADD, DL, MVT::i32, V, V1); 2592 2593 V = DAG.getSelectCC(DL, E, DAG.getConstant(30, DL, MVT::i32), 2594 DAG.getConstant(0x7c00, DL, MVT::i32), V, ISD::SETGT); 2595 V = DAG.getSelectCC(DL, E, DAG.getConstant(1039, DL, MVT::i32), 2596 I, V, ISD::SETEQ); 2597 2598 // Extract the sign bit. 2599 SDValue Sign = DAG.getNode(ISD::SRL, DL, MVT::i32, UH, 2600 DAG.getConstant(16, DL, MVT::i32)); 2601 Sign = DAG.getNode(ISD::AND, DL, MVT::i32, Sign, 2602 DAG.getConstant(0x8000, DL, MVT::i32)); 2603 2604 V = DAG.getNode(ISD::OR, DL, MVT::i32, Sign, V); 2605 return DAG.getZExtOrTrunc(V, DL, Op.getValueType()); 2606 } 2607 2608 SDValue AMDGPUTargetLowering::LowerFP_TO_SINT(SDValue Op, 2609 SelectionDAG &DAG) const { 2610 SDValue Src = Op.getOperand(0); 2611 2612 // TODO: Factor out code common with LowerFP_TO_UINT. 2613 2614 EVT SrcVT = Src.getValueType(); 2615 if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) { 2616 SDLoc DL(Op); 2617 2618 SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src); 2619 SDValue FpToInt32 = 2620 DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend); 2621 2622 return FpToInt32; 2623 } 2624 2625 if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64) 2626 return LowerFP64_TO_INT(Op, DAG, true); 2627 2628 return SDValue(); 2629 } 2630 2631 SDValue AMDGPUTargetLowering::LowerFP_TO_UINT(SDValue Op, 2632 SelectionDAG &DAG) const { 2633 SDValue Src = Op.getOperand(0); 2634 2635 // TODO: Factor out code common with LowerFP_TO_SINT. 

  EVT SrcVT = Src.getValueType();
  if (Subtarget->has16BitInsts() && SrcVT == MVT::f16) {
    SDLoc DL(Op);

    SDValue FPExtend = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, Src);
    SDValue FpToInt32 =
        DAG.getNode(Op.getOpcode(), DL, MVT::i64, FPExtend);

    return FpToInt32;
  }

  if (Op.getValueType() == MVT::i64 && Src.getValueType() == MVT::f64)
    return LowerFP64_TO_INT(Op, DAG, false);

  return SDValue();
}

SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                     SelectionDAG &DAG) const {
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();
  MVT ScalarVT = VT.getScalarType();

  assert(VT.isVector());

  SDValue Src = Op.getOperand(0);
  SDLoc DL(Op);

  // TODO: Don't scalarize on Evergreen?
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> Args;
  DAG.ExtractVectorElements(Src, Args, 0, NElts);

  SDValue VTOp = DAG.getValueType(ExtraVT.getScalarType());
  for (unsigned I = 0; I < NElts; ++I)
    Args[I] = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, ScalarVT, Args[I], VTOp);

  return DAG.getBuildVector(VT, DL, Args);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

static bool isU24(SDValue Op, SelectionDAG &DAG) {
  return AMDGPUTargetLowering::numBitsUnsigned(Op, DAG) <= 24;
}

static bool isI24(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();
  return VT.getSizeInBits() >= 24 && // Types less than 24-bit should be treated
                                     // as unsigned 24-bit values.
         AMDGPUTargetLowering::numBitsSigned(Op, DAG) < 24;
}

static bool simplifyI24(SDNode *Node24, unsigned OpIdx,
                        TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Op = Node24->getOperand(OpIdx);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = Op.getValueType();

  // Only the low 24 bits of the operand are demanded.
  APInt Demanded = APInt::getLowBitsSet(VT.getSizeInBits(), 24);
  TargetLowering::TargetLoweringOpt TLO(DAG, true, true);
  return TLI.SimplifyDemandedBits(Node24, OpIdx, Demanded, DCI, TLO);
}

template <typename IntTy>
static SDValue constantFoldBFE(SelectionDAG &DAG, IntTy Src0, uint32_t Offset,
                               uint32_t Width, const SDLoc &DL) {
  if (Width + Offset < 32) {
    uint32_t Shl = static_cast<uint32_t>(Src0) << (32 - Offset - Width);
    IntTy Result = static_cast<IntTy>(Shl) >> (32 - Width);
    return DAG.getConstant(Result, DL, MVT::i32);
  }

  return DAG.getConstant(Src0 >> Offset, DL, MVT::i32);
}

static bool hasVolatileUser(SDNode *Val) {
  for (SDNode *U : Val->uses()) {
    if (MemSDNode *M = dyn_cast<MemSDNode>(U)) {
      if (M->isVolatile())
        return true;
    }
  }

  return false;
}

bool AMDGPUTargetLowering::shouldCombineMemoryType(EVT VT) const {
  // i32 vectors are the canonical memory type.
2734 if (VT.getScalarType() == MVT::i32 || isTypeLegal(VT)) 2735 return false; 2736 2737 if (!VT.isByteSized()) 2738 return false; 2739 2740 unsigned Size = VT.getStoreSize(); 2741 2742 if ((Size == 1 || Size == 2 || Size == 4) && !VT.isVector()) 2743 return false; 2744 2745 if (Size == 3 || (Size > 4 && (Size % 4 != 0))) 2746 return false; 2747 2748 return true; 2749 } 2750 2751 // Replace load of an illegal type with a store of a bitcast to a friendlier 2752 // type. 2753 SDValue AMDGPUTargetLowering::performLoadCombine(SDNode *N, 2754 DAGCombinerInfo &DCI) const { 2755 if (!DCI.isBeforeLegalize()) 2756 return SDValue(); 2757 2758 LoadSDNode *LN = cast<LoadSDNode>(N); 2759 if (LN->isVolatile() || !ISD::isNormalLoad(LN) || hasVolatileUser(LN)) 2760 return SDValue(); 2761 2762 SDLoc SL(N); 2763 SelectionDAG &DAG = DCI.DAG; 2764 EVT VT = LN->getMemoryVT(); 2765 2766 unsigned Size = VT.getStoreSize(); 2767 unsigned Align = LN->getAlignment(); 2768 if (Align < Size && isTypeLegal(VT)) { 2769 bool IsFast; 2770 unsigned AS = LN->getAddressSpace(); 2771 2772 // Expand unaligned loads earlier than legalization. Due to visitation order 2773 // problems during legalization, the emitted instructions to pack and unpack 2774 // the bytes again are not eliminated in the case of an unaligned copy. 2775 if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) { 2776 if (VT.isVector()) 2777 return scalarizeVectorLoad(LN, DAG); 2778 2779 SDValue Ops[2]; 2780 std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(LN, DAG); 2781 return DAG.getMergeValues(Ops, SDLoc(N)); 2782 } 2783 2784 if (!IsFast) 2785 return SDValue(); 2786 } 2787 2788 if (!shouldCombineMemoryType(VT)) 2789 return SDValue(); 2790 2791 EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT); 2792 2793 SDValue NewLoad 2794 = DAG.getLoad(NewVT, SL, LN->getChain(), 2795 LN->getBasePtr(), LN->getMemOperand()); 2796 2797 SDValue BC = DAG.getNode(ISD::BITCAST, SL, VT, NewLoad); 2798 DCI.CombineTo(N, BC, NewLoad.getValue(1)); 2799 return SDValue(N, 0); 2800 } 2801 2802 // Replace store of an illegal type with a store of a bitcast to a friendlier 2803 // type. 2804 SDValue AMDGPUTargetLowering::performStoreCombine(SDNode *N, 2805 DAGCombinerInfo &DCI) const { 2806 if (!DCI.isBeforeLegalize()) 2807 return SDValue(); 2808 2809 StoreSDNode *SN = cast<StoreSDNode>(N); 2810 if (SN->isVolatile() || !ISD::isNormalStore(SN)) 2811 return SDValue(); 2812 2813 EVT VT = SN->getMemoryVT(); 2814 unsigned Size = VT.getStoreSize(); 2815 2816 SDLoc SL(N); 2817 SelectionDAG &DAG = DCI.DAG; 2818 unsigned Align = SN->getAlignment(); 2819 if (Align < Size && isTypeLegal(VT)) { 2820 bool IsFast; 2821 unsigned AS = SN->getAddressSpace(); 2822 2823 // Expand unaligned stores earlier than legalization. Due to visitation 2824 // order problems during legalization, the emitted instructions to pack and 2825 // unpack the bytes again are not eliminated in the case of an unaligned 2826 // copy. 
    if (!allowsMisalignedMemoryAccesses(VT, AS, Align, &IsFast)) {
      if (VT.isVector())
        return scalarizeVectorStore(SN, DAG);

      return expandUnalignedStore(SN, DAG);
    }

    if (!IsFast)
      return SDValue();
  }

  if (!shouldCombineMemoryType(VT))
    return SDValue();

  EVT NewVT = getEquivalentMemType(*DAG.getContext(), VT);
  SDValue Val = SN->getValue();

  bool OtherUses = !Val.hasOneUse();
  SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NewVT, Val);
  if (OtherUses) {
    SDValue CastBack = DAG.getNode(ISD::BITCAST, SL, VT, CastVal);
    DAG.ReplaceAllUsesOfValueWith(Val, CastBack);
  }

  return DAG.getStore(SN->getChain(), SL, CastVal,
                      SN->getBasePtr(), SN->getMemOperand());
}

// FIXME: This should go in generic DAG combiner with an isTruncateFree check,
// but isTruncateFree is inaccurate for i16 now because of SALU vs. VALU
// issues.
SDValue AMDGPUTargetLowering::performAssertSZExtCombine(SDNode *N,
                                                        DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);

  // (vt2 (assertzext (truncate vt0:x), vt1)) ->
  //     (vt2 (truncate (assertzext vt0:x, vt1)))
  if (N0.getOpcode() == ISD::TRUNCATE) {
    SDValue N1 = N->getOperand(1);
    EVT ExtVT = cast<VTSDNode>(N1)->getVT();
    SDLoc SL(N);

    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();
    if (SrcVT.bitsGE(ExtVT)) {
      SDValue NewInReg = DAG.getNode(N->getOpcode(), SL, SrcVT, Src, N1);
      return DAG.getNode(ISD::TRUNCATE, SL, N->getValueType(0), NewInReg);
    }
  }

  return SDValue();
}

/// Split the 64-bit value \p LHS into two 32-bit components, and perform the
/// binary operation \p Opc to it with the corresponding constant operands.
/// For example, (and i64:x, 0x0000ffff00000000) splits into
/// (and lo_32(x), 0) and (and hi_32(x), 0xffff), each of which can fold
/// further on its own.
SDValue AMDGPUTargetLowering::splitBinaryBitConstantOpImpl(
  DAGCombinerInfo &DCI, const SDLoc &SL,
  unsigned Opc, SDValue LHS,
  uint32_t ValLo, uint32_t ValHi) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Lo, Hi;
  std::tie(Lo, Hi) = split64BitValue(LHS, DAG);

  SDValue LoRHS = DAG.getConstant(ValLo, SL, MVT::i32);
  SDValue HiRHS = DAG.getConstant(ValHi, SL, MVT::i32);

  SDValue LoAnd = DAG.getNode(Opc, SL, MVT::i32, Lo, LoRHS);
  SDValue HiAnd = DAG.getNode(Opc, SL, MVT::i32, Hi, HiRHS);

  // Re-visit the ands. It's possible we eliminated one of them and it could
  // simplify the vector.
  DCI.AddToWorklist(Lo.getNode());
  DCI.AddToWorklist(Hi.getNode());

  SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {LoAnd, HiAnd});
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
}

SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!RHS)
    return SDValue();

  SDValue LHS = N->getOperand(0);
  unsigned RHSVal = RHS->getZExtValue();
  if (!RHSVal)
    return LHS;

  SDLoc SL(N);
  SelectionDAG &DAG = DCI.DAG;

  switch (LHS->getOpcode()) {
  default:
    break;
  case ISD::ZERO_EXTEND:
  case ISD::SIGN_EXTEND:
  case ISD::ANY_EXTEND: {
    SDValue X = LHS->getOperand(0);

    if (VT == MVT::i32 && RHSVal == 16 && X.getValueType() == MVT::i16 &&
        isOperationLegal(ISD::BUILD_VECTOR, MVT::v2i16)) {
      // Prefer build_vector as the canonical form if packed types are legal.
      // (shl ([asz]ext i16:x), 16) -> build_vector 0, x
      SDValue Vec = DAG.getBuildVector(MVT::v2i16, SL,
        { DAG.getConstant(0, SL, MVT::i16), LHS->getOperand(0) });
      return DAG.getNode(ISD::BITCAST, SL, MVT::i32, Vec);
    }

    // shl (ext x) => zext (shl x), if shift does not overflow int
    if (VT != MVT::i64)
      break;
    KnownBits Known;
    DAG.computeKnownBits(X, Known);
    unsigned LZ = Known.countMinLeadingZeros();
    if (LZ < RHSVal)
      break;
    EVT XVT = X.getValueType();
    SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
    return DAG.getZExtOrTrunc(Shl, SL, VT);
  }
  }

  if (VT != MVT::i64)
    return SDValue();

  // i64 (shl x, C) -> (build_pair 0, (shl x, C - 32))

  // On some subtargets, 64-bit shift is a quarter rate instruction. In the
  // common case, splitting this into a move and a 32-bit shift is faster and
  // the same code size.
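  // For example, (shl i64:x, 40) becomes a pair whose low half is zero and
  // whose high half is (shl (i32 (trunc x)), 8).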
2962 if (RHSVal < 32) 2963 return SDValue(); 2964 2965 SDValue ShiftAmt = DAG.getConstant(RHSVal - 32, SL, MVT::i32); 2966 2967 SDValue Lo = DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, LHS); 2968 SDValue NewShift = DAG.getNode(ISD::SHL, SL, MVT::i32, Lo, ShiftAmt); 2969 2970 const SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 2971 2972 SDValue Vec = DAG.getBuildVector(MVT::v2i32, SL, {Zero, NewShift}); 2973 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec); 2974 } 2975 2976 SDValue AMDGPUTargetLowering::performSraCombine(SDNode *N, 2977 DAGCombinerInfo &DCI) const { 2978 if (N->getValueType(0) != MVT::i64) 2979 return SDValue(); 2980 2981 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 2982 if (!RHS) 2983 return SDValue(); 2984 2985 SelectionDAG &DAG = DCI.DAG; 2986 SDLoc SL(N); 2987 unsigned RHSVal = RHS->getZExtValue(); 2988 2989 // (sra i64:x, 32) -> build_pair x, (sra hi_32(x), 31) 2990 if (RHSVal == 32) { 2991 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 2992 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 2993 DAG.getConstant(31, SL, MVT::i32)); 2994 2995 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {Hi, NewShift}); 2996 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 2997 } 2998 2999 // (sra i64:x, 63) -> build_pair (sra hi_32(x), 31), (sra hi_32(x), 31) 3000 if (RHSVal == 63) { 3001 SDValue Hi = getHiHalf64(N->getOperand(0), DAG); 3002 SDValue NewShift = DAG.getNode(ISD::SRA, SL, MVT::i32, Hi, 3003 DAG.getConstant(31, SL, MVT::i32)); 3004 SDValue BuildVec = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, NewShift}); 3005 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildVec); 3006 } 3007 3008 return SDValue(); 3009 } 3010 3011 SDValue AMDGPUTargetLowering::performSrlCombine(SDNode *N, 3012 DAGCombinerInfo &DCI) const { 3013 if (N->getValueType(0) != MVT::i64) 3014 return SDValue(); 3015 3016 const ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1)); 3017 if (!RHS) 3018 return SDValue(); 3019 3020 unsigned ShiftAmt = RHS->getZExtValue(); 3021 if (ShiftAmt < 32) 3022 return SDValue(); 3023 3024 // srl i64:x, C for C >= 32 3025 // => 3026 // build_pair (srl hi_32(x), C - 32), 0 3027 3028 SelectionDAG &DAG = DCI.DAG; 3029 SDLoc SL(N); 3030 3031 SDValue One = DAG.getConstant(1, SL, MVT::i32); 3032 SDValue Zero = DAG.getConstant(0, SL, MVT::i32); 3033 3034 SDValue VecOp = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, N->getOperand(0)); 3035 SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, 3036 VecOp, One); 3037 3038 SDValue NewConst = DAG.getConstant(ShiftAmt - 32, SL, MVT::i32); 3039 SDValue NewShift = DAG.getNode(ISD::SRL, SL, MVT::i32, Hi, NewConst); 3040 3041 SDValue BuildPair = DAG.getBuildVector(MVT::v2i32, SL, {NewShift, Zero}); 3042 3043 return DAG.getNode(ISD::BITCAST, SL, MVT::i64, BuildPair); 3044 } 3045 3046 SDValue AMDGPUTargetLowering::performTruncateCombine( 3047 SDNode *N, DAGCombinerInfo &DCI) const { 3048 SDLoc SL(N); 3049 SelectionDAG &DAG = DCI.DAG; 3050 EVT VT = N->getValueType(0); 3051 SDValue Src = N->getOperand(0); 3052 3053 // vt1 (truncate (bitcast (build_vector vt0:x, ...))) -> vt1 (bitcast vt0:x) 3054 if (Src.getOpcode() == ISD::BITCAST) { 3055 SDValue Vec = Src.getOperand(0); 3056 if (Vec.getOpcode() == ISD::BUILD_VECTOR) { 3057 SDValue Elt0 = Vec.getOperand(0); 3058 EVT EltVT = Elt0.getValueType(); 3059 if (VT.getSizeInBits() <= EltVT.getSizeInBits()) { 3060 if (EltVT.isFloatingPoint()) { 3061 Elt0 = DAG.getNode(ISD::BITCAST, SL, 3062 EltVT.changeTypeToInteger(), Elt0); 3063 
} 3064 3065 return DAG.getNode(ISD::TRUNCATE, SL, VT, Elt0); 3066 } 3067 } 3068 } 3069 3070 // Equivalent of above for accessing the high element of a vector as an 3071 // integer operation. 3072 // trunc (srl (bitcast (build_vector x, y))), 16 -> trunc (bitcast y) 3073 if (Src.getOpcode() == ISD::SRL) { 3074 if (auto K = isConstOrConstSplat(Src.getOperand(1))) { 3075 if (2 * K->getZExtValue() == Src.getValueType().getScalarSizeInBits()) { 3076 SDValue BV = stripBitcast(Src.getOperand(0)); 3077 if (BV.getOpcode() == ISD::BUILD_VECTOR && 3078 BV.getValueType().getVectorNumElements() == 2) { 3079 SDValue SrcElt = BV.getOperand(1); 3080 EVT SrcEltVT = SrcElt.getValueType(); 3081 if (SrcEltVT.isFloatingPoint()) { 3082 SrcElt = DAG.getNode(ISD::BITCAST, SL, 3083 SrcEltVT.changeTypeToInteger(), SrcElt); 3084 } 3085 3086 return DAG.getNode(ISD::TRUNCATE, SL, VT, SrcElt); 3087 } 3088 } 3089 } 3090 } 3091 3092 // Partially shrink 64-bit shifts to 32-bit if reduced to 16-bit. 3093 // 3094 // i16 (trunc (srl i64:x, K)), K <= 16 -> 3095 // i16 (trunc (srl (i32 (trunc x), K))) 3096 if (VT.getScalarSizeInBits() < 32) { 3097 EVT SrcVT = Src.getValueType(); 3098 if (SrcVT.getScalarSizeInBits() > 32 && 3099 (Src.getOpcode() == ISD::SRL || 3100 Src.getOpcode() == ISD::SRA || 3101 Src.getOpcode() == ISD::SHL)) { 3102 SDValue Amt = Src.getOperand(1); 3103 KnownBits Known; 3104 DAG.computeKnownBits(Amt, Known); 3105 unsigned Size = VT.getScalarSizeInBits(); 3106 if ((Known.isConstant() && Known.getConstant().ule(Size)) || 3107 (Known.getBitWidth() - Known.countMinLeadingZeros() <= Log2_32(Size))) { 3108 EVT MidVT = VT.isVector() ? 3109 EVT::getVectorVT(*DAG.getContext(), MVT::i32, 3110 VT.getVectorNumElements()) : MVT::i32; 3111 3112 EVT NewShiftVT = getShiftAmountTy(MidVT, DAG.getDataLayout()); 3113 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SL, MidVT, 3114 Src.getOperand(0)); 3115 DCI.AddToWorklist(Trunc.getNode()); 3116 3117 if (Amt.getValueType() != NewShiftVT) { 3118 Amt = DAG.getZExtOrTrunc(Amt, SL, NewShiftVT); 3119 DCI.AddToWorklist(Amt.getNode()); 3120 } 3121 3122 SDValue ShrunkShift = DAG.getNode(Src.getOpcode(), SL, MidVT, 3123 Trunc, Amt); 3124 return DAG.getNode(ISD::TRUNCATE, SL, VT, ShrunkShift); 3125 } 3126 } 3127 } 3128 3129 return SDValue(); 3130 } 3131 3132 // We need to specifically handle i64 mul here to avoid unnecessary conversion 3133 // instructions. If we only match on the legalized i64 mul expansion, 3134 // SimplifyDemandedBits will be unable to remove them because there will be 3135 // multiple uses due to the separate mul + mulh[su]. 3136 static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL, 3137 SDValue N0, SDValue N1, unsigned Size, bool Signed) { 3138 if (Size <= 32) { 3139 unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24; 3140 return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1); 3141 } 3142 3143 // Because we want to eliminate extension instructions before the 3144 // operation, we need to create a single user here (i.e. not the separate 3145 // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it. 3146 3147 unsigned MulOpc = Signed ? 
// We need to specifically handle i64 mul here to avoid unnecessary conversion
// instructions. If we only match on the legalized i64 mul expansion,
// SimplifyDemandedBits will be unable to remove them because there will be
// multiple uses due to the separate mul + mulh[su].
static SDValue getMul24(SelectionDAG &DAG, const SDLoc &SL,
                        SDValue N0, SDValue N1, unsigned Size, bool Signed) {
  if (Size <= 32) {
    unsigned MulOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
    return DAG.getNode(MulOpc, SL, MVT::i32, N0, N1);
  }

  // Because we want to eliminate extension instructions before the
  // operation, we need to create a single user here (i.e. not the separate
  // mul_lo + mul_hi) so that SimplifyDemandedBits will deal with it.

  unsigned MulOpc = Signed ? AMDGPUISD::MUL_LOHI_I24 : AMDGPUISD::MUL_LOHI_U24;

  SDValue Mul = DAG.getNode(MulOpc, SL,
                            DAG.getVTList(MVT::i32, MVT::i32), N0, N1);

  return DAG.getNode(ISD::BUILD_PAIR, SL, MVT::i64,
                     Mul.getValue(0), Mul.getValue(1));
}

SDValue AMDGPUTargetLowering::performMulCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  unsigned Size = VT.getSizeInBits();
  if (VT.isVector() || Size > 64)
    return SDValue();

  // There are native i16 integer mul/mad instructions, so there is no benefit
  // in using the 24-bit multiply for types that fit in i16.
  if (Subtarget->has16BitInsts() && VT.getScalarType().bitsLE(MVT::i16))
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // SimplifyDemandedBits has the annoying habit of turning useful zero_extends
  // in the source into any_extends if the result of the mul is truncated. Since
  // we can assume the high bits are whatever we want, use the underlying value
  // to keep the unknown high bits from interfering.
  if (N0.getOpcode() == ISD::ANY_EXTEND)
    N0 = N0.getOperand(0);

  if (N1.getOpcode() == ISD::ANY_EXTEND)
    N1 = N1.getOperand(0);

  SDValue Mul;

  if (Subtarget->hasMulU24() && isU24(N0, DAG) && isU24(N1, DAG)) {
    N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);
    Mul = getMul24(DAG, DL, N0, N1, Size, false);
  } else if (Subtarget->hasMulI24() && isI24(N0, DAG) && isI24(N1, DAG)) {
    N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
    N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);
    Mul = getMul24(DAG, DL, N0, N1, Size, true);
  } else {
    return SDValue();
  }

  // We need to use sext even for MUL_U24, because MUL_U24 is used
  // for signed multiply of 8 and 16-bit types.
  return DAG.getSExtOrTrunc(Mul, DL, VT);
}
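// Sketch of the intended selection, assuming both operands are provably
// narrower than 24 bits: an i64 multiply such as (i64)a * (i64)b is emitted
// as the single two-result node
//   (mul_lohi_u24 a, b) -> (build_pair lo, hi)
// which maps onto v_mul_u32_u24 / v_mul_hi_u32_u24 instead of the generic
// 64-bit multiply expansion.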
SDValue AMDGPUTargetLowering::performMulhsCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (!Subtarget->hasMulI24() || VT.isVector())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (!isI24(N0, DAG) || !isI24(N1, DAG))
    return SDValue();

  N0 = DAG.getSExtOrTrunc(N0, DL, MVT::i32);
  N1 = DAG.getSExtOrTrunc(N1, DL, MVT::i32);

  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_I24, DL, MVT::i32, N0, N1);
  DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getSExtOrTrunc(Mulhi, DL, VT);
}

SDValue AMDGPUTargetLowering::performMulhuCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
  EVT VT = N->getValueType(0);

  if (!Subtarget->hasMulU24() || VT.isVector() || VT.getSizeInBits() > 32)
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  if (!isU24(N0, DAG) || !isU24(N1, DAG))
    return SDValue();

  N0 = DAG.getZExtOrTrunc(N0, DL, MVT::i32);
  N1 = DAG.getZExtOrTrunc(N1, DL, MVT::i32);

  SDValue Mulhi = DAG.getNode(AMDGPUISD::MULHI_U24, DL, MVT::i32, N0, N1);
  DCI.AddToWorklist(Mulhi.getNode());
  return DAG.getZExtOrTrunc(Mulhi, DL, VT);
}

SDValue AMDGPUTargetLowering::performMulLoHi24Combine(
  SDNode *N, DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  // Simplify demanded bits before splitting into multiple users.
  if (simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  bool Signed = (N->getOpcode() == AMDGPUISD::MUL_LOHI_I24);

  unsigned MulLoOpc = Signed ? AMDGPUISD::MUL_I24 : AMDGPUISD::MUL_U24;
  unsigned MulHiOpc = Signed ? AMDGPUISD::MULHI_I24 : AMDGPUISD::MULHI_U24;

  SDLoc SL(N);

  SDValue MulLo = DAG.getNode(MulLoOpc, SL, MVT::i32, N0, N1);
  SDValue MulHi = DAG.getNode(MulHiOpc, SL, MVT::i32, N0, N1);
  return DAG.getMergeValues({ MulLo, MulHi }, SL);
}

static bool isNegativeOne(SDValue Val) {
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Val))
    return C->isAllOnesValue();
  return false;
}

SDValue AMDGPUTargetLowering::getFFBX_U32(SelectionDAG &DAG,
                                          SDValue Op,
                                          const SDLoc &DL,
                                          unsigned Opc) const {
  EVT VT = Op.getValueType();
  EVT LegalVT = getTypeToTransformTo(*DAG.getContext(), VT);
  if (LegalVT != MVT::i32 && (Subtarget->has16BitInsts() &&
                              LegalVT != MVT::i16))
    return SDValue();

  if (VT != MVT::i32)
    Op = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, Op);

  SDValue FFBX = DAG.getNode(Opc, DL, MVT::i32, Op);
  if (VT != MVT::i32)
    FFBX = DAG.getNode(ISD::TRUNCATE, DL, VT, FFBX);

  return FFBX;
}
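// Worked example of the combine below (illustrative only):
//   %c = setcc eq %x, 0
//   %z = ctlz_zero_undef %x
//   %r = select %c, -1, %z
// collapses to a single ffbh_u32 %x, because the hardware instruction
// already returns -1 for a zero input.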
// The native instructions return -1 on 0 input. Optimize out a select that
// produces -1 on 0.
//
// TODO: If zero is not undef, we could also do this if the output is compared
// against the bitwidth.
//
// TODO: Should probably combine against FFBH_U32 instead of ctlz directly.
SDValue AMDGPUTargetLowering::performCtlz_CttzCombine(const SDLoc &SL, SDValue Cond,
                                                      SDValue LHS, SDValue RHS,
                                                      DAGCombinerInfo &DCI) const {
  ConstantSDNode *CmpRhs = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
  if (!CmpRhs || !CmpRhs->isNullValue())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  ISD::CondCode CCOpcode = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  SDValue CmpLHS = Cond.getOperand(0);

  // select (setcc x, 0, eq), -1, (ctlz_zero_undef x) -> ffbh_u32 x
  // select (setcc x, 0, eq), -1, (cttz_zero_undef x) -> ffbl_b32 x
  if (CCOpcode == ISD::SETEQ &&
      (isCtlzOpc(RHS.getOpcode()) || isCttzOpc(RHS.getOpcode())) &&
      RHS.getOperand(0) == CmpLHS &&
      isNegativeOne(LHS)) {
    unsigned Opc = isCttzOpc(RHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
                                                AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }

  // select (setcc x, 0, ne), (ctlz_zero_undef x), -1 -> ffbh_u32 x
  // select (setcc x, 0, ne), (cttz_zero_undef x), -1 -> ffbl_b32 x
  if (CCOpcode == ISD::SETNE &&
      (isCtlzOpc(LHS.getOpcode()) || isCttzOpc(LHS.getOpcode())) &&
      LHS.getOperand(0) == CmpLHS &&
      isNegativeOne(RHS)) {
    unsigned Opc = isCttzOpc(LHS.getOpcode()) ? AMDGPUISD::FFBL_B32 :
                                                AMDGPUISD::FFBH_U32;
    return getFFBX_U32(DAG, CmpLHS, SL, Opc);
  }

  return SDValue();
}

static SDValue distributeOpThroughSelect(TargetLowering::DAGCombinerInfo &DCI,
                                         unsigned Op,
                                         const SDLoc &SL,
                                         SDValue Cond,
                                         SDValue N1,
                                         SDValue N2) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N1.getValueType();

  SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT, Cond,
                                  N1.getOperand(0), N2.getOperand(0));
  DCI.AddToWorklist(NewSelect.getNode());
  return DAG.getNode(Op, SL, VT, NewSelect);
}
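// For instance (a sketch of the helper above): select c, (fneg x), (fneg y)
// is rebuilt as (fneg (select c, x, y)), leaving one free fneg that can later
// fold into users of the select as a source modifier.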
// Pull a free FP operation out of a select so it may fold into uses.
//
// select c, (fneg x), (fneg y) -> fneg (select c, x, y)
// select c, (fneg x), k -> fneg (select c, x, (fneg k))
//
// select c, (fabs x), (fabs y) -> fabs (select c, x, y)
// select c, (fabs x), +k -> fabs (select c, x, k)
static SDValue foldFreeOpFromSelect(TargetLowering::DAGCombinerInfo &DCI,
                                    SDValue N) {
  SelectionDAG &DAG = DCI.DAG;
  SDValue Cond = N.getOperand(0);
  SDValue LHS = N.getOperand(1);
  SDValue RHS = N.getOperand(2);

  EVT VT = N.getValueType();
  if ((LHS.getOpcode() == ISD::FABS && RHS.getOpcode() == ISD::FABS) ||
      (LHS.getOpcode() == ISD::FNEG && RHS.getOpcode() == ISD::FNEG)) {
    return distributeOpThroughSelect(DCI, LHS.getOpcode(),
                                     SDLoc(N), Cond, LHS, RHS);
  }

  bool Inv = false;
  if (RHS.getOpcode() == ISD::FABS || RHS.getOpcode() == ISD::FNEG) {
    std::swap(LHS, RHS);
    Inv = true;
  }

  // TODO: Support vector constants.
  ConstantFPSDNode *CRHS = dyn_cast<ConstantFPSDNode>(RHS);
  if ((LHS.getOpcode() == ISD::FNEG || LHS.getOpcode() == ISD::FABS) && CRHS) {
    SDLoc SL(N);
    // If one side is an fneg/fabs and the other is a constant, we can push the
    // fneg/fabs down. If it's an fabs, the constant needs to be non-negative.
    SDValue NewLHS = LHS.getOperand(0);
    SDValue NewRHS = RHS;

    // Careful: if the neg can be folded up, don't try to pull it back down.
    bool ShouldFoldNeg = true;

    if (NewLHS.hasOneUse()) {
      unsigned Opc = NewLHS.getOpcode();
      if (LHS.getOpcode() == ISD::FNEG && fnegFoldsIntoOp(Opc))
        ShouldFoldNeg = false;
      if (LHS.getOpcode() == ISD::FABS && Opc == ISD::FMUL)
        ShouldFoldNeg = false;
    }

    if (ShouldFoldNeg) {
      if (LHS.getOpcode() == ISD::FNEG)
        NewRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
      else if (CRHS->isNegative())
        return SDValue();

      if (Inv)
        std::swap(NewLHS, NewRHS);

      SDValue NewSelect = DAG.getNode(ISD::SELECT, SL, VT,
                                      Cond, NewLHS, NewRHS);
      DCI.AddToWorklist(NewSelect.getNode());
      return DAG.getNode(LHS.getOpcode(), SL, VT, NewSelect);
    }
  }

  return SDValue();
}

SDValue AMDGPUTargetLowering::performSelectCombine(SDNode *N,
                                                   DAGCombinerInfo &DCI) const {
  if (SDValue Folded = foldFreeOpFromSelect(DCI, SDValue(N, 0)))
    return Folded;

  SDValue Cond = N->getOperand(0);
  if (Cond.getOpcode() != ISD::SETCC)
    return SDValue();

  EVT VT = N->getValueType(0);
  SDValue LHS = Cond.getOperand(0);
  SDValue RHS = Cond.getOperand(1);
  SDValue CC = Cond.getOperand(2);

  SDValue True = N->getOperand(1);
  SDValue False = N->getOperand(2);

  if (Cond.hasOneUse()) { // TODO: Look for multiple select uses.
    SelectionDAG &DAG = DCI.DAG;
    if (DAG.isConstantValueOfAnyType(True) &&
        !DAG.isConstantValueOfAnyType(False)) {
      // Swap cmp + select pair to move constant to false input.
      // This will allow using VOPC cndmasks more often.
      // select (setcc x, y, cc), k, z -> select (setcc x, y, inv(cc)), z, k

      SDLoc SL(N);
      ISD::CondCode NewCC = getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                                            LHS.getValueType().isInteger());

      SDValue NewCond = DAG.getSetCC(SL, Cond.getValueType(), LHS, RHS, NewCC);
      return DAG.getNode(ISD::SELECT, SL, VT, NewCond, False, True);
    }

    if (VT == MVT::f32 && Subtarget->hasFminFmaxLegacy()) {
      SDValue MinMax
        = combineFMinMaxLegacy(SDLoc(N), VT, LHS, RHS, True, False, CC, DCI);
      // Revisit this node so we can catch min3/max3/med3 patterns.
      //DCI.AddToWorklist(MinMax.getNode());
      return MinMax;
    }
  }

  // There's no reason not to do this if the condition has other uses.
  return performCtlz_CttzCombine(SDLoc(N), Cond, True, False, DCI);
}

static bool isConstantFPZero(SDValue N) {
  if (const ConstantFPSDNode *C = isConstOrConstSplatFP(N))
    return C->isZero() && !C->isNegative();
  return false;
}

static unsigned inverseMinMax(unsigned Opc) {
  switch (Opc) {
  case ISD::FMAXNUM:
    return ISD::FMINNUM;
  case ISD::FMINNUM:
    return ISD::FMAXNUM;
  case AMDGPUISD::FMAX_LEGACY:
    return AMDGPUISD::FMIN_LEGACY;
  case AMDGPUISD::FMIN_LEGACY:
    return AMDGPUISD::FMAX_LEGACY;
  default:
    llvm_unreachable("invalid min/max opcode");
  }
}
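// Example of how inverseMinMax is used by the fneg combine below
// (illustrative): fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y). The
// rewrite is exact because fneg is a pure sign flip, and both new fnegs can
// fold into the min/max as source modifiers.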
SDValue AMDGPUTargetLowering::performFNegCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  unsigned Opc = N0.getOpcode();

  // If the input has multiple uses and we can either fold the negate down, or
  // the other uses cannot, give up. This both prevents unprofitable
  // transformations and infinite loops: we won't repeatedly try to fold around
  // a negate that has no 'good' form.
  if (N0.hasOneUse()) {
    // This may be able to fold into the source, but at a code size cost. Don't
    // fold if the fold into the user is free.
    if (allUsesHaveSourceMods(N, 0))
      return SDValue();
  } else {
    if (fnegFoldsIntoOp(Opc) &&
        (allUsesHaveSourceMods(N) || !allUsesHaveSourceMods(N0.getNode())))
      return SDValue();
  }

  SDLoc SL(N);
  switch (Opc) {
  case ISD::FADD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();

    // (fneg (fadd x, y)) -> (fadd (fneg x), (fneg y))
    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    if (LHS.getOpcode() != ISD::FNEG)
      LHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    else
      LHS = LHS.getOperand(0);

    if (RHS.getOpcode() != ISD::FNEG)
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);

    SDValue Res = DAG.getNode(ISD::FADD, SL, VT, LHS, RHS, N0->getFlags());
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMUL:
  case AMDGPUISD::FMUL_LEGACY: {
    // (fneg (fmul x, y)) -> (fmul x, (fneg y))
    // (fneg (fmul_legacy x, y)) -> (fmul_legacy x, (fneg y))
    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    if (LHS.getOpcode() == ISD::FNEG)
      LHS = LHS.getOperand(0);
    else if (RHS.getOpcode() == ISD::FNEG)
      RHS = RHS.getOperand(0);
    else
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);

    SDValue Res = DAG.getNode(Opc, SL, VT, LHS, RHS, N0->getFlags());
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMA:
  case ISD::FMAD: {
    if (!mayIgnoreSignedZero(N0))
      return SDValue();

    // (fneg (fma x, y, z)) -> (fma x, (fneg y), (fneg z))
    SDValue LHS = N0.getOperand(0);
    SDValue MHS = N0.getOperand(1);
    SDValue RHS = N0.getOperand(2);

    if (LHS.getOpcode() == ISD::FNEG)
      LHS = LHS.getOperand(0);
    else if (MHS.getOpcode() == ISD::FNEG)
      MHS = MHS.getOperand(0);
    else
      MHS = DAG.getNode(ISD::FNEG, SL, VT, MHS);

    if (RHS.getOpcode() != ISD::FNEG)
      RHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    else
      RHS = RHS.getOperand(0);

    SDValue Res = DAG.getNode(Opc, SL, VT, LHS, MHS, RHS);
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FMAXNUM:
  case ISD::FMINNUM:
  case AMDGPUISD::FMAX_LEGACY:
  case AMDGPUISD::FMIN_LEGACY: {
    // fneg (fmaxnum x, y) -> fminnum (fneg x), (fneg y)
    // fneg (fminnum x, y) -> fmaxnum (fneg x), (fneg y)
    // fneg (fmax_legacy x, y) -> fmin_legacy (fneg x), (fneg y)
    // fneg (fmin_legacy x, y) -> fmax_legacy (fneg x), (fneg y)

    SDValue LHS = N0.getOperand(0);
    SDValue RHS = N0.getOperand(1);

    // 0 doesn't have a negated inline immediate.
    // TODO: Shouldn't fold 1/2pi either, and should be generalized to other
    // operations.
    if (isConstantFPZero(RHS))
      return SDValue();

    SDValue NegLHS = DAG.getNode(ISD::FNEG, SL, VT, LHS);
    SDValue NegRHS = DAG.getNode(ISD::FNEG, SL, VT, RHS);
    unsigned Opposite = inverseMinMax(Opc);

    SDValue Res = DAG.getNode(Opposite, SL, VT, NegLHS, NegRHS, N0->getFlags());
    if (!N0.hasOneUse())
      DAG.ReplaceAllUsesWith(N0, DAG.getNode(ISD::FNEG, SL, VT, Res));
    return Res;
  }
  case ISD::FP_EXTEND:
  case ISD::FTRUNC:
  case ISD::FRINT:
  case ISD::FNEARBYINT: // XXX - Should fround be handled?
  case ISD::FSIN:
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_LEGACY:
  case AMDGPUISD::RCP_IFLAG:
  case AMDGPUISD::SIN_HW: {
    SDValue CvtSrc = N0.getOperand(0);
    if (CvtSrc.getOpcode() == ISD::FNEG) {
      // (fneg (fp_extend (fneg x))) -> (fp_extend x)
      // (fneg (rcp (fneg x))) -> (rcp x)
      return DAG.getNode(Opc, SL, VT, CvtSrc.getOperand(0));
    }

    if (!N0.hasOneUse())
      return SDValue();

    // (fneg (fp_extend x)) -> (fp_extend (fneg x))
    // (fneg (rcp x)) -> (rcp (fneg x))
    SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(Opc, SL, VT, Neg, N0->getFlags());
  }
  case ISD::FP_ROUND: {
    SDValue CvtSrc = N0.getOperand(0);

    if (CvtSrc.getOpcode() == ISD::FNEG) {
      // (fneg (fp_round (fneg x))) -> (fp_round x)
      return DAG.getNode(ISD::FP_ROUND, SL, VT,
                         CvtSrc.getOperand(0), N0.getOperand(1));
    }

    if (!N0.hasOneUse())
      return SDValue();

    // (fneg (fp_round x)) -> (fp_round (fneg x))
    SDValue Neg = DAG.getNode(ISD::FNEG, SL, CvtSrc.getValueType(), CvtSrc);
    return DAG.getNode(ISD::FP_ROUND, SL, VT, Neg, N0.getOperand(1));
  }
  case ISD::FP16_TO_FP: {
    // v_cvt_f32_f16 supports source modifiers on pre-VI targets without legal
    // f16, but legalization of f16 fneg ends up pulling it out of the source.
    // Put the fneg back as a legal source operation that can be matched later.
    SDLoc SL(N);

    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // fneg (fp16_to_fp x) -> fp16_to_fp (xor x, 0x8000)
    SDValue IntFNeg = DAG.getNode(ISD::XOR, SL, SrcVT, Src,
                                  DAG.getConstant(0x8000, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFNeg);
  }
  default:
    return SDValue();
  }
}

SDValue AMDGPUTargetLowering::performFAbsCombine(SDNode *N,
                                                 DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDValue N0 = N->getOperand(0);

  if (!N0.hasOneUse())
    return SDValue();

  switch (N0.getOpcode()) {
  case ISD::FP16_TO_FP: {
    assert(!Subtarget->has16BitInsts() && "should only see if f16 is illegal");
    SDLoc SL(N);
    SDValue Src = N0.getOperand(0);
    EVT SrcVT = Src.getValueType();

    // fabs (fp16_to_fp x) -> fp16_to_fp (and x, 0x7fff)
    SDValue IntFAbs = DAG.getNode(ISD::AND, SL, SrcVT, Src,
                                  DAG.getConstant(0x7fff, SL, SrcVT));
    return DAG.getNode(ISD::FP16_TO_FP, SL, N->getValueType(0), IntFAbs);
  }
  default:
    return SDValue();
  }
}
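// Both fp16 combines above lean on the IEEE754 half layout: bit 15 of the
// i16 container is the sign bit. As a concrete example, fneg of 1.0h is
// 0x3c00 ^ 0x8000 = 0xbc00, and fabs of -1.0h is 0xbc00 & 0x7fff = 0x3c00.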
SDValue AMDGPUTargetLowering::performRcpCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  const auto *CFP = dyn_cast<ConstantFPSDNode>(N->getOperand(0));
  if (!CFP)
    return SDValue();

  // XXX - Should this flush denormals?
  const APFloat &Val = CFP->getValueAPF();
  APFloat One(Val.getSemantics(), "1.0");
  return DCI.DAG.getConstantFP(One / Val, SDLoc(N), N->getValueType(0));
}

SDValue AMDGPUTargetLowering::PerformDAGCombine(SDNode *N,
                                                DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);

  switch(N->getOpcode()) {
  default:
    break;
  case ISD::BITCAST: {
    EVT DestVT = N->getValueType(0);

    // Push casts through vector builds. This helps avoid emitting a large
    // number of copies when materializing floating point vector constants.
    //
    // vNt1 bitcast (vNt0 (build_vector t0:x, t0:y)) =>
    //   vNt1 = build_vector (t1 (bitcast t0:x)), (t1 (bitcast t0:y))
    if (DestVT.isVector()) {
      SDValue Src = N->getOperand(0);
      if (Src.getOpcode() == ISD::BUILD_VECTOR) {
        EVT SrcVT = Src.getValueType();
        unsigned NElts = DestVT.getVectorNumElements();

        if (SrcVT.getVectorNumElements() == NElts) {
          EVT DestEltVT = DestVT.getVectorElementType();

          SmallVector<SDValue, 8> CastedElts;
          SDLoc SL(N);
          for (unsigned I = 0, E = SrcVT.getVectorNumElements(); I != E; ++I) {
            SDValue Elt = Src.getOperand(I);
            CastedElts.push_back(DAG.getNode(ISD::BITCAST, DL, DestEltVT, Elt));
          }

          return DAG.getBuildVector(DestVT, SL, CastedElts);
        }
      }
    }

    if (DestVT.getSizeInBits() != 64 && !DestVT.isVector())
      break;

    // Fold bitcasts of constants.
    //
    // v2i32 (bitcast i64:k) -> build_vector lo_32(k), hi_32(k)
    // TODO: Generalize and move to DAGCombiner
    SDValue Src = N->getOperand(0);
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Src)) {
      if (Src.getValueType() == MVT::i64) {
        SDLoc SL(N);
        uint64_t CVal = C->getZExtValue();
        return DAG.getNode(ISD::BUILD_VECTOR, SL, DestVT,
                           DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                           DAG.getConstant(Hi_32(CVal), SL, MVT::i32));
      }
    }

    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Src)) {
      const APInt &Val = C->getValueAPF().bitcastToAPInt();
      SDLoc SL(N);
      uint64_t CVal = Val.getZExtValue();
      SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32,
                                DAG.getConstant(Lo_32(CVal), SL, MVT::i32),
                                DAG.getConstant(Hi_32(CVal), SL, MVT::i32));

      return DAG.getNode(ISD::BITCAST, SL, DestVT, Vec);
    }

    break;
  }
  case ISD::SHL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performShlCombine(N, DCI);
  }
  case ISD::SRL: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSrlCombine(N, DCI);
  }
  case ISD::SRA: {
    if (DCI.getDAGCombineLevel() < AfterLegalizeDAG)
      break;

    return performSraCombine(N, DCI);
  }
  case ISD::TRUNCATE:
    return performTruncateCombine(N, DCI);
  case ISD::MUL:
    return performMulCombine(N, DCI);
  case ISD::MULHS:
    return performMulhsCombine(N, DCI);
  case ISD::MULHU:
    return performMulhuCombine(N, DCI);
  case AMDGPUISD::MUL_I24:
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MULHI_I24:
  case AMDGPUISD::MULHI_U24: {
    // If the first call to simplify is successful, then N may end up being
    // deleted, so we shouldn't call simplifyI24 again.
    simplifyI24(N, 0, DCI) || simplifyI24(N, 1, DCI);
    return SDValue();
  }
  case AMDGPUISD::MUL_LOHI_I24:
  case AMDGPUISD::MUL_LOHI_U24:
    return performMulLoHi24Combine(N, DCI);
  case ISD::SELECT:
    return performSelectCombine(N, DCI);
  case ISD::FNEG:
    return performFNegCombine(N, DCI);
  case ISD::FABS:
    return performFAbsCombine(N, DCI);
  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    assert(!N->getValueType(0).isVector() &&
           "Vector handling of BFE not implemented");
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    uint32_t WidthVal = Width->getZExtValue() & 0x1f;
    if (WidthVal == 0)
      return DAG.getConstant(0, DL, MVT::i32);

    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    SDValue BitsFrom = N->getOperand(0);
    uint32_t OffsetVal = Offset->getZExtValue() & 0x1f;

    bool Signed = N->getOpcode() == AMDGPUISD::BFE_I32;

    if (OffsetVal == 0) {
      // This is already sign / zero extended, so try to fold away extra BFEs.
      unsigned SignBits = Signed ? (32 - WidthVal + 1) : (32 - WidthVal);

      unsigned OpSignBits = DAG.ComputeNumSignBits(BitsFrom);
      if (OpSignBits >= SignBits)
        return BitsFrom;

      EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), WidthVal);
      if (Signed) {
        // This is a sign_extend_inreg. Replace it to take advantage of
        // existing DAG Combines. If not eliminated, we will match back to BFE
        // during selection.

        // TODO: The sext_inreg of extended types ends up needing multiple
        // operations, although we could handle them in a single BFE.
        return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i32, BitsFrom,
                           DAG.getValueType(SmallVT));
      }

      return DAG.getZeroExtendInReg(BitsFrom, DL, SmallVT);
    }

    if (ConstantSDNode *CVal = dyn_cast<ConstantSDNode>(BitsFrom)) {
      if (Signed) {
        return constantFoldBFE<int32_t>(DAG,
                                        CVal->getSExtValue(),
                                        OffsetVal,
                                        WidthVal,
                                        DL);
      }

      return constantFoldBFE<uint32_t>(DAG,
                                       CVal->getZExtValue(),
                                       OffsetVal,
                                       WidthVal,
                                       DL);
    }

    if ((OffsetVal + WidthVal) >= 32 &&
        !(Subtarget->hasSDWA() && OffsetVal == 16 && WidthVal == 16)) {
      SDValue ShiftVal = DAG.getConstant(OffsetVal, DL, MVT::i32);
      return DAG.getNode(Signed ? ISD::SRA : ISD::SRL, DL, MVT::i32,
                         BitsFrom, ShiftVal);
    }

    if (BitsFrom.hasOneUse()) {
      APInt Demanded = APInt::getBitsSet(32,
                                         OffsetVal,
                                         OffsetVal + WidthVal);

      KnownBits Known;
      TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                            !DCI.isBeforeLegalizeOps());
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      if (TLI.ShrinkDemandedConstant(BitsFrom, Demanded, TLO) ||
          TLI.SimplifyDemandedBits(BitsFrom, Demanded, Known, TLO)) {
        DCI.CommitTargetLoweringOpt(TLO);
      }
    }

    break;
  }
  case ISD::LOAD:
    return performLoadCombine(N, DCI);
  case ISD::STORE:
    return performStoreCombine(N, DCI);
  case AMDGPUISD::RCP:
  case AMDGPUISD::RCP_IFLAG:
    return performRcpCombine(N, DCI);
  case ISD::AssertZext:
  case ISD::AssertSext:
    return performAssertSZExtCombine(N, DCI);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

SDValue AMDGPUTargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                                   const TargetRegisterClass *RC,
                                                   unsigned Reg, EVT VT,
                                                   const SDLoc &SL,
                                                   bool RawReg) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  unsigned VReg;

  if (!MRI.isLiveIn(Reg)) {
    VReg = MRI.createVirtualRegister(RC);
    MRI.addLiveIn(Reg, VReg);
  } else {
    VReg = MRI.getLiveInVirtReg(Reg);
  }

  if (RawReg)
    return DAG.getRegister(VReg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SL, VReg, VT);
}

SDValue AMDGPUTargetLowering::loadStackInputValue(SelectionDAG &DAG,
                                                  EVT VT,
                                                  const SDLoc &SL,
                                                  int64_t Offset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  int FI = MFI.CreateFixedObject(VT.getStoreSize(), Offset, true);
  auto SrcPtrInfo = MachinePointerInfo::getStack(MF, Offset);
  SDValue Ptr = DAG.getFrameIndex(FI, MVT::i32);

  return DAG.getLoad(VT, SL, DAG.getEntryNode(), Ptr, SrcPtrInfo, 4,
                     MachineMemOperand::MODereferenceable |
                     MachineMemOperand::MOInvariant);
}

SDValue AMDGPUTargetLowering::storeStackInputValue(SelectionDAG &DAG,
                                                   const SDLoc &SL,
                                                   SDValue Chain,
                                                   SDValue StackPtr,
                                                   SDValue ArgVal,
                                                   int64_t Offset) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachinePointerInfo DstInfo = MachinePointerInfo::getStack(MF, Offset);

  SDValue Ptr = DAG.getObjectPtrOffset(SL, StackPtr, Offset);
  SDValue Store = DAG.getStore(Chain, SL, ArgVal, Ptr, DstInfo, 4,
                               MachineMemOperand::MODereferenceable);
  return Store;
}

SDValue AMDGPUTargetLowering::loadInputValue(SelectionDAG &DAG,
                                             const TargetRegisterClass *RC,
                                             EVT VT, const SDLoc &SL,
                                             const ArgDescriptor &Arg) const {
  assert(Arg && "Attempting to load missing argument");

  if (Arg.isRegister())
    return CreateLiveInRegister(DAG, RC, Arg.getRegister(), VT, SL);
  return loadStackInputValue(DAG, VT, SL, Arg.getStackOffset());
}
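// Usage sketch (hypothetical call site, not from this file): a caller
// holding an ArgDescriptor for some i32 input would do
//   SDValue V = loadInputValue(DAG, &AMDGPU::SGPR_32RegClass, MVT::i32, SL, Arg);
// and transparently get either a live-in copy of the register or a
// fixed-offset stack load, depending on how the argument was allocated.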
uint32_t AMDGPUTargetLowering::getImplicitParameterOffset(
  const AMDGPUMachineFunction *MFI, const ImplicitParameter Param) const {
  unsigned Alignment = Subtarget->getAlignmentForImplicitArgPtr();
  uint64_t ArgOffset = alignTo(MFI->getABIArgOffset(), Alignment);
  switch (Param) {
  case GRID_DIM:
    return ArgOffset;
  case GRID_OFFSET:
    return ArgOffset + 4;
  }
  llvm_unreachable("unexpected implicit parameter type");
}

#define NODE_NAME_CASE(node) case AMDGPUISD::node: return #node;

const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AMDGPUISD::NodeType)Opcode) {
  case AMDGPUISD::FIRST_NUMBER: break;
  // AMDIL DAG nodes
  NODE_NAME_CASE(UMUL);
  NODE_NAME_CASE(BRANCH_COND);

  // AMDGPU DAG nodes
  NODE_NAME_CASE(IF)
  NODE_NAME_CASE(ELSE)
  NODE_NAME_CASE(LOOP)
  NODE_NAME_CASE(CALL)
  NODE_NAME_CASE(TC_RETURN)
  NODE_NAME_CASE(TRAP)
  NODE_NAME_CASE(RET_FLAG)
  NODE_NAME_CASE(RETURN_TO_EPILOG)
  NODE_NAME_CASE(ENDPGM)
  NODE_NAME_CASE(DWORDADDR)
  NODE_NAME_CASE(FRACT)
  NODE_NAME_CASE(SETCC)
  NODE_NAME_CASE(SETREG)
  NODE_NAME_CASE(FMA_W_CHAIN)
  NODE_NAME_CASE(FMUL_W_CHAIN)
  NODE_NAME_CASE(CLAMP)
  NODE_NAME_CASE(COS_HW)
  NODE_NAME_CASE(SIN_HW)
  NODE_NAME_CASE(FMAX_LEGACY)
  NODE_NAME_CASE(FMIN_LEGACY)
  NODE_NAME_CASE(FMAX3)
  NODE_NAME_CASE(SMAX3)
  NODE_NAME_CASE(UMAX3)
  NODE_NAME_CASE(FMIN3)
  NODE_NAME_CASE(SMIN3)
  NODE_NAME_CASE(UMIN3)
  NODE_NAME_CASE(FMED3)
  NODE_NAME_CASE(SMED3)
  NODE_NAME_CASE(UMED3)
  NODE_NAME_CASE(URECIP)
  NODE_NAME_CASE(DIV_SCALE)
  NODE_NAME_CASE(DIV_FMAS)
  NODE_NAME_CASE(DIV_FIXUP)
  NODE_NAME_CASE(FMAD_FTZ)
  NODE_NAME_CASE(TRIG_PREOP)
  NODE_NAME_CASE(RCP)
  NODE_NAME_CASE(RSQ)
  NODE_NAME_CASE(RCP_LEGACY)
  NODE_NAME_CASE(RSQ_LEGACY)
  NODE_NAME_CASE(RCP_IFLAG)
  NODE_NAME_CASE(FMUL_LEGACY)
  NODE_NAME_CASE(RSQ_CLAMP)
  NODE_NAME_CASE(LDEXP)
  NODE_NAME_CASE(FP_CLASS)
  NODE_NAME_CASE(DOT4)
  NODE_NAME_CASE(CARRY)
  NODE_NAME_CASE(BORROW)
  NODE_NAME_CASE(BFE_U32)
  NODE_NAME_CASE(BFE_I32)
  NODE_NAME_CASE(BFI)
  NODE_NAME_CASE(BFM)
  NODE_NAME_CASE(FFBH_U32)
  NODE_NAME_CASE(FFBH_I32)
  NODE_NAME_CASE(FFBL_B32)
  NODE_NAME_CASE(MUL_U24)
  NODE_NAME_CASE(MUL_I24)
  NODE_NAME_CASE(MULHI_U24)
  NODE_NAME_CASE(MULHI_I24)
  NODE_NAME_CASE(MUL_LOHI_U24)
  NODE_NAME_CASE(MUL_LOHI_I24)
  NODE_NAME_CASE(MAD_U24)
  NODE_NAME_CASE(MAD_I24)
  NODE_NAME_CASE(MAD_I64_I32)
  NODE_NAME_CASE(MAD_U64_U32)
  NODE_NAME_CASE(PERM)
  NODE_NAME_CASE(TEXTURE_FETCH)
  NODE_NAME_CASE(EXPORT)
  NODE_NAME_CASE(EXPORT_DONE)
  NODE_NAME_CASE(R600_EXPORT)
  NODE_NAME_CASE(CONST_ADDRESS)
  NODE_NAME_CASE(REGISTER_LOAD)
  NODE_NAME_CASE(REGISTER_STORE)
  NODE_NAME_CASE(SAMPLE)
  NODE_NAME_CASE(SAMPLEB)
  NODE_NAME_CASE(SAMPLED)
  NODE_NAME_CASE(SAMPLEL)
  NODE_NAME_CASE(CVT_F32_UBYTE0)
  NODE_NAME_CASE(CVT_F32_UBYTE1)
  NODE_NAME_CASE(CVT_F32_UBYTE2)
  NODE_NAME_CASE(CVT_F32_UBYTE3)
  NODE_NAME_CASE(CVT_PKRTZ_F16_F32)
  NODE_NAME_CASE(CVT_PKNORM_I16_F32)
  NODE_NAME_CASE(CVT_PKNORM_U16_F32)
  NODE_NAME_CASE(CVT_PK_I16_I32)
  NODE_NAME_CASE(CVT_PK_U16_U32)
  NODE_NAME_CASE(FP_TO_FP16)
  NODE_NAME_CASE(FP16_ZEXT)
  NODE_NAME_CASE(BUILD_VERTICAL_VECTOR)
  NODE_NAME_CASE(CONST_DATA_PTR)
  NODE_NAME_CASE(PC_ADD_REL_OFFSET)
  NODE_NAME_CASE(KILL)
  NODE_NAME_CASE(DUMMY_CHAIN)
  case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
  NODE_NAME_CASE(INIT_EXEC)
  NODE_NAME_CASE(INIT_EXEC_FROM_INPUT)
  NODE_NAME_CASE(SENDMSG)
  NODE_NAME_CASE(SENDMSGHALT)
  NODE_NAME_CASE(INTERP_MOV)
  NODE_NAME_CASE(INTERP_P1)
  NODE_NAME_CASE(INTERP_P2)
  NODE_NAME_CASE(STORE_MSKOR)
  NODE_NAME_CASE(LOAD_CONSTANT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT_X3)
  NODE_NAME_CASE(TBUFFER_STORE_FORMAT_D16)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(TBUFFER_LOAD_FORMAT_D16)
  NODE_NAME_CASE(ATOMIC_CMP_SWAP)
  NODE_NAME_CASE(ATOMIC_INC)
  NODE_NAME_CASE(ATOMIC_DEC)
  NODE_NAME_CASE(ATOMIC_LOAD_FADD)
  NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
  NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
  NODE_NAME_CASE(BUFFER_LOAD)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT)
  NODE_NAME_CASE(BUFFER_LOAD_FORMAT_D16)
  NODE_NAME_CASE(BUFFER_STORE)
  NODE_NAME_CASE(BUFFER_STORE_FORMAT)
  NODE_NAME_CASE(BUFFER_STORE_FORMAT_D16)
  NODE_NAME_CASE(BUFFER_ATOMIC_SWAP)
  NODE_NAME_CASE(BUFFER_ATOMIC_ADD)
  NODE_NAME_CASE(BUFFER_ATOMIC_SUB)
  NODE_NAME_CASE(BUFFER_ATOMIC_SMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_UMIN)
  NODE_NAME_CASE(BUFFER_ATOMIC_SMAX)
  NODE_NAME_CASE(BUFFER_ATOMIC_UMAX)
  NODE_NAME_CASE(BUFFER_ATOMIC_AND)
  NODE_NAME_CASE(BUFFER_ATOMIC_OR)
  NODE_NAME_CASE(BUFFER_ATOMIC_XOR)
  NODE_NAME_CASE(BUFFER_ATOMIC_CMPSWAP)

  case AMDGPUISD::LAST_AMDGPU_ISD_NUMBER: break;
  }
  return nullptr;
}

SDValue AMDGPUTargetLowering::getSqrtEstimate(SDValue Operand,
                                              SelectionDAG &DAG, int Enabled,
                                              int &RefinementSteps,
                                              bool &UseOneConstNR,
                                              bool Reciprocal) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RSQ, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rsq instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}

SDValue AMDGPUTargetLowering::getRecipEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &RefinementSteps) const {
  EVT VT = Operand.getValueType();

  if (VT == MVT::f32) {
    // Reciprocal, < 1 ulp error.
    //
    // This reciprocal approximation converges to < 0.5 ulp error with one
    // Newton-Raphson iteration performed with two fused multiply-adds (FMAs).

    RefinementSteps = 0;
    return DAG.getNode(AMDGPUISD::RCP, SDLoc(Operand), VT, Operand);
  }

  // TODO: There is also an f64 rcp instruction, but the documentation is less
  // clear on its precision.

  return SDValue();
}
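// Note on the two estimate hooks above: returning a node with
// RefinementSteps = 0 tells the generic combiner that v_rsq_f32 / v_rcp_f32
// are accurate enough on their own for f32, so no Newton-Raphson iterations
// are appended. For f64 both hooks decline and the default expansion is used.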
void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {

  Known.resetAll(); // Don't know anything.

  unsigned Opc = Op.getOpcode();

  switch (Opc) {
  default:
    break;
  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW: {
    Known.Zero = APInt::getHighBitsSet(32, 31);
    break;
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *CWidth = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CWidth)
      return;

    uint32_t Width = CWidth->getZExtValue() & 0x1f;

    if (Opc == AMDGPUISD::BFE_U32)
      Known.Zero = APInt::getHighBitsSet(32, 32 - Width);

    break;
  }
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT: {
    unsigned BitWidth = Known.getBitWidth();

    // High bits are zero.
    Known.Zero = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
    break;
  }
  case AMDGPUISD::MUL_U24:
  case AMDGPUISD::MUL_I24: {
    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1);
    DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1);

    unsigned TrailZ = LHSKnown.countMinTrailingZeros() +
                      RHSKnown.countMinTrailingZeros();
    Known.Zero.setLowBits(std::min(TrailZ, 32u));

    unsigned LHSValBits = 32 - std::max(LHSKnown.countMinSignBits(), 8u);
    unsigned RHSValBits = 32 - std::max(RHSKnown.countMinSignBits(), 8u);
    unsigned MaxValBits = std::min(LHSValBits + RHSValBits, 32u);
    if (MaxValBits >= 32)
      break;
    bool Negative = false;
    if (Opc == AMDGPUISD::MUL_I24) {
      bool LHSNegative = !!(LHSKnown.One & (1 << 23));
      bool LHSPositive = !!(LHSKnown.Zero & (1 << 23));
      bool RHSNegative = !!(RHSKnown.One & (1 << 23));
      bool RHSPositive = !!(RHSKnown.Zero & (1 << 23));
      if ((!LHSNegative && !LHSPositive) || (!RHSNegative && !RHSPositive))
        break;
      Negative = (LHSNegative && RHSPositive) || (LHSPositive && RHSNegative);
    }
    if (Negative)
      Known.One.setHighBits(32 - MaxValBits);
    else
      Known.Zero.setHighBits(32 - MaxValBits);
    break;
  }
  case AMDGPUISD::PERM: {
    ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!CMask)
      return;

    KnownBits LHSKnown, RHSKnown;
    DAG.computeKnownBits(Op.getOperand(0), LHSKnown, Depth + 1);
    DAG.computeKnownBits(Op.getOperand(1), RHSKnown, Depth + 1);
    unsigned Sel = CMask->getZExtValue();

    for (unsigned I = 0; I < 32; I += 8) {
      unsigned SelBits = Sel & 0xff;
      if (SelBits < 4) {
        SelBits *= 8;
        Known.One |= ((RHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((RHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits < 7) {
        SelBits = (SelBits & 3) * 8;
        Known.One |= ((LHSKnown.One.getZExtValue() >> SelBits) & 0xff) << I;
        Known.Zero |= ((LHSKnown.Zero.getZExtValue() >> SelBits) & 0xff) << I;
      } else if (SelBits == 0x0c) {
        Known.Zero |= 0xFFull << I;
      } else if (SelBits > 0x0c) {
        Known.One |= 0xFFull << I;
      }
      Sel >>= 8;
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IID) {
    case Intrinsic::amdgcn_mbcnt_lo:
    case Intrinsic::amdgcn_mbcnt_hi: {
      // These return at most the wavefront size - 1.
      unsigned Size = Op.getValueType().getSizeInBits();
      Known.Zero.setHighBits(Size - Subtarget->getWavefrontSizeLog2());
      break;
    }
    default:
      break;
    }
  }
  }
}

unsigned AMDGPUTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  case AMDGPUISD::BFE_I32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    if (!Width)
      return 1;

    unsigned SignBits = 32 - Width->getZExtValue() + 1;
    if (!isNullConstant(Op.getOperand(1)))
      return SignBits;

    // TODO: Could probably figure something out with non-0 offsets.
    unsigned Op0SignBits = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
    return std::max(SignBits, Op0SignBits);
  }

  case AMDGPUISD::BFE_U32: {
    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    return Width ? 32 - (Width->getZExtValue() & 0x1f) : 1;
  }

  case AMDGPUISD::CARRY:
  case AMDGPUISD::BORROW:
    return 31;
  case AMDGPUISD::FP_TO_FP16:
  case AMDGPUISD::FP16_ZEXT:
    return 16;
  default:
    return 1;
  }
}
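// Worked example for the BFE_I32 case above (illustrative): for
// (bfe_i32 x, 0, 8) the extracted byte is sign-extended from bit 7, so at
// least 32 - 8 + 1 = 25 sign bits are known; with a zero offset the result
// may improve further if x itself has more known sign bits.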