//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  const LLT S1 = LLT::scalar(1);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  const LLT CodePtr = FlatPtr;

  const LLT AddrSpaces[] = {
    GlobalPtr,
    ConstantPtr,
    LocalPtr,
    FlatPtr,
    PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);
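  // As a rough sketch of what these rules produce (the expected expansion,
  // not verified legalizer output): a vector add such as
  //   %2:_(<2 x s16>) = G_ADD %0:_(<2 x s16>), %1:_(<2 x s16>)
  // is first scalarized into s16 adds, and each s16 add is then widened to
  // s32, roughly G_ANYEXT of both inputs, a 32-bit G_ADD, and a G_TRUNC of
  // the result.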
  // Report legal for any types we can handle anywhere. For the cases only
  // legal on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .scalarize(0);

  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16});

  // G_IMPLICIT_DEF is a no-op so we can make it legal for any value type that
  // can fit in a register.
  // FIXME: We need to legalize several more operations before we can add
  // a test case for size > 512.
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getSizeInBits() <= 512;
    })
    .clampScalar(0, S1, S512);

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal. We need to figure out how to distinguish
  // between these two scenarios.
  // FIXME: Pointer types
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0);

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  getActionDefinitionsBuilder({G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
    .legalFor({S32, S64})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S128, S32}, {S128, S64}, {S32, LLT::scalar(24)}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
    .legalFor({S32, S64})
    .scalarize(0);

  for (LLT PtrTy : AddrSpaces) {
    LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
    setAction({G_GEP, PtrTy}, Legal);
    setAction({G_GEP, 1, IdxTy}, Legal);
  }
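  // The offset operand of G_GEP is expected to match the pointer width, e.g.
  // (a sketch; p1 and p3 are the 64-bit global and 32-bit local address
  // spaces):
  //   %g:_(p1) = G_GEP %gptr:_(p1), %goff:_(s64)
  //   %l:_(p3) = G_GEP %lptr:_(p3), %loff:_(s32)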
  // FIXME: When RegBankSelect inserts copies, it will only create new
  // registers with scalar types. This means we can end up with
  // G_LOAD/G_STORE/G_GEP instructions with scalar types for their pointer
  // operands. In assert builds, the instruction selector will assert if it
  // sees a generic instruction which isn't legal, so we need to tell it that
  // scalar types are legal for pointer operands.
  setAction({G_GEP, S64}, Legal);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  getActionDefinitionsBuilder({G_ICMP, G_FCMP})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 need to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // The 64-bit versions produce 32-bit results, but only on the SALU.
  getActionDefinitionsBuilder({G_CTLZ, G_CTLZ_ZERO_UNDEF,
                               G_CTTZ, G_CTTZ_ZERO_UNDEF,
                               G_CTPOP})
    .legalFor({{S32, S32}, {S32, S64}})
    .clampScalar(0, S32, S32)
    .clampScalar(1, S32, S64);
  // TODO: Scalarize

  // TODO: Expand for > s32
  getActionDefinitionsBuilder(G_BSWAP)
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  getActionDefinitionsBuilder(G_INTTOPTR)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  getActionDefinitionsBuilder(G_PTRTOINT)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });
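  // A sketch of the narrowing rule below (the intent, not verified legalizer
  // output): a load whose memory size is smaller than its wide result, e.g.
  //   %v:_(s64) = G_LOAD %p:_(p1) :: (load 2)
  // first has its result narrowed to s32; the remaining s32 load with a
  // 16-bit memory access is then reported legal.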
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .narrowScalarIf([](const LegalityQuery &Query) {
        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (Size > 32 && MemSize < Size);
      },
      [](const LegalityQuery &Query) {
        return std::make_pair(0, LLT::scalar(32));
      })
    .fewerElementsIf([=, &ST](const LegalityQuery &Query) {
        unsigned MemSize = Query.MMODescrs[0].SizeInBits;
        return (MemSize == 96) &&
               Query.Types[0].isVector() &&
               ST.getGeneration() < AMDGPUSubtarget::SEA_ISLANDS;
      },
      [=](const LegalityQuery &Query) {
        return std::make_pair(0, V2S32);
      })
    .legalIf([=, &ST](const LegalityQuery &Query) {
      const LLT &Ty0 = Query.Types[0];

      unsigned Size = Ty0.getSizeInBits();
      unsigned MemSize = Query.MMODescrs[0].SizeInBits;
      if (Size > 32 && MemSize < Size)
        return false;

      if (Ty0.isVector() && Size != MemSize)
        return false;

      // TODO: Decompose private loads into 4-byte components.
      // TODO: Illegal flat loads on SI
      switch (MemSize) {
      case 8:
      case 16:
        return Size == 32;
      case 32:
      case 64:
      case 128:
        return true;

      case 96:
        // XXX hasLoadX3
        return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

      case 256:
      case 512:
        // TODO: constant loads
      default:
        return false;
      }
    })
    .clampScalar(0, S32, S64);

  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
                     .legalForTypesWithMemSize({{S32, GlobalPtr, 8},
                                                {S32, GlobalPtr, 16},
                                                {S32, LocalPtr, 8},
                                                {S32, LocalPtr, 16},
                                                {S32, PrivatePtr, 8},
                                                {S32, PrivatePtr, 16}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemSize({{S32, FlatPtr, 8},
                                       {S32, FlatPtr, 16}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalFor({{S32, S1}, {S64, S1}, {V2S32, S1}, {V2S16, S1}})
    .clampScalar(0, S32, S64)
    .fewerElementsIf(
      [=](const LegalityQuery &Query) {
        if (Query.Types[1].isVector())
          return true;

        LLT Ty = Query.Types[0];

        // FIXME: Hack until odd splits handled
        return Ty.isVector() &&
               (Ty.getScalarSizeInBits() > 32 || Ty.getNumElements() % 2 != 0);
      },
      scalarize(0))
    // FIXME: Handle 16-bit vectors better
    .fewerElementsIf(
      [=](const LegalityQuery &Query) {
        return Query.Types[0].isVector() &&
               Query.Types[0].getElementType().getSizeInBits() < 32;
      },
      scalarize(0))
    .scalarize(1)
    .clampMaxNumElements(0, S32, 2);

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts()) {
    Shifts.legalFor({{S16, S32}, {S16, S16}});
    Shifts.clampScalar(0, S16, S64);
  } else
    Shifts.clampScalar(0, S32, S64);
  Shifts.clampScalar(1, S32, S32);

  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[VecTypeIdx];
        const LLT &IdxTy = Query.Types[IdxTypeIdx];
        return VecTy.getSizeInBits() % 32 == 0 &&
               VecTy.getSizeInBits() <= 512 &&
               IdxTy.getSizeInBits() == 32;
      })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
      const LLT &EltTy = Query.Types[1].getElementType();
      return Query.Types[0] != EltTy;
    });
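  // A sketch of the rules above (not verified output): extracting from a
  // 96-bit vector with a 32-bit index is reported legal, e.g.
  //   %e:_(s32) = G_EXTRACT_VECTOR_ELT %v:_(<3 x s32>), %i:_(s32)
  // while a result type that does not match the vector's element type is
  // explicitly unsupported.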
  // FIXME: Doesn't handle extract of illegal sizes.
  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
    .legalIf([=](const LegalityQuery &Query) {
      const LLT &Ty0 = Query.Types[0];
      const LLT &Ty1 = Query.Types[1];
      return (Ty0.getSizeInBits() % 16 == 0) &&
             (Ty1.getSizeInBits() % 16 == 0);
    });

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() < 32;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's
      // not worth considering the multiples of 64 since 2*192 and 2*384 are
      // not valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)

      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128,
          // whichever is smaller. For example, s65 widens to s128 (the next
          // power of 2), but s260 widens to s320 (the next multiple of 64)
          // rather than all the way to s512.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &BigTy = Query.Types[BigTyIdx];
        const LLT &LitTy = Query.Types[LitTyIdx];

        if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
          return false;
        if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
          return false;

        return BigTy.getSizeInBits() % 16 == 0 &&
               LitTy.getSizeInBits() % 16 == 0 &&
               BigTy.getSizeInBits() <= 512;
      })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}