//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalizeMutations;
using namespace LegalityPredicates;

/// Builds the GlobalISel legalization rule tables for the GCN subtarget.
///
/// \param ST  subtarget queried for generation / feature checks (16-bit
///            instructions, flat address space, ...).
/// \param TM  target machine, used only for per-address-space pointer widths.
///
/// NOTE: within a single getActionDefinitionsBuilder() chain the order of the
/// rules is significant — earlier rules take precedence — so reordering the
/// calls below can change legalization behavior.
AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  // Build a pointer LLT for address space AS with the width the target
  // machine reports for that space.
  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  // Scalar types referenced by the rules below.
  const LLT S1 = LLT::scalar(1);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S128 = LLT::scalar(128);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  // 16-bit element vectors.
  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  // 32-bit element vectors, up to the 512-bit (16 x s32) register limit used
  // throughout this file.
  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  // 64-bit element vectors, up to 512 bits (8 x s64).
  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  // Convenience lists for legalForCartesianProduct in G_BUILD_VECTOR below.
  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  // Pointer types for each address space the rules below care about.
  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  // Code addresses (G_BLOCK_ADDR) are represented as flat pointers.
  const LLT CodePtr = FlatPtr;

  // Address spaces for which G_GEP is legalized in the loop below.
  const LLT AddrSpaces[] = {
    GlobalPtr,
    ConstantPtr,
    LocalPtr,
    FlatPtr,
    PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  // Integer arithmetic: only 32-bit scalars are natively handled; everything
  // else is clamped to s32 and vectors are scalarized.
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_UMULH, G_SMULH})
    .legalFor({S32})
    .clampScalar(0, S32, S32)
    .scalarize(0);

  // Report legal for any types we can handle anywhere. For the cases only legal
  // on the SALU, RegBankSelect will be able to re-legalize.
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32, V2S16, V4S16})
    .clampScalar(0, S32, S64)
    .scalarize(0);

  // Carry/borrow ops: s32 result with an s1 carry.
  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}})
    .clampScalar(0, S32, S32);

  // Bitcasts between same-sized scalar/vector types, grouped by bit width.
  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16});

  // G_IMPLICIT_DEF is a no-op so we can make it legal for any value type that
  // can fit in a register.
  // FIXME: We need to legalize several more operations before we can add
  // a test case for size > 512.
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getSizeInBits() <= 512;
    })
    .clampScalar(0, S1, S512);

  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal.  We need to figure out how to distinguish
  // between these two scenarios.
  // FIXME: Pointer types
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0);

  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  // Basic FP arithmetic is legal on f32/f64; other widths clamp, vectors
  // scalarize.
  getActionDefinitionsBuilder({G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
    .legalFor({S32, S64})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  // FP truncation: f64->f32 and f32->f16.
  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}})
    .scalarize(0);

  // FP extension: f32->f64 and f16->f32; f16->f64 must be lowered (via f32).
  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}) // FIXME: Implement
    .scalarize(0);

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .scalarize(0)
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1},
               // FIXME: Hack
               {S128, S32}})
    .scalarize(0);

  // int -> FP conversions from s32 sources only.
  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}})
    .scalarize(0);

  // FP -> int conversions producing s32 results only.
  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}})
    .scalarize(0);

  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
    .legalFor({S32, S64})
    .scalarize(0);

  // G_GEP is legal for each supported address space, with an index type the
  // same width as the pointer.
  for (LLT PtrTy : AddrSpaces) {
    LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
    setAction({G_GEP, PtrTy}, Legal);
    setAction({G_GEP, 1, IdxTy}, Legal);
  }

  // FIXME: When RegBankSelect inserts copies, it will only create new
  // registers with scalar types.  This means we can end up with
  // G_LOAD/G_STORE/G_GEP instruction with scalar types for their pointer
  // operands.  In assert builds, the instruction selector will assert if it
  // sees a generic instruction which isn't legal, so we need to tell it that
  // scalar types are legal for pointer operands.
  setAction({G_GEP, S64}, Legal);

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  // Comparisons produce an s1 result from s32/s64 operands; odd-sized
  // operands are first widened to a power of 2, then clamped.
  getActionDefinitionsBuilder({G_ICMP, G_FCMP})
    .legalFor({{S1, S32}, {S1, S64}})
    .widenScalarToNextPow2(1)
    .clampScalar(1, S32, S64)
    .scalarize(0);

  // FIXME: fexp, flog2, flog10 needs to be custom lowered.
  getActionDefinitionsBuilder({G_FPOW, G_FEXP, G_FEXP2,
                               G_FLOG, G_FLOG2, G_FLOG10})
    .legalFor({S32})
    .scalarize(0);

  // 32-bit bit-manipulation ops are natively supported.
  setAction({G_CTLZ, S32}, Legal);
  setAction({G_CTLZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_CTTZ, S32}, Legal);
  setAction({G_CTTZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_BSWAP, S32}, Legal);
  setAction({G_CTPOP, S32}, Legal);

  // Pointer/integer conversions are unconditionally legal; no size or
  // address-space restrictions are enforced here.
  getActionDefinitionsBuilder(G_INTTOPTR)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  getActionDefinitionsBuilder(G_PTRTOINT)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  // Loads/stores are legalized purely on the total value size here; the
  // pointer type (operand 1) is not inspected.
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .legalIf([=, &ST](const LegalityQuery &Query) {
      const LLT &Ty0 = Query.Types[0];

      // TODO: Decompose private loads into 4-byte components.
      // TODO: Illegal flat loads on SI
      switch (Ty0.getSizeInBits()) {
      case 32:
      case 64:
      case 128:
        return true;

      case 96:
        // XXX hasLoadX3
        return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

      case 256:
      case 512:
        // TODO: constant loads
        // NOTE: deliberate fallthrough into the default (unsupported) case
        // until the TODO above is implemented.
      default:
        return false;
      }
    });

  // Extending loads: 8- and 16-bit memory accesses extended to s32 are legal
  // for global/local/private, and for flat when the subtarget has a flat
  // address space.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
    .legalForTypesWithMemSize({
        {S32, GlobalPtr, 8},
        {S32, GlobalPtr, 16},
        {S32, LocalPtr, 8},
        {S32, LocalPtr, 16},
        {S32, PrivatePtr, 8},
        {S32, PrivatePtr, 16}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemSize({{S32, FlatPtr, 8},
                                       {S32, FlatPtr, 16}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  // Atomic RMW and cmpxchg: s32/s64 on global and local memory, plus flat
  // when available.
  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalFor({{S32, S1}, {S64, S1}, {V2S32, S1}, {V2S16, S1}})
    .clampScalar(0, S32, S64)
    .scalarize(0);

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  // 16-bit shifted values are only legal when the subtarget has 16-bit
  // instructions; the shift amount is always clamped to s32.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts())
    Shifts.legalFor({{S16, S32}, {S16, S16}});
  else
    Shifts.clampScalar(0, S32, S64);
  Shifts.clampScalar(1, S32, S32);

  // Vector element access.  The vector/element operand indices swap between
  // extract (vec is operand 1) and insert (vec is operand 0); the index is
  // always operand 2 and must be s32.
  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &VecTy = Query.Types[VecTypeIdx];
        const LLT &IdxTy = Query.Types[IdxTypeIdx];
        return VecTy.getSizeInBits() % 32 == 0 &&
               VecTy.getSizeInBits() <= 512 &&
               IdxTy.getSizeInBits() == 32;
      })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  // NOTE(review): this is a second getActionDefinitionsBuilder call for
  // G_EXTRACT_VECTOR_ELT (the loop above already configured it) — confirm the
  // builder framework merges rather than rejects the additional rule.
  // Extracts whose result type differs from the vector element type are
  // unsupported.
  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
      const LLT &EltTy = Query.Types[1].getElementType();
      return Query.Types[0] != EltTy;
    });

  // FIXME: Doesn't handle extract of illegal sizes.
  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
    .legalIf([=](const LegalityQuery &Query) {
      const LLT &Ty0 = Query.Types[0];
      const LLT &Ty1 = Query.Types[1];
      return (Ty0.getSizeInBits() % 16 == 0) &&
             (Ty1.getSizeInBits() % 16 == 0);
    });

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() < 32;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    // The "big" type is the merged result (merge) or the source (unmerge);
    // the "little" type is the pieces on the other side.
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    // True for vectors whose element size is outside [8, 64] bits or not a
    // power of 2 — such vectors get scalarized below.
    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        scalarize(0))
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        scalarize(1))
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's
      // not worth considering the multiples of 64 since 2*192 and 2*384 are
      // not valid.
      // NOTE(review): widenScalarToNextPow2 is applied to LitTyIdx twice with
      // different minimums (16 above, 32 below) — confirm both are intended.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
      .legalIf([=](const LegalityQuery &Query) {
        const LLT &BigTy = Query.Types[BigTyIdx];
        const LLT &LitTy = Query.Types[LitTyIdx];

        if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
          return false;
        if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
          return false;

        return BigTy.getSizeInBits() % 16 == 0 &&
               LitTy.getSizeInBits() % 16 == 0 &&
               BigTy.getSizeInBits() <= 512;
      })
      // Any vectors left are the wrong size. Scalarize them.
      .scalarize(0)
      .scalarize(1);
  }

  // Finalize the rule tables and (in assert builds) verify that every opcode
  // has a coherent set of rules.
  computeTables();
  verify(*ST.getInstrInfo());
}