//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the Machinelegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalityPredicates;

/// Build the GlobalISel legalization rule tables for the AMDGPU (GCN) target.
///
/// Every generic opcode is mapped to a LegalizeRuleSet describing which type
/// combinations are legal and how illegal ones are mutated (widen, narrow,
/// scalarize, lower, ...). NOTE: within each rule set, rules are tried in the
/// order they are declared here, so declaration order is significant.
///
/// \param ST subtarget, consulted for generation/feature-dependent rules
///           (e.g. flat address space, 16-bit instructions).
/// \param TM target machine, used to query pointer sizes per address space.
AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  // Mutation helper: replace the vector type at TypeIdx with its element
  // type, i.e. request full scalarization of that operand.
  auto scalarize = [=](const LegalityQuery &Query, unsigned TypeIdx) {
    const LLT &Ty = Query.Types[TypeIdx];
    return std::make_pair(TypeIdx, Ty.getElementType());
  };

  // Build an LLT pointer type for an address space, with the pointer width
  // taken from the target machine (address spaces differ in size).
  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  // Commonly used scalar types.
  const LLT S1 = LLT::scalar(1);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  // 16-bit element vectors (packed types).
  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  // 32-bit element vectors, up to 512 bits total.
  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  // 64-bit element vectors, up to 512 bits total.
  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  // Convenience lists for legalForCartesianProduct below.
  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  // Pointer types for each AMDGPU address space, sized per the target machine.
  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  // Code addresses are modeled as flat pointers (used for G_BLOCK_ADDR).
  const LLT CodePtr = FlatPtr;

  const LLT AddrSpaces[] = {
    GlobalPtr,
    ConstantPtr,
    LocalPtr,
    FlatPtr,
    PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  // Basic 32-bit integer arithmetic. Note G_ASHR's shift-amount operand
  // (type index 1) is registered separately.
  setAction({G_ADD, S32}, Legal);
  setAction({G_ASHR, S32}, Legal);
  setAction({G_ASHR, 1, S32}, Legal);
  setAction({G_SUB, S32}, Legal);
  setAction({G_MUL, S32}, Legal);

  // FIXME: 64-bit ones only legal for scalar
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32});

  // Add/sub with carry in/out: 32-bit value, 1-bit carry.
  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}});

  // Bitcasts between same-sized scalar/vector pairs, and between any two
  // pointer types.
  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16});

  // G_IMPLICIT_DEF is a no-op so we can make it legal for any value type that
  // can fit in a register.
  // FIXME: We need to legalize several more operations before we can add
  // a test case for size > 512.
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getSizeInBits() <= 512;
    })
    .clampScalar(0, S1, S512);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal.  We need to figure out how to distinguish
  // between these two scenarios.
  // FIXME: Pointer types
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0);

  // Frame indexes are always private (scratch) addresses.
  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  // FP arithmetic: legal on 32/64-bit scalars; vectors are fully
  // scalarized first, then out-of-range scalars are clamped.
  getActionDefinitionsBuilder({G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
    .legalFor({S32, S64})
    .fewerElementsIf(
      [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
      [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}});

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    .lowerFor({{S64, S16}}); // FIXME: Implement

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16, V2S16})
    .fewerElementsIf(
      [=](const LegalityQuery &Query) { return Query.Types[0].isVector(); },
      [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
    .clampScalar(0, S32, S64);

  // FP compare: 1-bit result, 32/64-bit operands.
  setAction({G_FCMP, S1}, Legal);
  setAction({G_FCMP, 1, S32}, Legal);
  setAction({G_FCMP, 1, S64}, Legal);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1}});

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}});

  setAction({G_FPOW, S32}, Legal);
  setAction({G_FEXP2, S32}, Legal);
  setAction({G_FLOG2, S32}, Legal);

  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
    .legalFor({S32, S64});

  // Pointer addition is legal in every address space; the index operand
  // must match the pointer's width.
  for (LLT PtrTy : AddrSpaces) {
    LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
    setAction({G_GEP, PtrTy}, Legal);
    setAction({G_GEP, 1, IdxTy}, Legal);
  }

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  setAction({G_ICMP, S1}, Legal);
  setAction({G_ICMP, 1, S32}, Legal);

  // 32-bit bit-manipulation operations.
  setAction({G_CTLZ, S32}, Legal);
  setAction({G_CTLZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_CTTZ, S32}, Legal);
  setAction({G_CTTZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_BSWAP, S32}, Legal);
  setAction({G_CTPOP, S32}, Legal);

  // Pointer/integer conversions are accepted unconditionally; no size
  // constraint is enforced here.
  getActionDefinitionsBuilder(G_INTTOPTR)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  getActionDefinitionsBuilder(G_PTRTOINT)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  // Loads/stores are judged purely by total value size here; 96-bit
  // accesses require the dwordx3 forms introduced with Sea Islands.
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .legalIf([=, &ST](const LegalityQuery &Query) {
      const LLT &Ty0 = Query.Types[0];

      // TODO: Decompose private loads into 4-byte components.
      // TODO: Illegal flat loads on SI
      switch (Ty0.getSizeInBits()) {
      case 32:
      case 64:
      case 128:
        return true;

      case 96:
        // XXX hasLoadX3
        return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

      case 256:
      case 512:
        // TODO: constant loads
      default:
        return false;
      }
    });


  // Extending loads of 8/16-bit memory values into s32. The builder
  // reference is kept so flat-pointer variants can be appended only when
  // the subtarget has a flat address space.
  auto &ExtLoads = getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
                   .legalForTypesWithMemSize({
                       {S32, GlobalPtr, 8},
                       {S32, GlobalPtr, 16},
                       {S32, LocalPtr, 8},
                       {S32, LocalPtr, 16},
                       {S32, PrivatePtr, 8},
                       {S32, PrivatePtr, 16}});
  if (ST.hasFlatAddressSpace()) {
    ExtLoads.legalForTypesWithMemSize({{S32, FlatPtr, 8},
                                       {S32, FlatPtr, 16}});
  }

  ExtLoads.clampScalar(0, S32, S32)
          .widenScalarToNextPow2(0)
          .unsupportedIfMemSizeNotPow2()
          .lower();

  // Atomic RMW and cmpxchg on 32/64-bit values; flat-pointer variants are
  // appended only when the subtarget supports them.
  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalFor({{S32, S1}, {S64, S1}, {V2S32, S1}, {V2S16, S1}})
    .clampScalar(0, S32, S64);

  // TODO: Only the low 4/5/6 bits of the shift amount are observed, so we can
  // be more flexible with the shift amount type.
  auto &Shifts = getActionDefinitionsBuilder({G_SHL, G_LSHR, G_ASHR})
    .legalFor({{S32, S32}, {S64, S32}});
  if (ST.has16BitInsts())
    Shifts.legalFor({{S16, S32}, {S16, S16}});
  else
    // Without 16-bit instructions, small shifts must be widened to 32 bits.
    Shifts.clampScalar(0, S32, S64);
  Shifts.clampScalar(1, S32, S32);

  // FIXME: When RegBankSelect inserts copies, it will only create new
  // registers with scalar types.  This means we can end up with
  // G_LOAD/G_STORE/G_GEP instruction with scalar types for their pointer
  // operands.  In assert builds, the instruction selector will assert
  // if it sees a generic instruction which isn't legal, so we need to
  // tell it that scalar types are legal for pointer operands
  setAction({G_GEP, S64}, Legal);

  // Vector element access. The vector operand sits at a different type
  // index for extract (1) vs insert (0), hence the per-op index selection.
  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
    unsigned EltTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 0 : 1;
    unsigned IdxTypeIdx = 2;

    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          // Legal for dword-multiple vectors up to 512 bits, with a 32-bit
          // index.
          const LLT &VecTy = Query.Types[VecTypeIdx];
          const LLT &IdxTy = Query.Types[IdxTypeIdx];
          return VecTy.getSizeInBits() % 32 == 0 &&
            VecTy.getSizeInBits() <= 512 &&
            IdxTy.getSizeInBits() == 32;
        })
      .clampScalar(EltTypeIdx, S32, S64)
      .clampScalar(VecTypeIdx, S32, S64)
      .clampScalar(IdxTypeIdx, S32, S32);
  }

  // Reject extracts whose result type doesn't match the vector's element
  // type (no implicit conversion on extract).
  getActionDefinitionsBuilder(G_EXTRACT_VECTOR_ELT)
    .unsupportedIf([=](const LegalityQuery &Query) {
        const LLT &EltTy = Query.Types[1].getElementType();
        return Query.Types[0] != EltTy;
      });

  // FIXME: Doesn't handle extract of illegal sizes.
  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
    .legalIf([=](const LegalityQuery &Query) {
        // Sub-register extracts/inserts are legal when both types are
        // dword multiples.
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        return (Ty0.getSizeInBits() % 32 == 0) &&
               (Ty1.getSizeInBits() % 32 == 0);
      });

  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0)
    // FIXME: Sort of a hack to make progress on other legalizations.
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getScalarSizeInBits() < 32;
    });

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16}});

  // Merge/Unmerge
  // The "big" (merged) type is index 0 for merge and index 1 for unmerge;
  // the rules below are written in terms of BigTyIdx/LitTyIdx so the same
  // body serves both opcodes.
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    // True if the type at TypeIdx is a vector whose element size is outside
    // [8, 64] bits or not a power of 2 — such vectors get scalarized below.
    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    getActionDefinitionsBuilder(Op)
      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        [=](const LegalityQuery &Query) { return scalarize(Query, 1); })
      .clampScalar(BigTyIdx, S32, S512)
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
                 Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s16-s256 and make it a power of 2. It's
      // not worth considering the multiples of 64 since 2*192 and 2*384 are
      // not valid.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          // Sub-dword vectors on either side are not legal here.
          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .fewerElementsIf([](const LegalityQuery &Query) {
          return Query.Types[0].isVector();
        },
        [](const LegalityQuery &Query) {
          return std::make_pair(
            0, Query.Types[0].getElementType());
        })
      .fewerElementsIf([](const LegalityQuery &Query) {
          return Query.Types[1].isVector();
        },
        [](const LegalityQuery &Query) {
          return std::make_pair(
            1, Query.Types[1].getElementType());
        });

  }

  // Finalize the rule tables and (in assert builds) check that every opcode
  // has a coherent rule set.
  computeTables();
  verify(*ST.getInstrInfo());
}