//===- AMDGPULegalizerInfo.cpp -----------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPUTargetMachine.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"

using namespace llvm;
using namespace LegalizeActions;
using namespace LegalityPredicates;

/// Constructor: populates the GlobalISel legality rule table for the given
/// GCN subtarget. Rules within a single builder chain are consulted in the
/// order they are declared, so earlier rules take precedence over later
/// fallbacks (e.g. a legalFor() before a clampScalar()).
///
/// \param ST  Subtarget queried for generation- and feature-dependent rules
///            (e.g. dwordx3 loads, flat address space).
/// \param TM  Target machine, used only to obtain per-address-space pointer
///            widths.
AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST,
                                         const GCNTargetMachine &TM) {
  using namespace TargetOpcode;

  // Build an LLT pointer type for address space \p AS, with the bit width
  // taken from the target machine's data layout for that address space.
  auto GetAddrSpacePtr = [&TM](unsigned AS) {
    return LLT::pointer(AS, TM.getPointerSizeInBits(AS));
  };

  // Commonly referenced scalar LLTs.
  const LLT S1 = LLT::scalar(1);
  const LLT S16 = LLT::scalar(16);
  const LLT S32 = LLT::scalar(32);
  const LLT S64 = LLT::scalar(64);
  const LLT S256 = LLT::scalar(256);
  const LLT S512 = LLT::scalar(512);

  // 16-bit element vectors.
  const LLT V2S16 = LLT::vector(2, 16);
  const LLT V4S16 = LLT::vector(4, 16);
  const LLT V8S16 = LLT::vector(8, 16);

  // 32-bit element vectors, from 2 up to 16 elements (512 bits total).
  const LLT V2S32 = LLT::vector(2, 32);
  const LLT V3S32 = LLT::vector(3, 32);
  const LLT V4S32 = LLT::vector(4, 32);
  const LLT V5S32 = LLT::vector(5, 32);
  const LLT V6S32 = LLT::vector(6, 32);
  const LLT V7S32 = LLT::vector(7, 32);
  const LLT V8S32 = LLT::vector(8, 32);
  const LLT V9S32 = LLT::vector(9, 32);
  const LLT V10S32 = LLT::vector(10, 32);
  const LLT V11S32 = LLT::vector(11, 32);
  const LLT V12S32 = LLT::vector(12, 32);
  const LLT V13S32 = LLT::vector(13, 32);
  const LLT V14S32 = LLT::vector(14, 32);
  const LLT V15S32 = LLT::vector(15, 32);
  const LLT V16S32 = LLT::vector(16, 32);

  // 64-bit element vectors, from 2 up to 8 elements (512 bits total).
  const LLT V2S64 = LLT::vector(2, 64);
  const LLT V3S64 = LLT::vector(3, 64);
  const LLT V4S64 = LLT::vector(4, 64);
  const LLT V5S64 = LLT::vector(5, 64);
  const LLT V6S64 = LLT::vector(6, 64);
  const LLT V7S64 = LLT::vector(7, 64);
  const LLT V8S64 = LLT::vector(8, 64);

  // Convenience lists covering every 32-/64-bit element vector above, used
  // with legalForCartesianProduct for G_BUILD_VECTOR.
  std::initializer_list<LLT> AllS32Vectors =
    {V2S32, V3S32, V4S32, V5S32, V6S32, V7S32, V8S32,
     V9S32, V10S32, V11S32, V12S32, V13S32, V14S32, V15S32, V16S32};
  std::initializer_list<LLT> AllS64Vectors =
    {V2S64, V3S64, V4S64, V5S64, V6S64, V7S64, V8S64};

  // One pointer LLT per AMDGPU address space; widths come from the TM.
  const LLT GlobalPtr = GetAddrSpacePtr(AMDGPUAS::GLOBAL_ADDRESS);
  const LLT ConstantPtr = GetAddrSpacePtr(AMDGPUAS::CONSTANT_ADDRESS);
  const LLT LocalPtr = GetAddrSpacePtr(AMDGPUAS::LOCAL_ADDRESS);
  const LLT FlatPtr = GetAddrSpacePtr(AMDGPUAS::FLAT_ADDRESS);
  const LLT PrivatePtr = GetAddrSpacePtr(AMDGPUAS::PRIVATE_ADDRESS);

  // Code addresses (e.g. block addresses) use the flat address space.
  const LLT CodePtr = FlatPtr;

  // All pointer types for which G_GEP is made legal below.
  const LLT AddrSpaces[] = {
    GlobalPtr,
    ConstantPtr,
    LocalPtr,
    FlatPtr,
    PrivatePtr
  };

  setAction({G_BRCOND, S1}, Legal);

  // Basic 32-bit integer arithmetic.
  setAction({G_ADD, S32}, Legal);
  setAction({G_ASHR, S32}, Legal);
  setAction({G_SUB, S32}, Legal);
  setAction({G_MUL, S32}, Legal);

  // FIXME: 64-bit ones only legal for scalar
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
    .legalFor({S32, S1, S64, V2S32});

  // Carry/borrow ops: 32-bit result with an s1 carry-out/carry-in.
  getActionDefinitionsBuilder({G_UADDO, G_SADDO, G_USUBO, G_SSUBO,
                               G_UADDE, G_SADDE, G_USUBE, G_SSUBE})
    .legalFor({{S32, S1}});

  // Bitcasts between same-sized scalar/vector pairs, grouped by total width
  // (32, 64, and 128 bits), plus pointer<->pointer casts.
  getActionDefinitionsBuilder(G_BITCAST)
    .legalForCartesianProduct({S32, V2S16})
    .legalForCartesianProduct({S64, V2S32, V4S16})
    .legalForCartesianProduct({V2S64, V4S32})
    // Don't worry about the size constraint.
    .legalIf(all(isPointer(0), isPointer(1)));

  getActionDefinitionsBuilder(G_FCONSTANT)
    .legalFor({S32, S64, S16});

  // G_IMPLICIT_DEF is a no-op so we can make it legal for any value type that
  // can fit in a register.
  // FIXME: We need to legalize several more operations before we can add
  // a test case for size > 512.
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
    .legalIf([=](const LegalityQuery &Query) {
      return Query.Types[0].getSizeInBits() <= 512;
    })
    .clampScalar(0, S1, S512);


  // FIXME: i1 operands to intrinsics should always be legal, but other i1
  // values may not be legal.  We need to figure out how to distinguish
  // between these two scenarios.
  // FIXME: Pointer types
  getActionDefinitionsBuilder(G_CONSTANT)
    .legalFor({S1, S32, S64, V2S32, V2S16})
    .clampScalar(0, S32, S64)
    .widenScalarToNextPow2(0);

  // Frame indexes are always private (scratch) pointers.
  setAction({G_FRAME_INDEX, PrivatePtr}, Legal);

  getActionDefinitionsBuilder(
    { G_FADD, G_FMUL, G_FNEG, G_FABS, G_FMA})
    .legalFor({S32, S64})
    .clampScalar(0, S32, S64);

  getActionDefinitionsBuilder(G_FPTRUNC)
    .legalFor({{S32, S64}, {S16, S32}});

  getActionDefinitionsBuilder(G_FPEXT)
    .legalFor({{S64, S32}, {S32, S16}})
    // f16 -> f64 has no single instruction; lower to two-step extension.
    .lowerFor({{S64, S16}}); // FIXME: Implement

  getActionDefinitionsBuilder(G_FSUB)
    // Use actual fsub instruction
    .legalFor({S32})
    // Must use fadd + fneg
    .lowerFor({S64, S16})
    .clampScalar(0, S32, S64);

  // G_FCMP: s1 result (type idx 0); f32/f64 compared operands (type idx 1).
  setAction({G_FCMP, S1}, Legal);
  setAction({G_FCMP, 1, S32}, Legal);
  setAction({G_FCMP, 1, S64}, Legal);

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
    .legalFor({{S64, S32}, {S32, S16}, {S64, S16},
               {S32, S1}, {S64, S1}, {S16, S1}});

  // Int <-> FP conversions; 32-bit integer operand in all legal forms.
  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
    .legalFor({{S32, S32}, {S64, S32}});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
    .legalFor({{S32, S32}, {S32, S64}});

  // Transcendental ops are only handled at 32 bits.
  setAction({G_FPOW, S32}, Legal);
  setAction({G_FEXP2, S32}, Legal);
  setAction({G_FLOG2, S32}, Legal);

  getActionDefinitionsBuilder({G_INTRINSIC_TRUNC, G_INTRINSIC_ROUND})
    .legalFor({S32, S64});

  // G_GEP is legal for every address space, with an index whose width
  // matches the pointer width for that space.
  for (LLT PtrTy : AddrSpaces) {
    LLT IdxTy = LLT::scalar(PtrTy.getSizeInBits());
    setAction({G_GEP, PtrTy}, Legal);
    setAction({G_GEP, 1, IdxTy}, Legal);
  }

  setAction({G_BLOCK_ADDR, CodePtr}, Legal);

  // G_ICMP: s1 result; 32-bit compared operands.
  setAction({G_ICMP, S1}, Legal);
  setAction({G_ICMP, 1, S32}, Legal);

  // 32-bit bit-manipulation ops map directly to hardware instructions.
  setAction({G_CTLZ, S32}, Legal);
  setAction({G_CTLZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_CTTZ, S32}, Legal);
  setAction({G_CTTZ_ZERO_UNDEF, S32}, Legal);
  setAction({G_BSWAP, S32}, Legal);
  setAction({G_CTPOP, S32}, Legal);

  // Pointer <-> integer conversions are unconditionally legal; no size or
  // address-space restrictions are checked here.
  getActionDefinitionsBuilder(G_INTTOPTR)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  getActionDefinitionsBuilder(G_PTRTOINT)
    .legalIf([](const LegalityQuery &Query) {
      return true;
    });

  // Loads/stores are legalized by total value size: 32/64/128 always,
  // 96 only when the subtarget has dwordx3 memory ops.
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
    .legalIf([=, &ST](const LegalityQuery &Query) {
      const LLT &Ty0 = Query.Types[0];

      // TODO: Decompose private loads into 4-byte components.
      // TODO: Illegal flat loads on SI
      switch (Ty0.getSizeInBits()) {
      case 32:
      case 64:
      case 128:
        return true;

      case 96:
        // XXX hasLoadX3
        return (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS);

      case 256:
      case 512:
        // TODO: constant loads
      default:
        return false;
      }
    });


  // 32-/64-bit atomics on global and LDS pointers; flat-pointer forms are
  // added only when the subtarget actually has a flat address space.
  auto &Atomics = getActionDefinitionsBuilder(
    {G_ATOMICRMW_XCHG, G_ATOMICRMW_ADD, G_ATOMICRMW_SUB,
     G_ATOMICRMW_AND, G_ATOMICRMW_OR, G_ATOMICRMW_XOR,
     G_ATOMICRMW_MAX, G_ATOMICRMW_MIN, G_ATOMICRMW_UMAX,
     G_ATOMICRMW_UMIN, G_ATOMIC_CMPXCHG})
    .legalFor({{S32, GlobalPtr}, {S32, LocalPtr},
               {S64, GlobalPtr}, {S64, LocalPtr}});
  if (ST.hasFlatAddressSpace()) {
    Atomics.legalFor({{S32, FlatPtr}, {S64, FlatPtr}});
  }

  // TODO: Pointer types, any 32-bit or 64-bit vector
  getActionDefinitionsBuilder(G_SELECT)
    .legalFor({{S32, S1}, {S64, S1}, {V2S32, S1}, {V2S16, S1}})
    .clampScalar(0, S32, S64);

  setAction({G_SHL, S32}, Legal);


  // FIXME: When RegBankSelect inserts copies, it will only create new
  // registers with scalar types.  This means we can end up with
  // G_LOAD/G_STORE/G_GEP instruction with scalar types for their pointer
  // operands.  In assert builds, the instruction selector will assert
  // if it sees a generic instruction which isn't legal, so we need to
  // tell it that scalar types are legal for pointer operands
  setAction({G_GEP, S64}, Legal);

  // Vector element access: any 32-bit-multiple vector up to 512 bits, with a
  // 32-bit index operand.
  for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
    getActionDefinitionsBuilder(Op)
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &VecTy = Query.Types[1];
          const LLT &IdxTy = Query.Types[2];
          return VecTy.getSizeInBits() % 32 == 0 &&
            VecTy.getSizeInBits() <= 512 &&
            IdxTy.getSizeInBits() == 32;
        });
  }

  // FIXME: Doesn't handle extract of illegal sizes.
  getActionDefinitionsBuilder({G_EXTRACT, G_INSERT})
    .legalIf([=](const LegalityQuery &Query) {
        const LLT &Ty0 = Query.Types[0];
        const LLT &Ty1 = Query.Types[1];
        return (Ty0.getSizeInBits() % 32 == 0) &&
               (Ty1.getSizeInBits() % 32 == 0);
      });

  // NOTE(review): clampNumElements is called with min == max (V16S32) for the
  // s32 case — confirm whether a lower bound of V2S32 was intended.
  getActionDefinitionsBuilder(G_BUILD_VECTOR)
    .legalForCartesianProduct(AllS32Vectors, {S32})
    .legalForCartesianProduct(AllS64Vectors, {S64})
    .clampNumElements(0, V16S32, V16S32)
    .clampNumElements(0, V2S64, V8S64)
    .minScalarSameAs(1, 0);

  // TODO: Support any combination of v2s32
  getActionDefinitionsBuilder(G_CONCAT_VECTORS)
    .legalFor({{V4S32, V2S32},
               {V8S32, V2S32},
               {V8S32, V4S32},
               {V4S64, V2S64},
               {V4S16, V2S16},
               {V8S16, V2S16},
               {V8S16, V4S16}});

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    // For G_MERGE_VALUES the wide type is the result (idx 0); for
    // G_UNMERGE_VALUES it is the source (idx 1).
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;

    // True if the type at \p TypeIdx is a vector whose element width is not
    // a power of two in the range [8, 64].
    auto notValidElt = [=](const LegalityQuery &Query, unsigned TypeIdx) {
      const LLT &Ty = Query.Types[TypeIdx];
      if (Ty.isVector()) {
        const LLT &EltTy = Ty.getElementType();
        if (EltTy.getSizeInBits() < 8 || EltTy.getSizeInBits() > 64)
          return true;
        if (!isPowerOf2_32(EltTy.getSizeInBits()))
          return true;
      }
      return false;
    };

    // Mutation: replace the vector at \p TypeIdx with its element type.
    auto scalarize =
      [=](const LegalityQuery &Query, unsigned TypeIdx) {
        const LLT &Ty = Query.Types[TypeIdx];
        return std::make_pair(TypeIdx, Ty.getElementType());
      };

    getActionDefinitionsBuilder(Op)
      // Break up vectors with weird elements into scalars
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 0); },
        [=](const LegalityQuery &Query) { return scalarize(Query, 0); })
      .fewerElementsIf(
        [=](const LegalityQuery &Query) { return notValidElt(Query, 1); },
        [=](const LegalityQuery &Query) { return scalarize(Query, 1); })
      .clampScalar(BigTyIdx, S32, S512)
      // Widen a big scalar whose width is neither a power of two nor a
      // multiple of 16 to an acceptable size.
      .widenScalarIf(
        [=](const LegalityQuery &Query) {
          const LLT &Ty = Query.Types[BigTyIdx];
          return !isPowerOf2_32(Ty.getSizeInBits()) &&
            Ty.getSizeInBits() % 16 != 0;
        },
        [=](const LegalityQuery &Query) {
          // Pick the next power of 2, or a multiple of 64 over 128.
          // Whichever is smaller.
          const LLT &Ty = Query.Types[BigTyIdx];
          unsigned NewSizeInBits = 1 << Log2_32_Ceil(Ty.getSizeInBits() + 1);
          if (NewSizeInBits >= 256) {
            unsigned RoundedTo = alignTo<64>(Ty.getSizeInBits() + 1);
            if (RoundedTo < NewSizeInBits)
              NewSizeInBits = RoundedTo;
          }
          return std::make_pair(BigTyIdx, LLT::scalar(NewSizeInBits));
        })
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 16)
      // Clamp the little scalar to s8-s256 and make it a power of 2.  It's not
      // worth considering the multiples of 64 since 2*192 and 2*384 are not
      // valid.
      // NOTE(review): the comment above says s8-s256 but the clamp below uses
      // S16 as the minimum — confirm which is intended.
      .clampScalar(LitTyIdx, S16, S256)
      .widenScalarToNextPow2(LitTyIdx, /*Min*/ 32)
      // Finally legal: both sizes are multiples of 16, the big type is at
      // most 512 bits, and neither is a sub-32-bit vector.
      .legalIf([=](const LegalityQuery &Query) {
          const LLT &BigTy = Query.Types[BigTyIdx];
          const LLT &LitTy = Query.Types[LitTyIdx];

          if (BigTy.isVector() && BigTy.getSizeInBits() < 32)
            return false;
          if (LitTy.isVector() && LitTy.getSizeInBits() < 32)
            return false;

          return BigTy.getSizeInBits() % 16 == 0 &&
                 LitTy.getSizeInBits() % 16 == 0 &&
                 BigTy.getSizeInBits() <= 512;
        })
      // Any vectors left are the wrong size. Scalarize them.
      .fewerElementsIf([](const LegalityQuery &Query) {
          return Query.Types[0].isVector();
        },
        [](const LegalityQuery &Query) {
          return std::make_pair(
            0, Query.Types[0].getElementType());
        })
      .fewerElementsIf([](const LegalityQuery &Query) {
          return Query.Types[1].isVector();
        },
        [](const LegalityQuery &Query) {
          return std::make_pair(
            1, Query.Types[1].getElementType());
        });

  }

  // Freeze the rule tables and run LegalizerInfo's self-consistency checks.
  computeTables();
  verify(*ST.getInstrInfo());
}