//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUAsmUtils.h"
#include "AMDKernelCodeT.h"
#include "GCNSubtarget.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/TargetParser.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

static llvm::cl::opt<unsigned> AmdhsaCodeObjectVersion(
  "amdhsa-code-object-version", llvm::cl::Hidden,
  llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4),
  llvm::cl::ZeroOrMore);

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
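
// Worked example (the values follow directly from the helpers above): with
// Shift = 4 and Width = 3,
//   getBitMask(4, 3)              == 0b0111'0000
//   packBits(0b101, 0, 4, 3)      == 0b0101'0000
//   unpackBits(0b0101'0000, 4, 3) == 0b101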

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 10 : 0;
}

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 6 : 4;
}

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 0 : 4;
}

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth(unsigned VersionMajor) { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift(unsigned VersionMajor) {
  return VersionMajor >= 11 ? 4 : 8;
}

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return VersionMajor >= 10 ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi(unsigned VersionMajor) { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi(unsigned VersionMajor) {
  return (VersionMajor == 9 || VersionMajor == 10) ? 2 : 0;
}
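
// Taken together, the helpers above describe the s_waitcnt field layouts:
//   pre-gfx9: vmcnt[3:0]          expcnt[6:4]  lgkmcnt[11:8]
//   gfx9:     vmcnt[3:0],[15:14]  expcnt[6:4]  lgkmcnt[11:8]
//   gfx10:    vmcnt[3:0],[15:14]  expcnt[6:4]  lgkmcnt[13:8]
//   gfx11+:   vmcnt[15:10]        expcnt[2:0]  lgkmcnt[9:4]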

} // end namespace anonymous

namespace llvm {

namespace AMDGPU {

Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
  if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
    return None;

  switch (AmdhsaCodeObjectVersion) {
  case 2:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  case 3:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  case 4:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  case 5:
    return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  default:
    report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
                       Twine(AmdhsaCodeObjectVersion));
  }
}

bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
  return false;
}

bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
  return false;
}

bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
  return false;
}

bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
    return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
  return false;
}

bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
  return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
         isHsaAbiVersion5(STI);
}

unsigned getAmdhsaCodeObjectVersion() {
  return AmdhsaCodeObjectVersion;
}

unsigned getMultigridSyncArgImplicitArgPosition() {
  switch (AmdhsaCodeObjectVersion) {
  case 2:
  case 3:
  case 4:
    return 48;
  case 5:
    return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
  default:
    llvm_unreachable("Unexpected code object version");
    return 0;
  }
}

// FIXME: All such magic numbers about the ABI should be in a
// central TD file.
unsigned getHostcallImplicitArgPosition() {
  switch (AmdhsaCodeObjectVersion) {
  case 2:
  case 3:
  case 4:
    return 24;
  case 5:
    return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
  default:
    llvm_unreachable("Unexpected code object version");
    return 0;
  }
}

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#define GET_MIMGBiasMappingTable_IMPL
#define GET_MIMGOffsetMappingTable_IMPL
#define GET_MIMGG16MappingTable_IMPL
#define GET_MAIInstInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
                           const MIMGDimInfo *Dim, bool IsA16,
                           bool IsG16Supported) {
  unsigned AddrWords = BaseOpcode->NumExtraArgs;
  unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
                            (BaseOpcode->LodOrClampOrMip ? 1 : 0);
  if (IsA16)
    AddrWords += divideCeil(AddrComponents, 2);
  else
    AddrWords += AddrComponents;

  // Note: For subtargets that support A16 but not G16, enabling A16 also
  // enables 16-bit gradients.
  // For subtargets that support both A16 (an operand modifier) and G16 (done
  // with a different instruction encoding), the two are independent.

  if (BaseOpcode->Gradients) {
    if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate; we pack them separately.
      // For the 3D case, we get
      // (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv).
      AddrWords += alignTo<2>(Dim->NumGradients / 2);
    else
      AddrWords += Dim->NumGradients;
  }
  return AddrWords;
}
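
// For illustration, consider a hypothetical 2D gradient opcode with
// NumExtraArgs = 0, Coordinates set, no LOD/clamp/mip, and Gradients set
// (NumCoords = 2, NumGradients = 4):
//   - full precision:           4 gradient dwords + 2 coordinate dwords -> 6
//   - IsA16 && !IsG16Supported: alignTo<2>(4 / 2) = 2 packed gradient dwords
//                               + divideCeil(2, 2) = 1 packed coordinate
//                               dword                                   -> 3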

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
  bool IsBufferInv;
};

struct MTBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t elements;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

struct SMInfo {
  uint16_t Opcode;
  bool IsBuffer;
};

struct VOPInfo {
  uint16_t Opcode;
  bool IsSingle;
};

#define GET_MTBUFInfoTable_DECL
#define GET_MTBUFInfoTable_IMPL
#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#define GET_SMInfoTable_DECL
#define GET_SMInfoTable_IMPL
#define GET_VOP1InfoTable_DECL
#define GET_VOP1InfoTable_IMPL
#define GET_VOP2InfoTable_DECL
#define GET_VOP2InfoTable_IMPL
#define GET_VOP3InfoTable_DECL
#define GET_VOP3InfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMTBUFBaseOpcode(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MTBUFInfo *Info =
      getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMTBUFElements(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMTBUFHasVAddr(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMTBUFHasSrsrc(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMTBUFHasSoffset(unsigned Opc) {
  const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
  const MUBUFInfo *Info =
      getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
  return Info ? Info->Opcode : -1;
}

int getMUBUFElements(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->elements : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

bool getMUBUFIsBufferInv(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->IsBufferInv : false;
}

bool getSMEMIsBuffer(unsigned Opc) {
  const SMInfo *Info = getSMEMOpcodeHelper(Opc);
  return Info ? Info->IsBuffer : false;
}

bool getVOP1IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP2IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getVOP3IsSingle(unsigned Opc) {
  const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
  return Info ? Info->IsSingle : false;
}

bool getMAIIsDGEMM(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info ? Info->is_dgemm : false;
}

bool getMAIIsGFX940XDL(unsigned Opc) {
  const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
  return Info ? Info->is_gfx940_xdl : false;
}

// Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
    : STI(STI), XnackSetting(TargetIDSetting::Any),
      SramEccSetting(TargetIDSetting::Any) {
  if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
    XnackSetting = TargetIDSetting::Unsupported;
  if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
    SramEccSetting = TargetIDSetting::Unsupported;
}

void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
  // Check if xnack or sramecc is explicitly enabled or disabled.  In the
  // absence of these target features, we assume we must generate code that can
  // run in any environment.
  SubtargetFeatures Features(FS);
  Optional<bool> XnackRequested;
  Optional<bool> SramEccRequested;

  for (const std::string &Feature : Features.getFeatures()) {
    if (Feature == "+xnack")
      XnackRequested = true;
    else if (Feature == "-xnack")
      XnackRequested = false;
    else if (Feature == "+sramecc")
      SramEccRequested = true;
    else if (Feature == "-sramecc")
      SramEccRequested = false;
  }

  bool XnackSupported = isXnackSupported();
  bool SramEccSupported = isSramEccSupported();

  if (XnackRequested) {
    if (XnackSupported) {
      XnackSetting =
          *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific xnack setting was requested and this GPU does not
      // support xnack, emit a warning. The setting remains "Unsupported".
      if (*XnackRequested) {
        errs() << "warning: xnack 'On' was requested for a processor that does "
                  "not support it!\n";
      } else {
        errs() << "warning: xnack 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }

  if (SramEccRequested) {
    if (SramEccSupported) {
      SramEccSetting =
          *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
    } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc, emit a warning. The setting remains "Unsupported".
      if (*SramEccRequested) {
        errs() << "warning: sramecc 'On' was requested for a processor that "
                  "does not support it!\n";
      } else {
        errs() << "warning: sramecc 'Off' was requested for a processor that "
                  "does not support it!\n";
      }
    }
  }
}

static TargetIDSetting
getTargetIDSettingFromFeatureString(StringRef FeatureString) {
  if (FeatureString.endswith("-"))
    return TargetIDSetting::Off;
  if (FeatureString.endswith("+"))
    return TargetIDSetting::On;

  llvm_unreachable("Malformed feature string");
}

void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
  SmallVector<StringRef, 3> TargetIDSplit;
  TargetID.split(TargetIDSplit, ':');

  for (const auto &FeatureString : TargetIDSplit) {
    if (FeatureString.startswith("xnack"))
      XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
    if (FeatureString.startswith("sramecc"))
      SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
  }
}

std::string AMDGPUTargetID::toString() const {
  std::string StringRep;
  raw_string_ostream StreamRep(StringRep);

  auto TargetTriple = STI.getTargetTriple();
  auto Version = getIsaVersion(STI.getCPU());

  StreamRep << TargetTriple.getArchName() << '-'
            << TargetTriple.getVendorName() << '-'
            << TargetTriple.getOSName() << '-'
            << TargetTriple.getEnvironmentName() << '-';

  std::string Processor;
  // TODO: The following else branch exists because various alias names were
  // used for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803'). Remove
  // it once all aliases are removed from GCNProcessors.td.
  if (Version.Major >= 9)
    Processor = STI.getCPU().str();
  else
    Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
                 Twine(Version.Stepping))
                    .str();

  std::string Features;
  if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
    switch (*HsaAbiVersion) {
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
      // Code object V2 only supported specific processors and had fixed
      // settings for XNACK.
      if (Processor == "gfx600") {
      } else if (Processor == "gfx601") {
      } else if (Processor == "gfx602") {
      } else if (Processor == "gfx700") {
      } else if (Processor == "gfx701") {
      } else if (Processor == "gfx702") {
      } else if (Processor == "gfx703") {
      } else if (Processor == "gfx704") {
      } else if (Processor == "gfx705") {
      } else if (Processor == "gfx801") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx802") {
      } else if (Processor == "gfx803") {
      } else if (Processor == "gfx805") {
      } else if (Processor == "gfx810") {
        if (!isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " without XNACK");
      } else if (Processor == "gfx900") {
        if (isXnackOnOrAny())
          Processor = "gfx901";
      } else if (Processor == "gfx902") {
        if (isXnackOnOrAny())
          Processor = "gfx903";
      } else if (Processor == "gfx904") {
        if (isXnackOnOrAny())
          Processor = "gfx905";
      } else if (Processor == "gfx906") {
        if (isXnackOnOrAny())
          Processor = "gfx907";
      } else if (Processor == "gfx90c") {
        if (isXnackOnOrAny())
          report_fatal_error(
              "AMD GPU code object V2 does not support processor " +
              Twine(Processor) + " with XNACK being ON or ANY");
      } else {
        report_fatal_error(
            "AMD GPU code object V2 does not support processor " +
            Twine(Processor));
      }
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
      // xnack.
      if (isXnackOnOrAny())
        Features += "+xnack";
      // In code object v2 and v3, the "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
      if (isSramEccOnOrAny())
        Features += "+sram-ecc";
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
      // sramecc.
      if (getSramEccSetting() == TargetIDSetting::Off)
        Features += ":sramecc-";
      else if (getSramEccSetting() == TargetIDSetting::On)
        Features += ":sramecc+";
      // xnack.
      if (getXnackSetting() == TargetIDSetting::Off)
        Features += ":xnack-";
      else if (getXnackSetting() == TargetIDSetting::On)
        Features += ":xnack+";
      break;
    default:
      break;
    }
  }

  StreamRep << Processor << Features;

  StreamRep.flush();
  return StringRep;
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  // "Per CU" really means "per whatever functional block the waves of a
  // workgroup must share". For gfx10 in CU mode this is the CU, which contains
  // two SIMDs.
  if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
    return 2;
  // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
  // two CUs, so a total of four SIMDs.
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}
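
// For example, on an amdgcn target with wave64 and FlatWorkGroupSize = 256,
// getWavesPerWorkGroup returns divideCeil(256, 64) = 4, so the limit is
// min(40 / 4, 16) = 10 workgroups per CU.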

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
  // FIXME: Need to take scratch memory into account.
  if (isGFX90A(*STI))
    return 8;
  if (!isGFX10Plus(*STI))
    return 10;
  return hasGFX10_3Insts(*STI) ? 16 : 20;
}

unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
                                   unsigned FlatWorkGroupSize) {
  return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
                    getEUsPerCU(STI));
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  // Some subtargets allow encoding 2048, but this isn't tested or supported.
  return 1024;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
}

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed ||
        STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is the actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
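
// For example, with the SGPR encoding granule of 8, NumSGPRs = 10 rounds up
// to 16 and encodes as 16 / 8 - 1 = 1.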

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);

  if (hasGFX10_3Insts(*STI))
    return IsWave32 ? 16 : 8;

  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 8;

  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);

  return IsWave32 ? 8 : 4;
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  if (!isGFX10Plus(*STI))
    return 256;
  return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureGFX90AInsts))
    return 512;
  return 256;
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(STI))
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is the actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}
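
// For example, on a wave64 target with a VGPR encoding granule of 4,
// NumVGPRs = 9 rounds up to 12 and encodes as 12 / 4 - 1 = 2.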

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  Header.wavefront_size = 6; // Log2 of the wavefront size; 6 means wave64.

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n.  The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
      Header.wavefront_size = 5;
      Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
    }
    Header.compute_pgm_resource_registers |=
      S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
      S_00B848_MEM_ORDERED(1);
  }
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.kernel_code_properties,
                    amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
                    STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  if (AMDGPU::isGFX90A(*STI)) {
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
                    amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
                    STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  unsigned AS = GV->getAddressSpace();
  return AS == AMDGPUAS::CONSTANT_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getArch() == Triple::r600;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

unsigned getVmcntBitMask(const IsaVersion &Version) {
  return (1 << (getVmcntBitWidthLo(Version.Major) +
                getVmcntBitWidthHi(Version.Major))) -
         1;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth(Version.Major)) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned Expcnt = getBitMask(getExpcntBitShift(Version.Major),
                               getExpcntBitWidth(Version.Major));
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(Version.Major),
                                getLgkmcntBitWidth(Version.Major));
  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | Expcnt | Lgkmcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo = unpackBits(Waitcnt, getVmcntBitShiftLo(Version.Major),
                                getVmcntBitWidthLo(Version.Major));
  unsigned VmcntHi = unpackBits(Waitcnt, getVmcntBitShiftHi(Version.Major),
                                getVmcntBitWidthHi(Version.Major));
  return VmcntLo | VmcntHi << getVmcntBitWidthLo(Version.Major);
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(Version.Major),
                    getExpcntBitWidth(Version.Major));
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(Version.Major),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt = packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(Version.Major),
                     getVmcntBitWidthLo(Version.Major));
  return packBits(Vmcnt >> getVmcntBitWidthLo(Version.Major), Waitcnt,
                  getVmcntBitShiftHi(Version.Major),
                  getVmcntBitWidthHi(Version.Major));
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(Version.Major),
                  getExpcntBitWidth(Version.Major));
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(Version.Major),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}
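
// Worked example on gfx9, where vmcnt occupies bits [3:0] and [15:14],
// expcnt bits [6:4], and lgkmcnt bits [11:8]:
//   encodeWaitcnt(V9, /*Vmcnt=*/17, /*Expcnt=*/2, /*Lgkmcnt=*/3) == 0x4321
// (Vmcnt = 17 splits into low nibble 1 and high bits 1), and
// decodeWaitcnt(V9, 0x4321) recovers the same three counters.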

//===----------------------------------------------------------------------===//
// Custom Operands.
//
// A table of custom operands shall list "primary" operand names first,
// followed by aliases, if any. It is not required but recommended to arrange
// operands so that operand encodings match operand positions in the table;
// this makes disassembly a bit more efficient.
// Unused slots in the table shall have an empty name.
//
//===----------------------------------------------------------------------===//

template <class T>
static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
                       T Context) {
  return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
         (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
}

template <class T>
static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
                     const CustomOperand<T> OpInfo[], int OpInfoSize,
                     T Context) {
  int InvalidIdx = OPR_ID_UNKNOWN;
  for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
    if (Test(OpInfo[Idx])) {
      if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
        return Idx;
      InvalidIdx = OPR_ID_UNSUPPORTED;
    }
  }
  return InvalidIdx;
}

template <class T>
static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
                     int OpInfoSize, T Context) {
  auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
}

template <class T>
static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
                     T Context, bool QuickCheck = true) {
  auto Test = [=](const CustomOperand<T> &Op) {
    return Op.Encoding == Id && !Op.Name.empty();
  };
  // This is an optimization that should work in most cases.
  // As a side effect, it may cause selection of an alias
  // instead of a primary operand name in case of sparse tables.
  if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
      OpInfo[Id].Encoding == Id) {
    return Id;
  }
  return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
}

//===----------------------------------------------------------------------===//
// Custom Operand Values
//===----------------------------------------------------------------------===//

static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
                                                int Size,
                                                const MCSubtargetInfo &STI) {
  unsigned Enc = 0;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.isSupported(STI))
      Enc |= Op.encode(Op.Default);
  }
  return Enc;
}

static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
                                            int Size, unsigned Code,
                                            bool &HasNonDefaultVal,
                                            const MCSubtargetInfo &STI) {
  unsigned UsedOprMask = 0;
  HasNonDefaultVal = false;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (!Op.isSupported(STI))
      continue;
    UsedOprMask |= Op.getMask();
    unsigned Val = Op.decode(Code);
    if (!Op.isValid(Val))
      return false;
    HasNonDefaultVal |= (Val != Op.Default);
  }
  return (Code & ~UsedOprMask) == 0;
}

static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
                                unsigned Code, int &Idx, StringRef &Name,
                                unsigned &Val, bool &IsDefault,
                                const MCSubtargetInfo &STI) {
  while (Idx < Size) {
    const auto &Op = Opr[Idx++];
    if (Op.isSupported(STI)) {
      Name = Op.Name;
      Val = Op.decode(Code);
      IsDefault = (Val == Op.Default);
      return true;
    }
  }

  return false;
}

static int encodeCustomOperandVal(const CustomOperandVal &Op,
                                  int64_t InputVal) {
  if (InputVal < 0 || InputVal > Op.Max)
    return OPR_VAL_INVALID;
  return Op.encode(InputVal);
}

static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
                               const StringRef Name, int64_t InputVal,
                               unsigned &UsedOprMask,
                               const MCSubtargetInfo &STI) {
  int InvalidId = OPR_ID_UNKNOWN;
  for (int Idx = 0; Idx < Size; ++Idx) {
    const auto &Op = Opr[Idx];
    if (Op.Name == Name) {
      if (!Op.isSupported(STI)) {
        InvalidId = OPR_ID_UNSUPPORTED;
        continue;
      }
      auto OprMask = Op.getMask();
      if (OprMask & UsedOprMask)
        return OPR_ID_DUPLICATE;
      UsedOprMask |= OprMask;
      return encodeCustomOperandVal(Op, InputVal);
    }
  }
  return InvalidId;
}

//===----------------------------------------------------------------------===//
// DepCtr
//===----------------------------------------------------------------------===//

namespace DepCtr {

int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
  static int Default = -1;
  if (Default == -1)
    Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
  return Default;
}

bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
                              const MCSubtargetInfo &STI) {
  return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
                                         HasNonDefaultVal, STI);
}

bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
                  bool &IsDefault, const MCSubtargetInfo &STI) {
  return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
                             IsDefault, STI);
}

int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
                 const MCSubtargetInfo &STI) {
  return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
                             STI);
}

} // namespace DepCtr

//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
  return (Idx < 0) ? Idx : Opr[Idx].Encoding;
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}
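
// For example, assuming the usual layout (ID_SHIFT_ = 0, OFFSET_SHIFT_ = 6,
// WIDTH_M1_SHIFT_ = 11), hwreg(id = 6, offset = 0, width = 32) encodes as
// 6 | (0 << 6) | (31 << 11) == 0xF806.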

StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
  return (Idx < 0) ? "" : Opr[Idx].Name;
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}

} // namespace Hwreg

//===----------------------------------------------------------------------===//
// exp tgt
//===----------------------------------------------------------------------===//

namespace Exp {

struct ExpTgt {
  StringLiteral Name;
  unsigned Tgt;
  unsigned MaxIndex;
};

static constexpr ExpTgt ExpTgtInfo[] = {
  {{"null"},  ET_NULL,   ET_NULL_MAX_IDX},
  {{"mrtz"},  ET_MRTZ,   ET_MRTZ_MAX_IDX},
  {{"prim"},  ET_PRIM,   ET_PRIM_MAX_IDX},
  {{"mrt"},   ET_MRT0,   ET_MRT_MAX_IDX},
  {{"pos"},   ET_POS0,   ET_POS_MAX_IDX},
  {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
};

bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
  for (const ExpTgt &Val : ExpTgtInfo) {
    if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
      Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
      Name = Val.Name;
      return true;
    }
  }
  return false;
}

unsigned getTgtId(const StringRef Name) {
  for (const ExpTgt &Val : ExpTgtInfo) {
    if (Val.MaxIndex == 0 && Name == Val.Name)
      return Val.Tgt;

    if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
      StringRef Suffix = Name.drop_front(Val.Name.size());

      unsigned Id;
      if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
        return ET_INVALID;

      // Reject leading zeroes.
      if (Suffix.size() > 1 && Suffix[0] == '0')
        return ET_INVALID;

      return Val.Tgt + Id;
    }
  }
  return ET_INVALID;
}
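
// For example, getTgtId("pos3") returns ET_POS0 + 3, while "pos03" is
// rejected for its leading zero and an out-of-range index such as "mrt9"
// yields ET_INVALID.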

bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
  return (Id != ET_POS4 && Id != ET_PRIM) || isGFX10Plus(STI);
}

} // namespace Exp

//===----------------------------------------------------------------------===//
// MTBUF Format
//===----------------------------------------------------------------------===//

namespace MTBUFFormat {

int64_t getDfmt(const StringRef Name) {
  for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
    if (Name == DfmtSymbolic[Id])
      return Id;
  }
  return DFMT_UNDEF;
}

StringRef getDfmtName(unsigned Id) {
  assert(Id <= DFMT_MAX);
  return DfmtSymbolic[Id];
}

static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI))
    return NfmtSymbolicSICI;
  if (isVI(STI) || isGFX9(STI))
    return NfmtSymbolicVI;
  return NfmtSymbolicGFX10;
}

int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
  auto lookupTable = getNfmtLookupTable(STI);
  for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
    if (Name == lookupTable[Id])
      return Id;
  }
  return NFMT_UNDEF;
}

StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
  assert(Id <= NFMT_MAX);
  return getNfmtLookupTable(STI)[Id];
}

bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
  unsigned Dfmt;
  unsigned Nfmt;
  decodeDfmtNfmt(Id, Dfmt, Nfmt);
  return isValidNfmt(Nfmt, STI);
}

bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
  return !getNfmtName(Id, STI).empty();
}

int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
  return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
}

void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
  Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
  Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
}
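
// For example, assuming DFMT_SHIFT = 0 and NFMT_SHIFT = 4,
// encodeDfmtNfmt(14, 4) == 0x4E, which decodeDfmtNfmt splits back into
// Dfmt = 14 and Nfmt = 4.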

int64_t getUnifiedFormat(const StringRef Name, const MCSubtargetInfo &STI) {
  if (isGFX11Plus(STI)) {
    for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
      if (Name == UfmtSymbolicGFX11[Id])
        return Id;
    }
  } else {
    for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
      if (Name == UfmtSymbolicGFX10[Id])
        return Id;
    }
  }
  return UFMT_UNDEF;
}

StringRef getUnifiedFormatName(unsigned Id, const MCSubtargetInfo &STI) {
  if (isValidUnifiedFormat(Id, STI))
    return isGFX10(STI) ? UfmtSymbolicGFX10[Id] : UfmtSymbolicGFX11[Id];
  return "";
}

bool isValidUnifiedFormat(unsigned Id, const MCSubtargetInfo &STI) {
  return isGFX10(STI) ? Id <= UfmtGFX10::UFMT_LAST : Id <= UfmtGFX11::UFMT_LAST;
}

int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt,
                             const MCSubtargetInfo &STI) {
  int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
  if (isGFX11Plus(STI)) {
    for (int Id = UfmtGFX11::UFMT_FIRST; Id <= UfmtGFX11::UFMT_LAST; ++Id) {
      if (Fmt == DfmtNfmt2UFmtGFX11[Id])
        return Id;
    }
  } else {
    for (int Id = UfmtGFX10::UFMT_FIRST; Id <= UfmtGFX10::UFMT_LAST; ++Id) {
      if (Fmt == DfmtNfmt2UFmtGFX10[Id])
        return Id;
    }
  }
  return UFMT_UNDEF;
}

bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
  return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
}

unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
  if (isGFX10Plus(STI))
    return UFMT_DEFAULT;
  return DFMT_NFMT_DEFAULT;
}

} // namespace MTBUFFormat

//===----------------------------------------------------------------------===//
// SendMsg
//===----------------------------------------------------------------------===//

namespace SendMsg {

static uint64_t getMsgIdMask(const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? ID_MASK_GFX11Plus_ : ID_MASK_PreGFX11_;
}

int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
  return (Idx < 0) ? Idx : Msg[Idx].Encoding;
}

bool isValidMsgId(int64_t MsgId, const MCSubtargetInfo &STI) {
  return (MsgId & ~(getMsgIdMask(STI))) == 0;
}

StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
  int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
  return (Idx < 0) ? "" : Msg[Idx].Name;
}

int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
  const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
  const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
  const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
  for (int i = F; i < L; ++i) {
    if (Name == S[i]) {
      return i;
    }
  }
  return OP_UNKNOWN_;
}

bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
                  bool Strict) {
  assert(isValidMsgId(MsgId, STI));

  if (!Strict)
    return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);

  if (MsgId == ID_SYSMSG)
    return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
  if (!isGFX11Plus(STI)) {
    switch (MsgId) {
    case ID_GS_PreGFX11:
      return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
    case ID_GS_DONE_PreGFX11:
      return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
    }
  }
  return OpId == OP_NONE_;
}

StringRef getMsgOpName(int64_t MsgId, int64_t OpId,
                       const MCSubtargetInfo &STI) {
  assert(msgRequiresOp(MsgId, STI));
  return (MsgId == ID_SYSMSG) ? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
}

bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
                      const MCSubtargetInfo &STI, bool Strict) {
  assert(isValidMsgOp(MsgId, OpId, STI, Strict));

  if (!Strict)
    return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);

  if (!isGFX11Plus(STI)) {
    switch (MsgId) {
    case ID_GS_PreGFX11:
      return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
    case ID_GS_DONE_PreGFX11:
      return (OpId == OP_GS_NOP) ?
          (StreamId == STREAM_ID_NONE_) :
          (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
    }
  }
  return StreamId == STREAM_ID_NONE_;
}

bool msgRequiresOp(int64_t MsgId, const MCSubtargetInfo &STI) {
  return MsgId == ID_SYSMSG ||
      (!isGFX11Plus(STI) &&
       (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11));
}

bool msgSupportsStream(int64_t MsgId, int64_t OpId,
                       const MCSubtargetInfo &STI) {
  return !isGFX11Plus(STI) &&
      (MsgId == ID_GS_PreGFX11 || MsgId == ID_GS_DONE_PreGFX11) &&
      OpId != OP_GS_NOP;
}

void decodeMsg(unsigned Val, uint16_t &MsgId, uint16_t &OpId,
               uint16_t &StreamId, const MCSubtargetInfo &STI) {
  MsgId = Val & getMsgIdMask(STI);
  if (isGFX11Plus(STI)) {
    OpId = 0;
    StreamId = 0;
  } else {
    OpId = (Val & OP_MASK_) >> OP_SHIFT_;
    StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
  }
}

uint64_t encodeMsg(uint64_t MsgId,
                   uint64_t OpId,
                   uint64_t StreamId) {
  return MsgId | (OpId << OP_SHIFT_) | (StreamId << STREAM_ID_SHIFT_);
}
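
// For example, with the pre-GFX11 layout (message id in the low bits,
// OP_SHIFT_ = 4, STREAM_ID_SHIFT_ = 8), encodeMsg(2, 2, 1) == 0x122, which
// decodeMsg splits back into MsgId = 2, OpId = 2, StreamId = 1.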

} // namespace SendMsg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool getHasColorExport(const Function &F) {
  // As a safe default, always respond as if PS has color exports.
1567   return getIntegerAttribute(
1568              F, "amdgpu-color-export",
1569              F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
1570 }
1571 
1572 bool getHasDepthExport(const Function &F) {
1573   return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0;
1574 }
1575 
1576 bool isShader(CallingConv::ID cc) {
1577   switch(cc) {
1578     case CallingConv::AMDGPU_VS:
1579     case CallingConv::AMDGPU_LS:
1580     case CallingConv::AMDGPU_HS:
1581     case CallingConv::AMDGPU_ES:
1582     case CallingConv::AMDGPU_GS:
1583     case CallingConv::AMDGPU_PS:
1584     case CallingConv::AMDGPU_CS:
1585       return true;
1586     default:
1587       return false;
1588   }
1589 }
1590 
1591 bool isGraphics(CallingConv::ID cc) {
1592   return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
1593 }
1594 
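// AMDGPU_CS is both a shader and a compute calling convention, so it is
// accepted here even though isGraphics() is also true for it.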
bool isCompute(CallingConv::ID cc) {
  return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool isModuleEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_Gfx:
    return true;
  default:
    return isEntryFunctionCC(CC);
  }
}

bool isKernelCC(const Function *Func) {
  return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] &&
         !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
}

bool hasGFX10A16(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16];
}

bool hasG16(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureG16];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem] && !isCI(STI) &&
         !isSI(STI);
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
  return isGFX9(STI) || isGFX10(STI);
}

bool isGFX8_GFX9_GFX10(const MCSubtargetInfo &STI) {
  return isVI(STI) || isGFX9(STI) || isGFX10(STI);
}

bool isGFX8Plus(const MCSubtargetInfo &STI) {
  return isVI(STI) || isGFX9Plus(STI);
}

bool isGFX9Plus(const MCSubtargetInfo &STI) {
  return isGFX9(STI) || isGFX10Plus(STI);
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGFX10Plus(const MCSubtargetInfo &STI) {
  return isGFX10(STI) || isGFX11Plus(STI);
}

bool isGFX11(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX11];
}

bool isGFX11Plus(const MCSubtargetInfo &STI) {
  return isGFX11(STI);
}

bool isNotGFX11Plus(const MCSubtargetInfo &STI) {
  return !isGFX11Plus(STI);
}

bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
  return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
}

bool isGFX10Before1030(const MCSubtargetInfo &STI) {
  return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
}

bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
}

bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
}

bool isGFX90A(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
}

bool isGFX940(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts];
}

bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
}

bool hasMAIInsts(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
}

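// On subtargets with the GFX90A instruction set, AGPRs and VGPRs are
// allocated from one unified register file: the AGPR block starts after the
// VGPR count rounded up to a multiple of 4. Elsewhere the two files are
// separate, so usage is simply the larger of the two counts.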
int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
                         int32_t ArgNumVGPR) {
  if (has90AInsts && ArgNumAGPR)
    return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
  return std::max(ArgNumVGPR, ArgNumAGPR);
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9PLUS(TTMP0) \
  CASE_VI_GFX9PLUS(TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2) \
  CASE_VI_GFX9PLUS(TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4) \
  CASE_VI_GFX9PLUS(TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6) \
  CASE_VI_GFX9PLUS(TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8) \
  CASE_VI_GFX9PLUS(TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10) \
  CASE_VI_GFX9PLUS(TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12) \
  CASE_VI_GFX9PLUS(TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14) \
  CASE_VI_GFX9PLUS(TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
  CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
  CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
  CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
  CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_GFXPRE11_GFX11PLUS(M0) \
  CASE_GFXPRE11_GFX11PLUS(SGPR_NULL) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9PLUS(node) \
  case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;

#define CASE_GFXPRE11_GFX11PLUS(node) \
  case node: return isGFX11Plus(STI) ? node##_gfx11plus : node##_gfxpre11;

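// MAP_REG2REG expands to a switch over Reg whose cases come from the CASE_*
// macros currently in effect: below they first map pseudo registers to the
// subtarget-specific MC registers (getMCReg), and are then redefined to
// invert that mapping (mc2PseudoReg).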
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS

#define CASE_CI_VI(node)   case node##_ci: case node##_vi:   return node;
#define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
#define CASE_GFXPRE11_GFX11PLUS(node) case node##_gfx11plus: case node##_gfxpre11: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9PLUS
#undef CASE_GFXPRE11_GFX11PLUS
#undef MAP_REG2REG

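// The operand predicates below classify an instruction's operand by the range
// of the OperandType enum it falls into, rather than by register class.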
bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
  case AMDGPU::OPERAND_REG_IMM_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
  case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::VGPR_LO16RegClassID:
  case AMDGPU::VGPR_HI16RegClassID:
  case AMDGPU::SGPR_LO16RegClassID:
  case AMDGPU::AGPR_LO16RegClassID:
    return 16;
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::AGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::AV_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::AReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
  case AMDGPU::VReg_64_Align2RegClassID:
  case AMDGPU::AReg_64_Align2RegClassID:
  case AMDGPU::AV_64RegClassID:
  case AMDGPU::AV_64_Align2RegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
  case AMDGPU::AReg_96RegClassID:
  case AMDGPU::VReg_96_Align2RegClassID:
  case AMDGPU::AReg_96_Align2RegClassID:
  case AMDGPU::AV_96RegClassID:
  case AMDGPU::AV_96_Align2RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
  case AMDGPU::AReg_128RegClassID:
  case AMDGPU::VReg_128_Align2RegClassID:
  case AMDGPU::AReg_128_Align2RegClassID:
  case AMDGPU::AV_128RegClassID:
  case AMDGPU::AV_128_Align2RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
  case AMDGPU::AReg_160RegClassID:
  case AMDGPU::VReg_160_Align2RegClassID:
  case AMDGPU::AReg_160_Align2RegClassID:
  case AMDGPU::AV_160RegClassID:
  case AMDGPU::AV_160_Align2RegClassID:
    return 160;
  case AMDGPU::SGPR_192RegClassID:
  case AMDGPU::SReg_192RegClassID:
  case AMDGPU::VReg_192RegClassID:
  case AMDGPU::AReg_192RegClassID:
  case AMDGPU::VReg_192_Align2RegClassID:
  case AMDGPU::AReg_192_Align2RegClassID:
  case AMDGPU::AV_192RegClassID:
  case AMDGPU::AV_192_Align2RegClassID:
    return 192;
  case AMDGPU::SGPR_224RegClassID:
  case AMDGPU::SReg_224RegClassID:
  case AMDGPU::VReg_224RegClassID:
  case AMDGPU::AReg_224RegClassID:
  case AMDGPU::VReg_224_Align2RegClassID:
  case AMDGPU::AReg_224_Align2RegClassID:
  case AMDGPU::AV_224RegClassID:
  case AMDGPU::AV_224_Align2RegClassID:
    return 224;
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
  case AMDGPU::AReg_256RegClassID:
  case AMDGPU::VReg_256_Align2RegClassID:
  case AMDGPU::AReg_256_Align2RegClassID:
  case AMDGPU::AV_256RegClassID:
  case AMDGPU::AV_256_Align2RegClassID:
    return 256;
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
  case AMDGPU::AReg_512RegClassID:
  case AMDGPU::VReg_512_Align2RegClassID:
  case AMDGPU::AReg_512_Align2RegClassID:
  case AMDGPU::AV_512RegClassID:
  case AMDGPU::AV_512_Align2RegClassID:
    return 512;
  case AMDGPU::SGPR_1024RegClassID:
  case AMDGPU::SReg_1024RegClassID:
  case AMDGPU::VReg_1024RegClassID:
  case AMDGPU::AReg_1024RegClassID:
  case AMDGPU::VReg_1024_Align2RegClassID:
  case AMDGPU::AReg_1024_Align2RegClassID:
  case AMDGPU::AV_1024RegClassID:
  case AMDGPU::AV_1024_Align2RegClassID:
    return 1024;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

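// Inline constants are source operand values the hardware can encode for
// free: small integers plus a fixed set of floating-point values and, when
// supported, 1/(2*pi).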
bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi); // 1/(2*pi)
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (isInlinableIntLiteral(Literal))
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values.  For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi); // 1/(2*pi)
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (isInlinableIntLiteral(Literal))
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

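// A packed 16-bit pair is inlinable when both halves hold the same inlinable
// value; values that fit entirely in the (sign- or zero-extended) low half,
// or that occupy only the high half, are checked directly.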
bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isInlinableIntLiteralV216(int32_t Literal) {
  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return isInlinableIntLiteral(Lo16);

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return isInlinableIntLiteral(Hi16);
  return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
}

bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  if (isInt<16>(Literal) || isUInt<16>(Literal))
    return true;

  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  if (!(Literal & 0xffff))
    return true;
  return Lo16 == Hi16;
}

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_Gfx:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10Plus(ST);
}

static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
  return isGFX9Plus(ST);
}

bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
                                      int64_t EncodedOffset) {
  return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
                               : isUInt<8>(EncodedOffset);
}

bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
                                    int64_t EncodedOffset,
                                    bool IsBuffer) {
  return !IsBuffer &&
         hasSMRDSignedImmOffset(ST) &&
         isInt<21>(EncodedOffset);
}

static bool isDwordAligned(uint64_t ByteOffset) {
  return (ByteOffset & 3) == 0;
}

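// SMRD/SMEM immediate offsets are expressed in bytes on targets with the GCN3
// encoding or GFX10+, and in dwords on older targets, where the byte offset
// must therefore be dword-aligned.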
uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
                                uint64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;

  assert(isDwordAligned(ByteOffset));
  return ByteOffset >> 2;
}

Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
                                       int64_t ByteOffset, bool IsBuffer) {
  // The signed version is always a byte offset.
  if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
    assert(hasSMEMByteOffset(ST));
    return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
  }

  if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
    return None;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
             ? Optional<int64_t>(EncodedOffset)
             : None;
}

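// CI additionally supports an SMRD encoding with a 32-bit literal offset;
// like other pre-GCN3 SMRD immediates it is expressed in dwords.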
Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
                                                int64_t ByteOffset) {
  if (!isCI(ST) || !isDwordAligned(ByteOffset))
    return None;

  int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
  return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
}

unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
  // Address offset is 12-bit signed for GFX10, 13-bit for GFX9.
  if (AMDGPU::isGFX10(ST))
    return Signed ? 12 : 11;

  return Signed ? 13 : 12;
}

// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
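//
// For example, with Imm = 4100 and Alignment = 4: MaxImm = alignDown(4095, 4)
// = 4092, so (on subtargets unaffected by the bug below) the split is
// ImmOffset = 4092 and SOffset = 8, which fits in an inline constant.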
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, Align Alignment) {
  const uint32_t MaxImm = alignDown(4095, Alignment.value());
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Alignment.value()) & ~4095;
      uint32_t Low = (Imm + Alignment.value()) & 4095;
      Imm = Low;
      Overflow = High - Alignment.value();
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}

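// Start from the calling convention's defaults, then let explicit function
// attributes override the IEEE, DX10-clamp and denormal-mode settings.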
SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";

  StringRef DenormF32Attr =
      F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
  if (!DenormF32Attr.empty()) {
    DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
    FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
    FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
  }

  StringRef DenormAttr =
      F.getFnAttribute("denormal-fp-math").getValueAsString();
  if (!DenormAttr.empty()) {
    DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);

    if (DenormF32Attr.empty()) {
      FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
      FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
    }

    FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
    FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
  }
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#define GET_Gfx9BufferFormat_IMPL
#define GET_Gfx10BufferFormat_IMPL
#define GET_Gfx11PlusBufferFormat_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

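// The lookup table is generated by TableGen; a successful lookup means the
// intrinsic's result may differ between lanes of a wave.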
bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
                                                  uint8_t NumComponents,
                                                  uint8_t NumFormat,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI)
             ? getGfx11PlusBufferFormatInfo(BitsPerComp, NumComponents,
                                            NumFormat)
             : isGFX10(STI) ? getGfx10BufferFormatInfo(BitsPerComp,
                                                       NumComponents, NumFormat)
                            : getGfx9BufferFormatInfo(BitsPerComp,
                                                      NumComponents, NumFormat);
}

const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
                                                  const MCSubtargetInfo &STI) {
  return isGFX11Plus(STI) ? getGfx11PlusBufferFormatInfo(Format)
                          : isGFX10(STI) ? getGfx10BufferFormatInfo(Format)
                                         : getGfx9BufferFormatInfo(Format);
}

} // namespace AMDGPU

raw_ostream &operator<<(raw_ostream &OS,
                        const AMDGPU::IsaInfo::TargetIDSetting S) {
  switch (S) {
  case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
    OS << "Unsupported";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Any):
    OS << "Any";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::Off):
    OS << "Off";
    break;
  case (AMDGPU::IsaInfo::TargetIDSetting::On):
    OS << "On";
    break;
  }
  return OS;
}

} // namespace llvm