1 //===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "AMDGPUBaseInfo.h"
10 #include "AMDGPU.h"
11 #include "AMDGPUAsmUtils.h"
12 #include "AMDKernelCodeT.h"
13 #include "GCNSubtarget.h"
14 #include "MCTargetDesc/AMDGPUMCTargetDesc.h"
15 #include "llvm/BinaryFormat/ELF.h"
16 #include "llvm/IR/Attributes.h"
17 #include "llvm/IR/Function.h"
18 #include "llvm/IR/GlobalValue.h"
19 #include "llvm/IR/IntrinsicsAMDGPU.h"
20 #include "llvm/IR/IntrinsicsR600.h"
21 #include "llvm/IR/LLVMContext.h"
22 #include "llvm/MC/MCSubtargetInfo.h"
23 #include "llvm/Support/AMDHSAKernelDescriptor.h"
24 #include "llvm/Support/CommandLine.h"
25 #include "llvm/Support/TargetParser.h"
26 
27 #define GET_INSTRINFO_NAMED_OPS
28 #define GET_INSTRMAP_INFO
29 #include "AMDGPUGenInstrInfo.inc"
30 
31 static llvm::cl::opt<unsigned> AmdhsaCodeObjectVersion(
32   "amdhsa-code-object-version", llvm::cl::Hidden,
33   llvm::cl::desc("AMDHSA Code Object Version"), llvm::cl::init(4),
34   llvm::cl::ZeroOrMore);
35 
36 namespace {
37 
38 /// \returns Bit mask for given bit \p Shift and bit \p Width.
39 unsigned getBitMask(unsigned Shift, unsigned Width) {
40   return ((1 << Width) - 1) << Shift;
41 }
42 
43 /// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
44 ///
45 /// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  return (Dst & ~Mask) | ((Src << Shift) & Mask);
}
51 
52 /// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
53 ///
54 /// \returns Unpacked bits.
55 unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
56   return (Src & getBitMask(Shift, Width)) >> Shift;
57 }
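// For example (illustrative values): getBitMask(4, 3) == 0x70, so
// packBits(/*Src=*/3, /*Dst=*/0, /*Shift=*/4, /*Width=*/3) == 0x30 and
// unpackBits(0x30, 4, 3) == 3.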
58 
59 /// \returns Vmcnt bit shift (lower bits).
60 unsigned getVmcntBitShiftLo() { return 0; }
61 
62 /// \returns Vmcnt bit width (lower bits).
63 unsigned getVmcntBitWidthLo() { return 4; }
64 
65 /// \returns Expcnt bit shift.
66 unsigned getExpcntBitShift() { return 4; }
67 
68 /// \returns Expcnt bit width.
69 unsigned getExpcntBitWidth() { return 3; }
70 
71 /// \returns Lgkmcnt bit shift.
72 unsigned getLgkmcntBitShift() { return 8; }
73 
74 /// \returns Lgkmcnt bit width.
75 unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
76   return (VersionMajor >= 10) ? 6 : 4;
77 }
78 
79 /// \returns Vmcnt bit shift (higher bits).
80 unsigned getVmcntBitShiftHi() { return 14; }
81 
82 /// \returns Vmcnt bit width (higher bits).
83 unsigned getVmcntBitWidthHi() { return 2; }
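// Taken together, these getters describe the legacy s_waitcnt immediate
// layout: vmcnt in bits [3:0] (plus [15:14] on gfx9+), expcnt in bits [6:4],
// and lgkmcnt in bits [11:8], widened to [13:8] on gfx10+.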
84 
} // end anonymous namespace
86 
87 namespace llvm {
88 
89 namespace AMDGPU {
90 
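// Note: AmdhsaCodeObjectVersion above is a hidden cl::opt, so the ABI version
// returned here can be overridden on the command line of any tool that parses
// LLVM options (e.g. -amdhsa-code-object-version=5).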
91 Optional<uint8_t> getHsaAbiVersion(const MCSubtargetInfo *STI) {
92   if (STI && STI->getTargetTriple().getOS() != Triple::AMDHSA)
93     return None;
94 
95   switch (AmdhsaCodeObjectVersion) {
96   case 2:
97     return ELF::ELFABIVERSION_AMDGPU_HSA_V2;
98   case 3:
99     return ELF::ELFABIVERSION_AMDGPU_HSA_V3;
100   case 4:
101     return ELF::ELFABIVERSION_AMDGPU_HSA_V4;
102   case 5:
103     return ELF::ELFABIVERSION_AMDGPU_HSA_V5;
104   default:
105     report_fatal_error(Twine("Unsupported AMDHSA Code Object Version ") +
106                        Twine(AmdhsaCodeObjectVersion));
107   }
108 }
109 
110 bool isHsaAbiVersion2(const MCSubtargetInfo *STI) {
111   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
112     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V2;
113   return false;
114 }
115 
116 bool isHsaAbiVersion3(const MCSubtargetInfo *STI) {
117   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
118     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V3;
119   return false;
120 }
121 
122 bool isHsaAbiVersion4(const MCSubtargetInfo *STI) {
123   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
124     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V4;
125   return false;
126 }
127 
128 bool isHsaAbiVersion5(const MCSubtargetInfo *STI) {
129   if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(STI))
130     return *HsaAbiVer == ELF::ELFABIVERSION_AMDGPU_HSA_V5;
131   return false;
132 }
133 
134 bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
135   return isHsaAbiVersion3(STI) || isHsaAbiVersion4(STI) ||
136          isHsaAbiVersion5(STI);
137 }
138 
139 unsigned getAmdhsaCodeObjectVersion() {
140   return AmdhsaCodeObjectVersion;
141 }
142 
143 unsigned getMultigridSyncArgImplicitArgPosition() {
144   switch (AmdhsaCodeObjectVersion) {
145   case 2:
146   case 3:
147   case 4:
148     return 48;
149   case 5:
150     return AMDGPU::ImplicitArg::MULTIGRID_SYNC_ARG_OFFSET;
151   default:
152     llvm_unreachable("Unexpected code object version");
153     return 0;
154   }
155 }
156 
157 
158 // FIXME: All such magic numbers about the ABI should be in a
159 // central TD file.
160 unsigned getHostcallImplicitArgPosition() {
161   switch (AmdhsaCodeObjectVersion) {
162   case 2:
163   case 3:
164   case 4:
165     return 24;
166   case 5:
167     return AMDGPU::ImplicitArg::HOSTCALL_PTR_OFFSET;
168   default:
169     llvm_unreachable("Unexpected code object version");
170     return 0;
171   }
172 }
173 
174 #define GET_MIMGBaseOpcodesTable_IMPL
175 #define GET_MIMGDimInfoTable_IMPL
176 #define GET_MIMGInfoTable_IMPL
177 #define GET_MIMGLZMappingTable_IMPL
178 #define GET_MIMGMIPMappingTable_IMPL
179 #define GET_MIMGBiasMappingTable_IMPL
180 #define GET_MIMGOffsetMappingTable_IMPL
181 #define GET_MIMGG16MappingTable_IMPL
182 #define GET_MAIInstInfoTable_IMPL
183 #include "AMDGPUGenSearchableTables.inc"
184 
185 int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
186                   unsigned VDataDwords, unsigned VAddrDwords) {
187   const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
188                                              VDataDwords, VAddrDwords);
189   return Info ? Info->Opcode : -1;
190 }
191 
192 const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
193   const MIMGInfo *Info = getMIMGInfo(Opc);
194   return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
195 }
196 
197 int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
198   const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
199   const MIMGInfo *NewInfo =
200       getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
201                           NewChannels, OrigInfo->VAddrDwords);
202   return NewInfo ? NewInfo->Opcode : -1;
203 }
204 
205 unsigned getAddrSizeMIMGOp(const MIMGBaseOpcodeInfo *BaseOpcode,
206                            const MIMGDimInfo *Dim, bool IsA16,
207                            bool IsG16Supported) {
208   unsigned AddrWords = BaseOpcode->NumExtraArgs;
209   unsigned AddrComponents = (BaseOpcode->Coordinates ? Dim->NumCoords : 0) +
210                             (BaseOpcode->LodOrClampOrMip ? 1 : 0);
211   if (IsA16)
212     AddrWords += divideCeil(AddrComponents, 2);
213   else
214     AddrWords += AddrComponents;
215 
216   // Note: For subtargets that support A16 but not G16, enabling A16 also
217   // enables 16 bit gradients.
218   // For subtargets that support A16 (operand) and G16 (done with a different
219   // instruction encoding), they are independent.
220 
221   if (BaseOpcode->Gradients) {
222     if ((IsA16 && !IsG16Supported) || BaseOpcode->G16)
      // There are two gradients per coordinate; we pack them separately.
224       // For the 3d case,
225       // we get (dy/du, dx/du) (-, dz/du) (dy/dv, dx/dv) (-, dz/dv)
226       AddrWords += alignTo<2>(Dim->NumGradients / 2);
227     else
228       AddrWords += Dim->NumGradients;
229   }
230   return AddrWords;
231 }
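// Worked example (illustrative): a 2D image_sample_d with A16 set on a target
// without G16 support packs the 2 coordinates into 1 dword and the 4 gradients
// into alignTo<2>(4 / 2) == 2 dwords, giving AddrWords == 3.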
232 
233 struct MUBUFInfo {
234   uint16_t Opcode;
235   uint16_t BaseOpcode;
236   uint8_t elements;
237   bool has_vaddr;
238   bool has_srsrc;
239   bool has_soffset;
240   bool IsBufferInv;
241 };
242 
243 struct MTBUFInfo {
244   uint16_t Opcode;
245   uint16_t BaseOpcode;
246   uint8_t elements;
247   bool has_vaddr;
248   bool has_srsrc;
249   bool has_soffset;
250 };
251 
252 struct SMInfo {
253   uint16_t Opcode;
254   bool IsBuffer;
255 };
256 
257 struct VOPInfo {
258   uint16_t Opcode;
259   bool IsSingle;
260 };
261 
262 #define GET_MTBUFInfoTable_DECL
263 #define GET_MTBUFInfoTable_IMPL
264 #define GET_MUBUFInfoTable_DECL
265 #define GET_MUBUFInfoTable_IMPL
266 #define GET_SMInfoTable_DECL
267 #define GET_SMInfoTable_IMPL
268 #define GET_VOP1InfoTable_DECL
269 #define GET_VOP1InfoTable_IMPL
270 #define GET_VOP2InfoTable_DECL
271 #define GET_VOP2InfoTable_IMPL
272 #define GET_VOP3InfoTable_DECL
273 #define GET_VOP3InfoTable_IMPL
274 #include "AMDGPUGenSearchableTables.inc"
275 
276 int getMTBUFBaseOpcode(unsigned Opc) {
277   const MTBUFInfo *Info = getMTBUFInfoFromOpcode(Opc);
278   return Info ? Info->BaseOpcode : -1;
279 }
280 
281 int getMTBUFOpcode(unsigned BaseOpc, unsigned Elements) {
282   const MTBUFInfo *Info = getMTBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
283   return Info ? Info->Opcode : -1;
284 }
285 
286 int getMTBUFElements(unsigned Opc) {
287   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
288   return Info ? Info->elements : 0;
289 }
290 
291 bool getMTBUFHasVAddr(unsigned Opc) {
292   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
293   return Info ? Info->has_vaddr : false;
294 }
295 
296 bool getMTBUFHasSrsrc(unsigned Opc) {
297   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
298   return Info ? Info->has_srsrc : false;
299 }
300 
301 bool getMTBUFHasSoffset(unsigned Opc) {
302   const MTBUFInfo *Info = getMTBUFOpcodeHelper(Opc);
303   return Info ? Info->has_soffset : false;
304 }
305 
306 int getMUBUFBaseOpcode(unsigned Opc) {
307   const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
308   return Info ? Info->BaseOpcode : -1;
309 }
310 
311 int getMUBUFOpcode(unsigned BaseOpc, unsigned Elements) {
312   const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndElements(BaseOpc, Elements);
313   return Info ? Info->Opcode : -1;
314 }
315 
316 int getMUBUFElements(unsigned Opc) {
317   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
318   return Info ? Info->elements : 0;
319 }
320 
321 bool getMUBUFHasVAddr(unsigned Opc) {
322   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
323   return Info ? Info->has_vaddr : false;
324 }
325 
326 bool getMUBUFHasSrsrc(unsigned Opc) {
327   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
328   return Info ? Info->has_srsrc : false;
329 }
330 
331 bool getMUBUFHasSoffset(unsigned Opc) {
332   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
333   return Info ? Info->has_soffset : false;
334 }
335 
336 bool getMUBUFIsBufferInv(unsigned Opc) {
337   const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
338   return Info ? Info->IsBufferInv : false;
339 }
340 
341 bool getSMEMIsBuffer(unsigned Opc) {
342   const SMInfo *Info = getSMEMOpcodeHelper(Opc);
343   return Info ? Info->IsBuffer : false;
344 }
345 
346 bool getVOP1IsSingle(unsigned Opc) {
347   const VOPInfo *Info = getVOP1OpcodeHelper(Opc);
348   return Info ? Info->IsSingle : false;
349 }
350 
351 bool getVOP2IsSingle(unsigned Opc) {
352   const VOPInfo *Info = getVOP2OpcodeHelper(Opc);
353   return Info ? Info->IsSingle : false;
354 }
355 
356 bool getVOP3IsSingle(unsigned Opc) {
357   const VOPInfo *Info = getVOP3OpcodeHelper(Opc);
358   return Info ? Info->IsSingle : false;
359 }
360 
361 bool getMAIIsDGEMM(unsigned Opc) {
362   const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
363   return Info ? Info->is_dgemm : false;
364 }
365 
366 bool getMAIIsGFX940XDL(unsigned Opc) {
367   const MAIInstInfo *Info = getMAIInstInfoHelper(Opc);
368   return Info ? Info->is_gfx940_xdl : false;
369 }
370 
371 // Wrapper for Tablegen'd function.  enum Subtarget is not defined in any
372 // header files, so we need to wrap it in a function that takes unsigned
373 // instead.
374 int getMCOpcode(uint16_t Opcode, unsigned Gen) {
375   return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
376 }
377 
378 namespace IsaInfo {
379 
380 AMDGPUTargetID::AMDGPUTargetID(const MCSubtargetInfo &STI)
381     : STI(STI), XnackSetting(TargetIDSetting::Any),
382       SramEccSetting(TargetIDSetting::Any) {
383   if (!STI.getFeatureBits().test(FeatureSupportsXNACK))
384     XnackSetting = TargetIDSetting::Unsupported;
385   if (!STI.getFeatureBits().test(FeatureSupportsSRAMECC))
386     SramEccSetting = TargetIDSetting::Unsupported;
387 }
388 
389 void AMDGPUTargetID::setTargetIDFromFeaturesString(StringRef FS) {
390   // Check if xnack or sramecc is explicitly enabled or disabled.  In the
391   // absence of the target features we assume we must generate code that can run
392   // in any environment.
393   SubtargetFeatures Features(FS);
394   Optional<bool> XnackRequested;
395   Optional<bool> SramEccRequested;
396 
397   for (const std::string &Feature : Features.getFeatures()) {
398     if (Feature == "+xnack")
399       XnackRequested = true;
400     else if (Feature == "-xnack")
401       XnackRequested = false;
402     else if (Feature == "+sramecc")
403       SramEccRequested = true;
404     else if (Feature == "-sramecc")
405       SramEccRequested = false;
406   }
407 
408   bool XnackSupported = isXnackSupported();
409   bool SramEccSupported = isSramEccSupported();
410 
411   if (XnackRequested) {
412     if (XnackSupported) {
413       XnackSetting =
414           *XnackRequested ? TargetIDSetting::On : TargetIDSetting::Off;
415     } else {
      // If a specific xnack setting was requested and this GPU does not
      // support xnack, emit a warning. The setting will remain "Unsupported".
418       if (*XnackRequested) {
419         errs() << "warning: xnack 'On' was requested for a processor that does "
420                   "not support it!\n";
421       } else {
422         errs() << "warning: xnack 'Off' was requested for a processor that "
423                   "does not support it!\n";
424       }
425     }
426   }
427 
428   if (SramEccRequested) {
429     if (SramEccSupported) {
430       SramEccSetting =
431           *SramEccRequested ? TargetIDSetting::On : TargetIDSetting::Off;
432     } else {
      // If a specific sramecc setting was requested and this GPU does not
      // support sramecc, emit a warning. The setting will remain
      // "Unsupported".
436       if (*SramEccRequested) {
437         errs() << "warning: sramecc 'On' was requested for a processor that "
438                   "does not support it!\n";
439       } else {
440         errs() << "warning: sramecc 'Off' was requested for a processor that "
441                   "does not support it!\n";
442       }
443     }
444   }
445 }
446 
447 static TargetIDSetting
448 getTargetIDSettingFromFeatureString(StringRef FeatureString) {
449   if (FeatureString.endswith("-"))
450     return TargetIDSetting::Off;
451   if (FeatureString.endswith("+"))
452     return TargetIDSetting::On;
453 
454   llvm_unreachable("Malformed feature string");
455 }
456 
457 void AMDGPUTargetID::setTargetIDFromTargetIDStream(StringRef TargetID) {
458   SmallVector<StringRef, 3> TargetIDSplit;
459   TargetID.split(TargetIDSplit, ':');
460 
461   for (const auto &FeatureString : TargetIDSplit) {
462     if (FeatureString.startswith("xnack"))
463       XnackSetting = getTargetIDSettingFromFeatureString(FeatureString);
464     if (FeatureString.startswith("sramecc"))
465       SramEccSetting = getTargetIDSettingFromFeatureString(FeatureString);
466   }
467 }
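// For example, a target ID stream of "gfx90a:sramecc+:xnack-" sets
// SramEccSetting to On and XnackSetting to Off; features absent from the
// stream keep their current setting.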
468 
469 std::string AMDGPUTargetID::toString() const {
470   std::string StringRep;
471   raw_string_ostream StreamRep(StringRep);
472 
473   auto TargetTriple = STI.getTargetTriple();
474   auto Version = getIsaVersion(STI.getCPU());
475 
476   StreamRep << TargetTriple.getArchName() << '-'
477             << TargetTriple.getVendorName() << '-'
478             << TargetTriple.getOSName() << '-'
479             << TargetTriple.getEnvironmentName() << '-';
480 
481   std::string Processor;
  // TODO: The following else branch is only needed because we used various
  // alias names for GPUs up until GFX9 (e.g. 'fiji' is the same as 'gfx803').
  // Remove it once all aliases are removed from GCNProcessors.td.
485   if (Version.Major >= 9)
486     Processor = STI.getCPU().str();
487   else
488     Processor = (Twine("gfx") + Twine(Version.Major) + Twine(Version.Minor) +
489                  Twine(Version.Stepping))
490                     .str();
491 
492   std::string Features;
493   if (Optional<uint8_t> HsaAbiVersion = getHsaAbiVersion(&STI)) {
494     switch (*HsaAbiVersion) {
495     case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
      // Code object V2 only supported specific processors and had fixed
      // settings for XNACK.
498       if (Processor == "gfx600") {
499       } else if (Processor == "gfx601") {
500       } else if (Processor == "gfx602") {
501       } else if (Processor == "gfx700") {
502       } else if (Processor == "gfx701") {
503       } else if (Processor == "gfx702") {
504       } else if (Processor == "gfx703") {
505       } else if (Processor == "gfx704") {
506       } else if (Processor == "gfx705") {
507       } else if (Processor == "gfx801") {
508         if (!isXnackOnOrAny())
509           report_fatal_error(
510               "AMD GPU code object V2 does not support processor " +
511               Twine(Processor) + " without XNACK");
512       } else if (Processor == "gfx802") {
513       } else if (Processor == "gfx803") {
514       } else if (Processor == "gfx805") {
515       } else if (Processor == "gfx810") {
516         if (!isXnackOnOrAny())
517           report_fatal_error(
518               "AMD GPU code object V2 does not support processor " +
519               Twine(Processor) + " without XNACK");
520       } else if (Processor == "gfx900") {
521         if (isXnackOnOrAny())
522           Processor = "gfx901";
523       } else if (Processor == "gfx902") {
524         if (isXnackOnOrAny())
525           Processor = "gfx903";
526       } else if (Processor == "gfx904") {
527         if (isXnackOnOrAny())
528           Processor = "gfx905";
529       } else if (Processor == "gfx906") {
530         if (isXnackOnOrAny())
531           Processor = "gfx907";
532       } else if (Processor == "gfx90c") {
533         if (isXnackOnOrAny())
534           report_fatal_error(
535               "AMD GPU code object V2 does not support processor " +
536               Twine(Processor) + " with XNACK being ON or ANY");
537       } else {
538         report_fatal_error(
539             "AMD GPU code object V2 does not support processor " +
540             Twine(Processor));
541       }
542       break;
543     case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
544       // xnack.
545       if (isXnackOnOrAny())
546         Features += "+xnack";
      // In code object v2 and v3, the "sramecc" feature was spelled with a
      // hyphen ("sram-ecc").
549       if (isSramEccOnOrAny())
550         Features += "+sram-ecc";
551       break;
552     case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
553     case ELF::ELFABIVERSION_AMDGPU_HSA_V5:
554       // sramecc.
555       if (getSramEccSetting() == TargetIDSetting::Off)
556         Features += ":sramecc-";
557       else if (getSramEccSetting() == TargetIDSetting::On)
558         Features += ":sramecc+";
559       // xnack.
560       if (getXnackSetting() == TargetIDSetting::Off)
561         Features += ":xnack-";
562       else if (getXnackSetting() == TargetIDSetting::On)
563         Features += ":xnack+";
564       break;
565     default:
566       break;
567     }
568   }
569 
570   StreamRep << Processor << Features;
571 
572   StreamRep.flush();
573   return StringRep;
574 }
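// For code object V4/V5 the returned string has the form
// "amdgcn-amd-amdhsa--gfx90a:sramecc+:xnack-" (illustrative; the environment
// component of the triple is typically empty, hence the double dash).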
575 
576 unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
577   if (STI->getFeatureBits().test(FeatureWavefrontSize16))
578     return 16;
579   if (STI->getFeatureBits().test(FeatureWavefrontSize32))
580     return 32;
581 
582   return 64;
583 }
584 
585 unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
586   if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
587     return 32768;
588   if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
589     return 65536;
590 
591   return 0;
592 }
593 
594 unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
595   // "Per CU" really means "per whatever functional block the waves of a
596   // workgroup must share". For gfx10 in CU mode this is the CU, which contains
597   // two SIMDs.
598   if (isGFX10Plus(*STI) && STI->getFeatureBits().test(FeatureCuMode))
599     return 2;
600   // Pre-gfx10 a CU contains four SIMDs. For gfx10 in WGP mode the WGP contains
601   // two CUs, so a total of four SIMDs.
602   return 4;
603 }
604 
605 unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
606                                unsigned FlatWorkGroupSize) {
607   assert(FlatWorkGroupSize != 0);
608   if (STI->getTargetTriple().getArch() != Triple::amdgcn)
609     return 8;
610   unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
611   if (N == 1)
612     return 40;
613   N = 40 / N;
614   return std::min(N, 16u);
615 }
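// For example, a flat workgroup size of 256 on a wave64 target needs N == 4
// waves per group, allowing at most min(40 / 4, 16u) == 10 workgroups per CU.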
616 
617 unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
618   return 1;
619 }
620 
621 unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI) {
622   // FIXME: Need to take scratch memory into account.
623   if (isGFX90A(*STI))
624     return 8;
625   if (!isGFX10Plus(*STI))
626     return 10;
627   return hasGFX10_3Insts(*STI) ? 16 : 20;
628 }
629 
630 unsigned getWavesPerEUForWorkGroup(const MCSubtargetInfo *STI,
631                                    unsigned FlatWorkGroupSize) {
632   return divideCeil(getWavesPerWorkGroup(STI, FlatWorkGroupSize),
633                     getEUsPerCU(STI));
634 }
635 
636 unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
637   return 1;
638 }
639 
640 unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
641   // Some subtargets allow encoding 2048, but this isn't tested or supported.
642   return 1024;
643 }
644 
645 unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
646                               unsigned FlatWorkGroupSize) {
647   return divideCeil(FlatWorkGroupSize, getWavefrontSize(STI));
648 }
649 
650 unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
651   IsaVersion Version = getIsaVersion(STI->getCPU());
652   if (Version.Major >= 10)
653     return getAddressableNumSGPRs(STI);
654   if (Version.Major >= 8)
655     return 16;
656   return 8;
657 }
658 
659 unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
660   return 8;
661 }
662 
663 unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
664   IsaVersion Version = getIsaVersion(STI->getCPU());
665   if (Version.Major >= 8)
666     return 800;
667   return 512;
668 }
669 
670 unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
671   if (STI->getFeatureBits().test(FeatureSGPRInitBug))
672     return FIXED_NUM_SGPRS_FOR_INIT_BUG;
673 
674   IsaVersion Version = getIsaVersion(STI->getCPU());
675   if (Version.Major >= 10)
676     return 106;
677   if (Version.Major >= 8)
678     return 102;
679   return 104;
680 }
681 
682 unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
683   assert(WavesPerEU != 0);
684 
685   IsaVersion Version = getIsaVersion(STI->getCPU());
686   if (Version.Major >= 10)
687     return 0;
688 
689   if (WavesPerEU >= getMaxWavesPerEU(STI))
690     return 0;
691 
692   unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
693   if (STI->getFeatureBits().test(FeatureTrapHandler))
694     MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
695   MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
696   return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
697 }
698 
699 unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
700                         bool Addressable) {
701   assert(WavesPerEU != 0);
702 
703   unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
704   IsaVersion Version = getIsaVersion(STI->getCPU());
705   if (Version.Major >= 10)
706     return Addressable ? AddressableNumSGPRs : 108;
707   if (Version.Major >= 8 && !Addressable)
708     AddressableNumSGPRs = 112;
709   unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
710   if (STI->getFeatureBits().test(FeatureTrapHandler))
711     MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
712   MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
713   return std::min(MaxNumSGPRs, AddressableNumSGPRs);
714 }
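// Illustrative example (gfx9, no trap handler): at WavesPerEU == 8 this yields
// alignDown(800 / 8, 16) == 96 SGPRs, which is below the 102 addressable
// SGPRs for that generation.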
715 
716 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
717                           bool FlatScrUsed, bool XNACKUsed) {
718   unsigned ExtraSGPRs = 0;
719   if (VCCUsed)
720     ExtraSGPRs = 2;
721 
722   IsaVersion Version = getIsaVersion(STI->getCPU());
723   if (Version.Major >= 10)
724     return ExtraSGPRs;
725 
726   if (Version.Major < 8) {
727     if (FlatScrUsed)
728       ExtraSGPRs = 4;
729   } else {
730     if (XNACKUsed)
731       ExtraSGPRs = 4;
732 
733     if (FlatScrUsed ||
734         STI->getFeatureBits().test(AMDGPU::FeatureArchitectedFlatScratch))
735       ExtraSGPRs = 6;
736   }
737 
738   return ExtraSGPRs;
739 }
740 
741 unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
742                           bool FlatScrUsed) {
743   return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
744                           STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
745 }
746 
747 unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
748   NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is the actual number of SGPR blocks minus 1.
750   return NumSGPRs / getSGPREncodingGranule(STI) - 1;
751 }
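// For example, 37 SGPRs round up to 40 (encoding granule 8) and encode as
// 40 / 8 - 1 == 4 SGPR blocks.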
752 
753 unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
754                              Optional<bool> EnableWavefrontSize32) {
755   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
756     return 8;
757 
758   bool IsWave32 = EnableWavefrontSize32 ?
759       *EnableWavefrontSize32 :
760       STI->getFeatureBits().test(FeatureWavefrontSize32);
761 
762   if (hasGFX10_3Insts(*STI))
763     return IsWave32 ? 16 : 8;
764 
765   return IsWave32 ? 8 : 4;
766 }
767 
768 unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
769                                 Optional<bool> EnableWavefrontSize32) {
770   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
771     return 8;
772 
773   bool IsWave32 = EnableWavefrontSize32 ?
774       *EnableWavefrontSize32 :
775       STI->getFeatureBits().test(FeatureWavefrontSize32);
776 
777   return IsWave32 ? 8 : 4;
778 }
779 
780 unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
781   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
782     return 512;
783   if (!isGFX10Plus(*STI))
784     return 256;
785   return STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1024 : 512;
786 }
787 
788 unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
789   if (STI->getFeatureBits().test(FeatureGFX90AInsts))
790     return 512;
791   return 256;
792 }
793 
794 unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
795   assert(WavesPerEU != 0);
796 
797   if (WavesPerEU >= getMaxWavesPerEU(STI))
798     return 0;
799   unsigned MinNumVGPRs =
800       alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
801                 getVGPRAllocGranule(STI)) + 1;
802   return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
803 }
804 
805 unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
806   assert(WavesPerEU != 0);
807 
808   unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
809                                    getVGPRAllocGranule(STI));
810   unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
811   return std::min(MaxNumVGPRs, AddressableNumVGPRs);
812 }
813 
814 unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
815                           Optional<bool> EnableWavefrontSize32) {
816   NumVGPRs = alignTo(std::max(1u, NumVGPRs),
817                      getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is the actual number of VGPR blocks minus 1.
819   return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
820 }
821 
822 } // end namespace IsaInfo
823 
824 void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
825                                const MCSubtargetInfo *STI) {
826   IsaVersion Version = getIsaVersion(STI->getCPU());
827 
828   memset(&Header, 0, sizeof(Header));
829 
830   Header.amd_kernel_code_version_major = 1;
831   Header.amd_kernel_code_version_minor = 2;
832   Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
833   Header.amd_machine_version_major = Version.Major;
834   Header.amd_machine_version_minor = Version.Minor;
835   Header.amd_machine_version_stepping = Version.Stepping;
836   Header.kernel_code_entry_byte_offset = sizeof(Header);
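  // wavefront_size holds log2 of the wavefront size, so 6 means wave64; it is
  // switched to 5 (wave32) below when FeatureWavefrontSize32 is set.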
837   Header.wavefront_size = 6;
838 
839   // If the code object does not support indirect functions, then the value must
840   // be 0xffffffff.
841   Header.call_convention = -1;
842 
843   // These alignment values are specified in powers of two, so alignment =
844   // 2^n.  The minimum alignment is 2^4 = 16.
845   Header.kernarg_segment_alignment = 4;
846   Header.group_segment_alignment = 4;
847   Header.private_segment_alignment = 4;
848 
849   if (Version.Major >= 10) {
850     if (STI->getFeatureBits().test(FeatureWavefrontSize32)) {
851       Header.wavefront_size = 5;
852       Header.code_properties |= AMD_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32;
853     }
854     Header.compute_pgm_resource_registers |=
855       S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
856       S_00B848_MEM_ORDERED(1);
857   }
858 }
859 
860 amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
861     const MCSubtargetInfo *STI) {
862   IsaVersion Version = getIsaVersion(STI->getCPU());
863 
864   amdhsa::kernel_descriptor_t KD;
865   memset(&KD, 0, sizeof(KD));
866 
867   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
868                   amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
869                   amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
870   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
871                   amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
872   AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
873                   amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
874   AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
875                   amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
876   if (Version.Major >= 10) {
877     AMDHSA_BITS_SET(KD.kernel_code_properties,
878                     amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32,
879                     STI->getFeatureBits().test(FeatureWavefrontSize32) ? 1 : 0);
880     AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
881                     amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
882                     STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
883     AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
884                     amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
885   }
886   if (AMDGPU::isGFX90A(*STI)) {
887     AMDHSA_BITS_SET(KD.compute_pgm_rsrc3,
888                     amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT,
889                     STI->getFeatureBits().test(FeatureTgSplit) ? 1 : 0);
890   }
891   return KD;
892 }
893 
894 bool isGroupSegment(const GlobalValue *GV) {
895   return GV->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
896 }
897 
898 bool isGlobalSegment(const GlobalValue *GV) {
899   return GV->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
900 }
901 
902 bool isReadOnlySegment(const GlobalValue *GV) {
903   unsigned AS = GV->getAddressSpace();
904   return AS == AMDGPUAS::CONSTANT_ADDRESS ||
905          AS == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
906 }
907 
908 bool shouldEmitConstantsToTextSection(const Triple &TT) {
909   return TT.getArch() == Triple::r600;
910 }
911 
912 int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
913   Attribute A = F.getFnAttribute(Name);
914   int Result = Default;
915 
916   if (A.isStringAttribute()) {
917     StringRef Str = A.getValueAsString();
918     if (Str.getAsInteger(0, Result)) {
919       LLVMContext &Ctx = F.getContext();
920       Ctx.emitError("can't parse integer attribute " + Name);
921     }
922   }
923 
924   return Result;
925 }
926 
927 std::pair<int, int> getIntegerPairAttribute(const Function &F,
928                                             StringRef Name,
929                                             std::pair<int, int> Default,
930                                             bool OnlyFirstRequired) {
931   Attribute A = F.getFnAttribute(Name);
932   if (!A.isStringAttribute())
933     return Default;
934 
935   LLVMContext &Ctx = F.getContext();
936   std::pair<int, int> Ints = Default;
937   std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
938   if (Strs.first.trim().getAsInteger(0, Ints.first)) {
939     Ctx.emitError("can't parse first integer attribute " + Name);
940     return Default;
941   }
942   if (Strs.second.trim().getAsInteger(0, Ints.second)) {
943     if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
944       Ctx.emitError("can't parse second integer attribute " + Name);
945       return Default;
946     }
947   }
948 
949   return Ints;
950 }
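// For example, an attribute value such as "amdgpu-flat-work-group-size"="1,256"
// parses to the pair {1, 256}; with OnlyFirstRequired set, a lone "128" is
// also accepted for the first element.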
951 
952 unsigned getVmcntBitMask(const IsaVersion &Version) {
953   unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
954   if (Version.Major < 9)
955     return VmcntLo;
956 
957   unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
958   return VmcntLo | VmcntHi;
959 }
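// With the bit widths above, getVmcntBitMask evaluates to 0xf before gfx9 and
// to 0x3f (the two extra high vmcnt bits included) on gfx9 and later.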
960 
961 unsigned getExpcntBitMask(const IsaVersion &Version) {
962   return (1 << getExpcntBitWidth()) - 1;
963 }
964 
965 unsigned getLgkmcntBitMask(const IsaVersion &Version) {
966   return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
967 }
968 
969 unsigned getWaitcntBitMask(const IsaVersion &Version) {
970   unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
971   unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
972   unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
973                                 getLgkmcntBitWidth(Version.Major));
974   unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
975   if (Version.Major < 9)
976     return Waitcnt;
977 
978   unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
979   return Waitcnt | VmcntHi;
980 }
981 
982 unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
983   unsigned VmcntLo =
984       unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
985   if (Version.Major < 9)
986     return VmcntLo;
987 
988   unsigned VmcntHi =
989       unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
990   VmcntHi <<= getVmcntBitWidthLo();
991   return VmcntLo | VmcntHi;
992 }
993 
994 unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
995   return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
996 }
997 
998 unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
999   return unpackBits(Waitcnt, getLgkmcntBitShift(),
1000                     getLgkmcntBitWidth(Version.Major));
1001 }
1002 
1003 void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
1004                    unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
1005   Vmcnt = decodeVmcnt(Version, Waitcnt);
1006   Expcnt = decodeExpcnt(Version, Waitcnt);
1007   Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
1008 }
1009 
1010 Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
1011   Waitcnt Decoded;
1012   Decoded.VmCnt = decodeVmcnt(Version, Encoded);
1013   Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
1014   Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
1015   return Decoded;
1016 }
1017 
1018 unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
1019                      unsigned Vmcnt) {
1020   Waitcnt =
1021       packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
1022   if (Version.Major < 9)
1023     return Waitcnt;
1024 
1025   Vmcnt >>= getVmcntBitWidthLo();
1026   return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
1027 }
1028 
1029 unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
1030                       unsigned Expcnt) {
1031   return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
1032 }
1033 
1034 unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
1035                        unsigned Lgkmcnt) {
1036   return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
1037                                     getLgkmcntBitWidth(Version.Major));
1038 }
1039 
1040 unsigned encodeWaitcnt(const IsaVersion &Version,
1041                        unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
1042   unsigned Waitcnt = getWaitcntBitMask(Version);
1043   Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
1044   Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
1045   Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
1046   return Waitcnt;
1047 }
1048 
1049 unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
1050   return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
1051 }
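// Illustrative round trip on gfx9: encodeWaitcnt(Version, 63, 7, 15) packs the
// maximum counter values into 0xcf7f, which equals getWaitcntBitMask(Version),
// and decodeWaitcnt recovers {63, 7, 15} from that encoding.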
1052 
1053 //===----------------------------------------------------------------------===//
1054 // Custom Operands.
1055 //
// A table of custom operands shall describe "primary" operand names first,
// followed by aliases if any. It is not required but recommended to arrange
// operands so that operand encoding matches operand position in the table.
// This will make disassembly a bit more efficient. Unused slots in the table
// shall have an empty name.
1061 //
1062 //===----------------------------------------------------------------------===//
1063 
1064 template <class T>
1065 static bool isValidOpr(int Idx, const CustomOperand<T> OpInfo[], int OpInfoSize,
1066                        T Context) {
1067   return 0 <= Idx && Idx < OpInfoSize && !OpInfo[Idx].Name.empty() &&
1068          (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context));
1069 }
1070 
1071 template <class T>
1072 static int getOprIdx(std::function<bool(const CustomOperand<T> &)> Test,
1073                      const CustomOperand<T> OpInfo[], int OpInfoSize,
1074                      T Context) {
1075   int InvalidIdx = OPR_ID_UNKNOWN;
1076   for (int Idx = 0; Idx < OpInfoSize; ++Idx) {
1077     if (Test(OpInfo[Idx])) {
1078       if (!OpInfo[Idx].Cond || OpInfo[Idx].Cond(Context))
1079         return Idx;
1080       InvalidIdx = OPR_ID_UNSUPPORTED;
1081     }
1082   }
1083   return InvalidIdx;
1084 }
1085 
1086 template <class T>
1087 static int getOprIdx(const StringRef Name, const CustomOperand<T> OpInfo[],
1088                      int OpInfoSize, T Context) {
1089   auto Test = [=](const CustomOperand<T> &Op) { return Op.Name == Name; };
1090   return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1091 }
1092 
1093 template <class T>
1094 static int getOprIdx(int Id, const CustomOperand<T> OpInfo[], int OpInfoSize,
1095                      T Context, bool QuickCheck = true) {
1096   auto Test = [=](const CustomOperand<T> &Op) {
1097     return Op.Encoding == Id && !Op.Name.empty();
1098   };
1099   // This is an optimization that should work in most cases.
1100   // As a side effect, it may cause selection of an alias
1101   // instead of a primary operand name in case of sparse tables.
1102   if (QuickCheck && isValidOpr<T>(Id, OpInfo, OpInfoSize, Context) &&
1103       OpInfo[Id].Encoding == Id) {
1104     return Id;
1105   }
1106   return getOprIdx<T>(Test, OpInfo, OpInfoSize, Context);
1107 }
1108 
1109 //===----------------------------------------------------------------------===//
1110 // Custom Operand Values
1111 //===----------------------------------------------------------------------===//
1112 
1113 static unsigned getDefaultCustomOperandEncoding(const CustomOperandVal *Opr,
1114                                                 int Size,
1115                                                 const MCSubtargetInfo &STI) {
1116   unsigned Enc = 0;
1117   for (int Idx = 0; Idx < Size; ++Idx) {
1118     const auto &Op = Opr[Idx];
1119     if (Op.isSupported(STI))
1120       Enc |= Op.encode(Op.Default);
1121   }
1122   return Enc;
1123 }
1124 
1125 static bool isSymbolicCustomOperandEncoding(const CustomOperandVal *Opr,
1126                                             int Size, unsigned Code,
1127                                             bool &HasNonDefaultVal,
1128                                             const MCSubtargetInfo &STI) {
1129   unsigned UsedOprMask = 0;
1130   HasNonDefaultVal = false;
1131   for (int Idx = 0; Idx < Size; ++Idx) {
1132     const auto &Op = Opr[Idx];
1133     if (!Op.isSupported(STI))
1134       continue;
1135     UsedOprMask |= Op.getMask();
1136     unsigned Val = Op.decode(Code);
1137     if (!Op.isValid(Val))
1138       return false;
1139     HasNonDefaultVal |= (Val != Op.Default);
1140   }
1141   return (Code & ~UsedOprMask) == 0;
1142 }
1143 
1144 static bool decodeCustomOperand(const CustomOperandVal *Opr, int Size,
1145                                 unsigned Code, int &Idx, StringRef &Name,
1146                                 unsigned &Val, bool &IsDefault,
1147                                 const MCSubtargetInfo &STI) {
1148   while (Idx < Size) {
1149     const auto &Op = Opr[Idx++];
1150     if (Op.isSupported(STI)) {
1151       Name = Op.Name;
1152       Val = Op.decode(Code);
1153       IsDefault = (Val == Op.Default);
1154       return true;
1155     }
1156   }
1157 
1158   return false;
1159 }
1160 
1161 static int encodeCustomOperandVal(const CustomOperandVal &Op,
1162                                   int64_t InputVal) {
1163   if (InputVal < 0 || InputVal > Op.Max)
1164     return OPR_VAL_INVALID;
1165   return Op.encode(InputVal);
1166 }
1167 
1168 static int encodeCustomOperand(const CustomOperandVal *Opr, int Size,
1169                                const StringRef Name, int64_t InputVal,
1170                                unsigned &UsedOprMask,
1171                                const MCSubtargetInfo &STI) {
1172   int InvalidId = OPR_ID_UNKNOWN;
1173   for (int Idx = 0; Idx < Size; ++Idx) {
1174     const auto &Op = Opr[Idx];
1175     if (Op.Name == Name) {
1176       if (!Op.isSupported(STI)) {
1177         InvalidId = OPR_ID_UNSUPPORTED;
1178         continue;
1179       }
1180       auto OprMask = Op.getMask();
1181       if (OprMask & UsedOprMask)
1182         return OPR_ID_DUPLICATE;
1183       UsedOprMask |= OprMask;
1184       return encodeCustomOperandVal(Op, InputVal);
1185     }
1186   }
1187   return InvalidId;
1188 }
1189 
1190 //===----------------------------------------------------------------------===//
1191 // DepCtr
1192 //===----------------------------------------------------------------------===//
1193 
1194 namespace DepCtr {
1195 
1196 int getDefaultDepCtrEncoding(const MCSubtargetInfo &STI) {
1197   static int Default = -1;
1198   if (Default == -1)
1199     Default = getDefaultCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, STI);
1200   return Default;
1201 }
1202 
1203 bool isSymbolicDepCtrEncoding(unsigned Code, bool &HasNonDefaultVal,
1204                               const MCSubtargetInfo &STI) {
1205   return isSymbolicCustomOperandEncoding(DepCtrInfo, DEP_CTR_SIZE, Code,
1206                                          HasNonDefaultVal, STI);
1207 }
1208 
1209 bool decodeDepCtr(unsigned Code, int &Id, StringRef &Name, unsigned &Val,
1210                   bool &IsDefault, const MCSubtargetInfo &STI) {
1211   return decodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Code, Id, Name, Val,
1212                              IsDefault, STI);
1213 }
1214 
1215 int encodeDepCtr(const StringRef Name, int64_t Val, unsigned &UsedOprMask,
1216                  const MCSubtargetInfo &STI) {
1217   return encodeCustomOperand(DepCtrInfo, DEP_CTR_SIZE, Name, Val, UsedOprMask,
1218                              STI);
1219 }
1220 
1221 } // namespace DepCtr
1222 
1223 //===----------------------------------------------------------------------===//
1224 // hwreg
1225 //===----------------------------------------------------------------------===//
1226 
1227 namespace Hwreg {
1228 
1229 int64_t getHwregId(const StringRef Name, const MCSubtargetInfo &STI) {
1230   int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Opr, OPR_SIZE, STI);
1231   return (Idx < 0) ? Idx : Opr[Idx].Encoding;
1232 }
1233 
1234 bool isValidHwreg(int64_t Id) {
1235   return 0 <= Id && isUInt<ID_WIDTH_>(Id);
1236 }
1237 
1238 bool isValidHwregOffset(int64_t Offset) {
1239   return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
1240 }
1241 
1242 bool isValidHwregWidth(int64_t Width) {
1243   return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
1244 }
1245 
1246 uint64_t encodeHwreg(uint64_t Id, uint64_t Offset, uint64_t Width) {
1247   return (Id << ID_SHIFT_) |
1248          (Offset << OFFSET_SHIFT_) |
1249          ((Width - 1) << WIDTH_M1_SHIFT_);
1250 }
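// encodeHwreg and decodeHwreg below are inverses for any id, offset and width
// accepted by the isValid* predicates above.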
1251 
1252 StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
1253   int Idx = getOprIdx<const MCSubtargetInfo &>(Id, Opr, OPR_SIZE, STI);
1254   return (Idx < 0) ? "" : Opr[Idx].Name;
1255 }
1256 
1257 void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
1258   Id = (Val & ID_MASK_) >> ID_SHIFT_;
1259   Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
1260   Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
1261 }
1262 
1263 } // namespace Hwreg
1264 
1265 //===----------------------------------------------------------------------===//
1266 // exp tgt
1267 //===----------------------------------------------------------------------===//
1268 
1269 namespace Exp {
1270 
1271 struct ExpTgt {
1272   StringLiteral Name;
1273   unsigned Tgt;
1274   unsigned MaxIndex;
1275 };
1276 
1277 static constexpr ExpTgt ExpTgtInfo[] = {
1278   {{"null"},  ET_NULL,   ET_NULL_MAX_IDX},
1279   {{"mrtz"},  ET_MRTZ,   ET_MRTZ_MAX_IDX},
1280   {{"prim"},  ET_PRIM,   ET_PRIM_MAX_IDX},
1281   {{"mrt"},   ET_MRT0,   ET_MRT_MAX_IDX},
1282   {{"pos"},   ET_POS0,   ET_POS_MAX_IDX},
1283   {{"param"}, ET_PARAM0, ET_PARAM_MAX_IDX},
1284 };
1285 
1286 bool getTgtName(unsigned Id, StringRef &Name, int &Index) {
1287   for (const ExpTgt &Val : ExpTgtInfo) {
1288     if (Val.Tgt <= Id && Id <= Val.Tgt + Val.MaxIndex) {
1289       Index = (Val.MaxIndex == 0) ? -1 : (Id - Val.Tgt);
1290       Name = Val.Name;
1291       return true;
1292     }
1293   }
1294   return false;
1295 }
1296 
1297 unsigned getTgtId(const StringRef Name) {
1298 
1299   for (const ExpTgt &Val : ExpTgtInfo) {
1300     if (Val.MaxIndex == 0 && Name == Val.Name)
1301       return Val.Tgt;
1302 
1303     if (Val.MaxIndex > 0 && Name.startswith(Val.Name)) {
1304       StringRef Suffix = Name.drop_front(Val.Name.size());
1305 
1306       unsigned Id;
1307       if (Suffix.getAsInteger(10, Id) || Id > Val.MaxIndex)
1308         return ET_INVALID;
1309 
      // Reject leading zeroes
1311       if (Suffix.size() > 1 && Suffix[0] == '0')
1312         return ET_INVALID;
1313 
1314       return Val.Tgt + Id;
1315     }
1316   }
1317   return ET_INVALID;
1318 }
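// For example, getTgtId("mrt3") returns ET_MRT0 + 3, while "mrt03" is rejected
// because of the leading zero and any index above ET_MRT_MAX_IDX returns
// ET_INVALID.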
1319 
1320 bool isSupportedTgtId(unsigned Id, const MCSubtargetInfo &STI) {
1321   return (Id != ET_POS4 && Id != ET_PRIM) || isGFX10Plus(STI);
1322 }
1323 
1324 } // namespace Exp
1325 
1326 //===----------------------------------------------------------------------===//
1327 // MTBUF Format
1328 //===----------------------------------------------------------------------===//
1329 
1330 namespace MTBUFFormat {
1331 
1332 int64_t getDfmt(const StringRef Name) {
1333   for (int Id = DFMT_MIN; Id <= DFMT_MAX; ++Id) {
1334     if (Name == DfmtSymbolic[Id])
1335       return Id;
1336   }
1337   return DFMT_UNDEF;
1338 }
1339 
1340 StringRef getDfmtName(unsigned Id) {
1341   assert(Id <= DFMT_MAX);
1342   return DfmtSymbolic[Id];
1343 }
1344 
1345 static StringLiteral const *getNfmtLookupTable(const MCSubtargetInfo &STI) {
1346   if (isSI(STI) || isCI(STI))
1347     return NfmtSymbolicSICI;
1348   if (isVI(STI) || isGFX9(STI))
1349     return NfmtSymbolicVI;
1350   return NfmtSymbolicGFX10;
1351 }
1352 
1353 int64_t getNfmt(const StringRef Name, const MCSubtargetInfo &STI) {
1354   auto lookupTable = getNfmtLookupTable(STI);
1355   for (int Id = NFMT_MIN; Id <= NFMT_MAX; ++Id) {
1356     if (Name == lookupTable[Id])
1357       return Id;
1358   }
1359   return NFMT_UNDEF;
1360 }
1361 
1362 StringRef getNfmtName(unsigned Id, const MCSubtargetInfo &STI) {
1363   assert(Id <= NFMT_MAX);
1364   return getNfmtLookupTable(STI)[Id];
1365 }
1366 
1367 bool isValidDfmtNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1368   unsigned Dfmt;
1369   unsigned Nfmt;
1370   decodeDfmtNfmt(Id, Dfmt, Nfmt);
1371   return isValidNfmt(Nfmt, STI);
1372 }
1373 
1374 bool isValidNfmt(unsigned Id, const MCSubtargetInfo &STI) {
1375   return !getNfmtName(Id, STI).empty();
1376 }
1377 
1378 int64_t encodeDfmtNfmt(unsigned Dfmt, unsigned Nfmt) {
1379   return (Dfmt << DFMT_SHIFT) | (Nfmt << NFMT_SHIFT);
1380 }
1381 
1382 void decodeDfmtNfmt(unsigned Format, unsigned &Dfmt, unsigned &Nfmt) {
1383   Dfmt = (Format >> DFMT_SHIFT) & DFMT_MASK;
1384   Nfmt = (Format >> NFMT_SHIFT) & NFMT_MASK;
1385 }
1386 
1387 int64_t getUnifiedFormat(const StringRef Name) {
1388   for (int Id = UFMT_FIRST; Id <= UFMT_LAST; ++Id) {
1389     if (Name == UfmtSymbolic[Id])
1390       return Id;
1391   }
1392   return UFMT_UNDEF;
1393 }
1394 
1395 StringRef getUnifiedFormatName(unsigned Id) {
1396   return isValidUnifiedFormat(Id) ? UfmtSymbolic[Id] : "";
1397 }
1398 
1399 bool isValidUnifiedFormat(unsigned Id) {
1400   return Id <= UFMT_LAST;
1401 }
1402 
1403 int64_t convertDfmtNfmt2Ufmt(unsigned Dfmt, unsigned Nfmt) {
1404   int64_t Fmt = encodeDfmtNfmt(Dfmt, Nfmt);
1405   for (int Id = UFMT_FIRST; Id <= UFMT_LAST; ++Id) {
1406     if (Fmt == DfmtNfmt2UFmt[Id])
1407       return Id;
1408   }
1409   return UFMT_UNDEF;
1410 }
1411 
1412 bool isValidFormatEncoding(unsigned Val, const MCSubtargetInfo &STI) {
1413   return isGFX10Plus(STI) ? (Val <= UFMT_MAX) : (Val <= DFMT_NFMT_MAX);
1414 }
1415 
1416 unsigned getDefaultFormatEncoding(const MCSubtargetInfo &STI) {
1417   if (isGFX10Plus(STI))
1418     return UFMT_DEFAULT;
1419   return DFMT_NFMT_DEFAULT;
1420 }
1421 
1422 } // namespace MTBUFFormat
1423 
1424 //===----------------------------------------------------------------------===//
1425 // SendMsg
1426 //===----------------------------------------------------------------------===//
1427 
1428 namespace SendMsg {
1429 
1430 int64_t getMsgId(const StringRef Name, const MCSubtargetInfo &STI) {
1431   int Idx = getOprIdx<const MCSubtargetInfo &>(Name, Msg, MSG_SIZE, STI);
1432   return (Idx < 0) ? Idx : Msg[Idx].Encoding;
1433 }
1434 
1435 bool isValidMsgId(int64_t MsgId) {
1436   return 0 <= MsgId && isUInt<ID_WIDTH_>(MsgId);
1437 }
1438 
1439 StringRef getMsgName(int64_t MsgId, const MCSubtargetInfo &STI) {
1440   int Idx = getOprIdx<const MCSubtargetInfo &>(MsgId, Msg, MSG_SIZE, STI);
1441   return (Idx < 0) ? "" : Msg[Idx].Name;
1442 }
1443 
1444 int64_t getMsgOpId(int64_t MsgId, const StringRef Name) {
1445   const char* const *S = (MsgId == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1446   const int F = (MsgId == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1447   const int L = (MsgId == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1448   for (int i = F; i < L; ++i) {
1449     if (Name == S[i]) {
1450       return i;
1451     }
1452   }
1453   return OP_UNKNOWN_;
1454 }
1455 
1456 bool isValidMsgOp(int64_t MsgId, int64_t OpId, const MCSubtargetInfo &STI,
1457                   bool Strict) {
1458   assert(isValidMsgId(MsgId));
1459 
1460   if (!Strict)
1461     return 0 <= OpId && isUInt<OP_WIDTH_>(OpId);
1462 
1463   switch(MsgId)
1464   {
1465   case ID_GS:
1466     return (OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_) && OpId != OP_GS_NOP;
1467   case ID_GS_DONE:
1468     return OP_GS_FIRST_ <= OpId && OpId < OP_GS_LAST_;
1469   case ID_SYSMSG:
1470     return OP_SYS_FIRST_ <= OpId && OpId < OP_SYS_LAST_;
1471   default:
1472     return OpId == OP_NONE_;
1473   }
1474 }
1475 
1476 StringRef getMsgOpName(int64_t MsgId, int64_t OpId) {
1477   assert(msgRequiresOp(MsgId));
1478   return (MsgId == ID_SYSMSG)? OpSysSymbolic[OpId] : OpGsSymbolic[OpId];
1479 }
1480 
1481 bool isValidMsgStream(int64_t MsgId, int64_t OpId, int64_t StreamId,
1482                       const MCSubtargetInfo &STI, bool Strict) {
1483   assert(isValidMsgOp(MsgId, OpId, STI, Strict));
1484 
1485   if (!Strict)
1486     return 0 <= StreamId && isUInt<STREAM_ID_WIDTH_>(StreamId);
1487 
1488   switch(MsgId)
1489   {
1490   case ID_GS:
1491     return STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_;
1492   case ID_GS_DONE:
1493     return (OpId == OP_GS_NOP)?
1494            (StreamId == STREAM_ID_NONE_) :
1495            (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_);
1496   default:
1497     return StreamId == STREAM_ID_NONE_;
1498   }
1499 }
1500 
1501 bool msgRequiresOp(int64_t MsgId) {
1502   return MsgId == ID_GS || MsgId == ID_GS_DONE || MsgId == ID_SYSMSG;
1503 }
1504 
1505 bool msgSupportsStream(int64_t MsgId, int64_t OpId) {
1506   return (MsgId == ID_GS || MsgId == ID_GS_DONE) && OpId != OP_GS_NOP;
1507 }
1508 
1509 void decodeMsg(unsigned Val,
1510                uint16_t &MsgId,
1511                uint16_t &OpId,
1512                uint16_t &StreamId) {
1513   MsgId = Val & ID_MASK_;
1514   OpId = (Val & OP_MASK_) >> OP_SHIFT_;
1515   StreamId = (Val & STREAM_ID_MASK_) >> STREAM_ID_SHIFT_;
1516 }
1517 
1518 uint64_t encodeMsg(uint64_t MsgId,
1519                    uint64_t OpId,
1520                    uint64_t StreamId) {
1521   return (MsgId << ID_SHIFT_) |
1522          (OpId << OP_SHIFT_) |
1523          (StreamId << STREAM_ID_SHIFT_);
1524 }
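// Illustrative example, assuming the SIDefines.h values ID_GS == 2,
// OP_GS_EMIT == 2 and field shifts of 0/4/8: encodeMsg(ID_GS, OP_GS_EMIT, 1)
// == 0x122, and decodeMsg(0x122, ...) recovers the three fields.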
1525 
1526 } // namespace SendMsg
1527 
1528 //===----------------------------------------------------------------------===//
1529 //
1530 //===----------------------------------------------------------------------===//
1531 
1532 unsigned getInitialPSInputAddr(const Function &F) {
1533   return getIntegerAttribute(F, "InitialPSInputAddr", 0);
1534 }
1535 
1536 bool getHasColorExport(const Function &F) {
  // As a safe default, always respond as if PS has color exports.
1538   return getIntegerAttribute(
1539              F, "amdgpu-color-export",
1540              F.getCallingConv() == CallingConv::AMDGPU_PS ? 1 : 0) != 0;
1541 }
1542 
1543 bool getHasDepthExport(const Function &F) {
1544   return getIntegerAttribute(F, "amdgpu-depth-export", 0) != 0;
1545 }
1546 
1547 bool isShader(CallingConv::ID cc) {
1548   switch(cc) {
1549     case CallingConv::AMDGPU_VS:
1550     case CallingConv::AMDGPU_LS:
1551     case CallingConv::AMDGPU_HS:
1552     case CallingConv::AMDGPU_ES:
1553     case CallingConv::AMDGPU_GS:
1554     case CallingConv::AMDGPU_PS:
1555     case CallingConv::AMDGPU_CS:
1556       return true;
1557     default:
1558       return false;
1559   }
1560 }
1561 
1562 bool isGraphics(CallingConv::ID cc) {
1563   return isShader(cc) || cc == CallingConv::AMDGPU_Gfx;
1564 }
1565 
1566 bool isCompute(CallingConv::ID cc) {
1567   return !isGraphics(cc) || cc == CallingConv::AMDGPU_CS;
1568 }
1569 
1570 bool isEntryFunctionCC(CallingConv::ID CC) {
1571   switch (CC) {
1572   case CallingConv::AMDGPU_KERNEL:
1573   case CallingConv::SPIR_KERNEL:
1574   case CallingConv::AMDGPU_VS:
1575   case CallingConv::AMDGPU_GS:
1576   case CallingConv::AMDGPU_PS:
1577   case CallingConv::AMDGPU_CS:
1578   case CallingConv::AMDGPU_ES:
1579   case CallingConv::AMDGPU_HS:
1580   case CallingConv::AMDGPU_LS:
1581     return true;
1582   default:
1583     return false;
1584   }
1585 }
1586 
1587 bool isModuleEntryFunctionCC(CallingConv::ID CC) {
1588   switch (CC) {
1589   case CallingConv::AMDGPU_Gfx:
1590     return true;
1591   default:
1592     return isEntryFunctionCC(CC);
1593   }
1594 }
1595 
1596 bool isKernelCC(const Function *Func) {
1597   return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
1598 }
1599 
1600 bool hasXNACK(const MCSubtargetInfo &STI) {
1601   return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
1602 }
1603 
1604 bool hasSRAMECC(const MCSubtargetInfo &STI) {
1605   return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
1606 }
1607 
1608 bool hasMIMG_R128(const MCSubtargetInfo &STI) {
1609   return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128] && !STI.getFeatureBits()[AMDGPU::FeatureR128A16];
1610 }
1611 
1612 bool hasGFX10A16(const MCSubtargetInfo &STI) {
1613   return STI.getFeatureBits()[AMDGPU::FeatureGFX10A16];
1614 }
1615 
1616 bool hasG16(const MCSubtargetInfo &STI) {
1617   return STI.getFeatureBits()[AMDGPU::FeatureG16];
1618 }
1619 
1620 bool hasPackedD16(const MCSubtargetInfo &STI) {
1621   return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
1622 }
1623 
1624 bool isSI(const MCSubtargetInfo &STI) {
1625   return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
1626 }
1627 
1628 bool isCI(const MCSubtargetInfo &STI) {
1629   return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
1630 }
1631 
1632 bool isVI(const MCSubtargetInfo &STI) {
1633   return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
1634 }
1635 
1636 bool isGFX9(const MCSubtargetInfo &STI) {
1637   return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
1638 }
1639 
1640 bool isGFX9_GFX10(const MCSubtargetInfo &STI) {
1641   return isGFX9(STI) || isGFX10(STI);
1642 }
1643 
1644 bool isGFX8Plus(const MCSubtargetInfo &STI) {
1645   return isVI(STI) || isGFX9Plus(STI);
1646 }
1647 
1648 bool isGFX9Plus(const MCSubtargetInfo &STI) {
1649   return isGFX9(STI) || isGFX10Plus(STI);
1650 }
1651 
1652 bool isGFX10(const MCSubtargetInfo &STI) {
1653   return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
1654 }
1655 
1656 bool isGFX10Plus(const MCSubtargetInfo &STI) { return isGFX10(STI); }
1657 
1658 bool isNotGFX10Plus(const MCSubtargetInfo &STI) {
1659   return isSI(STI) || isCI(STI) || isVI(STI) || isGFX9(STI);
1660 }
1661 
1662 bool isGFX10Before1030(const MCSubtargetInfo &STI) {
1663   return isGFX10(STI) && !AMDGPU::isGFX10_BEncoding(STI);
1664 }
1665 
1666 bool isGCN3Encoding(const MCSubtargetInfo &STI) {
1667   return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
1668 }
1669 
1670 bool isGFX10_AEncoding(const MCSubtargetInfo &STI) {
1671   return STI.getFeatureBits()[AMDGPU::FeatureGFX10_AEncoding];
1672 }
1673 
1674 bool isGFX10_BEncoding(const MCSubtargetInfo &STI) {
1675   return STI.getFeatureBits()[AMDGPU::FeatureGFX10_BEncoding];
1676 }
1677 
1678 bool hasGFX10_3Insts(const MCSubtargetInfo &STI) {
1679   return STI.getFeatureBits()[AMDGPU::FeatureGFX10_3Insts];
1680 }
1681 
1682 bool isGFX90A(const MCSubtargetInfo &STI) {
1683   return STI.getFeatureBits()[AMDGPU::FeatureGFX90AInsts];
1684 }
1685 
1686 bool isGFX940(const MCSubtargetInfo &STI) {
1687   return STI.getFeatureBits()[AMDGPU::FeatureGFX940Insts];
1688 }
1689 
1690 bool hasArchitectedFlatScratch(const MCSubtargetInfo &STI) {
1691   return STI.getFeatureBits()[AMDGPU::FeatureArchitectedFlatScratch];
1692 }
1693 
1694 bool hasMAIInsts(const MCSubtargetInfo &STI) {
1695   return STI.getFeatureBits()[AMDGPU::FeatureMAIInsts];
1696 }
1697 
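// When gfx90a-style unified VGPR/AGPR allocation is in effect, the VGPR count
// is rounded up to a multiple of 4 and the AGPRs are counted on top of it;
// otherwise the larger of the two counts is used. For example, 10 VGPRs and
// 6 AGPRs yield alignTo(10, 4) + 6 = 18 on gfx90a, but max(10, 6) = 10 on
// other targets.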
1698 int32_t getTotalNumVGPRs(bool has90AInsts, int32_t ArgNumAGPR,
1699                          int32_t ArgNumVGPR) {
1700   if (has90AInsts && ArgNumAGPR)
1701     return alignTo(ArgNumVGPR, 4) + ArgNumAGPR;
1702   return std::max(ArgNumVGPR, ArgNumAGPR);
1703 }
1704 
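// Returns true if \p Reg is SCC or belongs to the SReg_32 class, looking
// through register tuples by checking their first 32-bit sub-register.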
1705 bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
1706   const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
1707   const unsigned FirstSubReg = TRI->getSubReg(Reg, AMDGPU::sub0);
1708   return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
1709     Reg == AMDGPU::SCC;
1710 }
1711 
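// MAP_REG2REG expands to a switch over \p Reg whose cases are generated by the
// CASE_* macros below. With the first set of definitions it maps pseudo
// registers to subtarget-specific MC registers (getMCReg); with the second,
// inverse set it maps them back to pseudo registers (mc2PseudoReg).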
1712 #define MAP_REG2REG \
1713   using namespace AMDGPU; \
1714   switch(Reg) { \
1715   default: return Reg; \
1716   CASE_CI_VI(FLAT_SCR) \
1717   CASE_CI_VI(FLAT_SCR_LO) \
1718   CASE_CI_VI(FLAT_SCR_HI) \
1719   CASE_VI_GFX9PLUS(TTMP0) \
1720   CASE_VI_GFX9PLUS(TTMP1) \
1721   CASE_VI_GFX9PLUS(TTMP2) \
1722   CASE_VI_GFX9PLUS(TTMP3) \
1723   CASE_VI_GFX9PLUS(TTMP4) \
1724   CASE_VI_GFX9PLUS(TTMP5) \
1725   CASE_VI_GFX9PLUS(TTMP6) \
1726   CASE_VI_GFX9PLUS(TTMP7) \
1727   CASE_VI_GFX9PLUS(TTMP8) \
1728   CASE_VI_GFX9PLUS(TTMP9) \
1729   CASE_VI_GFX9PLUS(TTMP10) \
1730   CASE_VI_GFX9PLUS(TTMP11) \
1731   CASE_VI_GFX9PLUS(TTMP12) \
1732   CASE_VI_GFX9PLUS(TTMP13) \
1733   CASE_VI_GFX9PLUS(TTMP14) \
1734   CASE_VI_GFX9PLUS(TTMP15) \
1735   CASE_VI_GFX9PLUS(TTMP0_TTMP1) \
1736   CASE_VI_GFX9PLUS(TTMP2_TTMP3) \
1737   CASE_VI_GFX9PLUS(TTMP4_TTMP5) \
1738   CASE_VI_GFX9PLUS(TTMP6_TTMP7) \
1739   CASE_VI_GFX9PLUS(TTMP8_TTMP9) \
1740   CASE_VI_GFX9PLUS(TTMP10_TTMP11) \
1741   CASE_VI_GFX9PLUS(TTMP12_TTMP13) \
1742   CASE_VI_GFX9PLUS(TTMP14_TTMP15) \
1743   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3) \
1744   CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7) \
1745   CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11) \
1746   CASE_VI_GFX9PLUS(TTMP12_TTMP13_TTMP14_TTMP15) \
1747   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
1748   CASE_VI_GFX9PLUS(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
1749   CASE_VI_GFX9PLUS(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1750   CASE_VI_GFX9PLUS(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
1751   }
1752 
1753 #define CASE_CI_VI(node) \
1754   case node: \
1755     assert(!isSI(STI)); return isCI(STI) ? node##_ci : node##_vi;
1756 
1757 #define CASE_VI_GFX9PLUS(node) \
1758   case node: return isGFX9Plus(STI) ? node##_gfx9plus : node##_vi;
1759 
1760 unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
1761   if (STI.getTargetTriple().getArch() == Triple::r600)
1762     return Reg;
1763   MAP_REG2REG
1764 }
1765 
1766 #undef CASE_CI_VI
1767 #undef CASE_VI_GFX9PLUS
1768 
1769 #define CASE_CI_VI(node)   case node##_ci: case node##_vi:   return node;
1770 #define CASE_VI_GFX9PLUS(node) case node##_vi: case node##_gfx9plus: return node;
1771 
1772 unsigned mc2PseudoReg(unsigned Reg) {
1773   MAP_REG2REG
1774 }
1775 
1776 #undef CASE_CI_VI
1777 #undef CASE_VI_GFX9PLUS
1778 #undef MAP_REG2REG
1779 
1780 bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1781   assert(OpNo < Desc.NumOperands);
1782   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1783   return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
1784          OpType <= AMDGPU::OPERAND_SRC_LAST;
1785 }
1786 
1787 bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1788   assert(OpNo < Desc.NumOperands);
1789   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1790   switch (OpType) {
1791   case AMDGPU::OPERAND_REG_IMM_FP32:
1792   case AMDGPU::OPERAND_REG_IMM_FP32_DEFERRED:
1793   case AMDGPU::OPERAND_REG_IMM_FP64:
1794   case AMDGPU::OPERAND_REG_IMM_FP16:
1795   case AMDGPU::OPERAND_REG_IMM_FP16_DEFERRED:
1796   case AMDGPU::OPERAND_REG_IMM_V2FP16:
1797   case AMDGPU::OPERAND_REG_IMM_V2INT16:
1798   case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1799   case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1800   case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1801   case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1802   case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1803   case AMDGPU::OPERAND_REG_INLINE_AC_FP32:
1804   case AMDGPU::OPERAND_REG_INLINE_AC_FP16:
1805   case AMDGPU::OPERAND_REG_INLINE_AC_V2FP16:
1806   case AMDGPU::OPERAND_REG_INLINE_AC_V2INT16:
1807   case AMDGPU::OPERAND_REG_IMM_V2FP32:
1808   case AMDGPU::OPERAND_REG_INLINE_C_V2FP32:
1809   case AMDGPU::OPERAND_REG_INLINE_AC_FP64:
1810     return true;
1811   default:
1812     return false;
1813   }
1814 }
1815 
1816 bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
1817   assert(OpNo < Desc.NumOperands);
1818   unsigned OpType = Desc.OpInfo[OpNo].OperandType;
1819   return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
1820          OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
1821 }
1822 
1823 // Avoid using MCRegisterClass::getSize, since that function will go away
1824 // (move from MC* level to Target* level). Return size in bits.
1825 unsigned getRegBitWidth(unsigned RCID) {
1826   switch (RCID) {
1827   case AMDGPU::VGPR_LO16RegClassID:
1828   case AMDGPU::VGPR_HI16RegClassID:
1829   case AMDGPU::SGPR_LO16RegClassID:
1830   case AMDGPU::AGPR_LO16RegClassID:
1831     return 16;
1832   case AMDGPU::SGPR_32RegClassID:
1833   case AMDGPU::VGPR_32RegClassID:
1834   case AMDGPU::VRegOrLds_32RegClassID:
1835   case AMDGPU::AGPR_32RegClassID:
1836   case AMDGPU::VS_32RegClassID:
1837   case AMDGPU::AV_32RegClassID:
1838   case AMDGPU::SReg_32RegClassID:
1839   case AMDGPU::SReg_32_XM0RegClassID:
1840   case AMDGPU::SRegOrLds_32RegClassID:
1841     return 32;
1842   case AMDGPU::SGPR_64RegClassID:
1843   case AMDGPU::VS_64RegClassID:
1844   case AMDGPU::SReg_64RegClassID:
1845   case AMDGPU::VReg_64RegClassID:
1846   case AMDGPU::AReg_64RegClassID:
1847   case AMDGPU::SReg_64_XEXECRegClassID:
1848   case AMDGPU::VReg_64_Align2RegClassID:
1849   case AMDGPU::AReg_64_Align2RegClassID:
1850   case AMDGPU::AV_64RegClassID:
1851   case AMDGPU::AV_64_Align2RegClassID:
1852     return 64;
1853   case AMDGPU::SGPR_96RegClassID:
1854   case AMDGPU::SReg_96RegClassID:
1855   case AMDGPU::VReg_96RegClassID:
1856   case AMDGPU::AReg_96RegClassID:
1857   case AMDGPU::VReg_96_Align2RegClassID:
1858   case AMDGPU::AReg_96_Align2RegClassID:
1859   case AMDGPU::AV_96RegClassID:
1860   case AMDGPU::AV_96_Align2RegClassID:
1861     return 96;
1862   case AMDGPU::SGPR_128RegClassID:
1863   case AMDGPU::SReg_128RegClassID:
1864   case AMDGPU::VReg_128RegClassID:
1865   case AMDGPU::AReg_128RegClassID:
1866   case AMDGPU::VReg_128_Align2RegClassID:
1867   case AMDGPU::AReg_128_Align2RegClassID:
1868   case AMDGPU::AV_128RegClassID:
1869   case AMDGPU::AV_128_Align2RegClassID:
1870     return 128;
1871   case AMDGPU::SGPR_160RegClassID:
1872   case AMDGPU::SReg_160RegClassID:
1873   case AMDGPU::VReg_160RegClassID:
1874   case AMDGPU::AReg_160RegClassID:
1875   case AMDGPU::VReg_160_Align2RegClassID:
1876   case AMDGPU::AReg_160_Align2RegClassID:
1877   case AMDGPU::AV_160RegClassID:
1878   case AMDGPU::AV_160_Align2RegClassID:
1879     return 160;
1880   case AMDGPU::SGPR_192RegClassID:
1881   case AMDGPU::SReg_192RegClassID:
1882   case AMDGPU::VReg_192RegClassID:
1883   case AMDGPU::AReg_192RegClassID:
1884   case AMDGPU::VReg_192_Align2RegClassID:
1885   case AMDGPU::AReg_192_Align2RegClassID:
1886   case AMDGPU::AV_192RegClassID:
1887   case AMDGPU::AV_192_Align2RegClassID:
1888     return 192;
1889   case AMDGPU::SGPR_224RegClassID:
1890   case AMDGPU::SReg_224RegClassID:
1891   case AMDGPU::VReg_224RegClassID:
1892   case AMDGPU::AReg_224RegClassID:
1893   case AMDGPU::VReg_224_Align2RegClassID:
1894   case AMDGPU::AReg_224_Align2RegClassID:
1895   case AMDGPU::AV_224RegClassID:
1896   case AMDGPU::AV_224_Align2RegClassID:
1897     return 224;
1898   case AMDGPU::SGPR_256RegClassID:
1899   case AMDGPU::SReg_256RegClassID:
1900   case AMDGPU::VReg_256RegClassID:
1901   case AMDGPU::AReg_256RegClassID:
1902   case AMDGPU::VReg_256_Align2RegClassID:
1903   case AMDGPU::AReg_256_Align2RegClassID:
1904   case AMDGPU::AV_256RegClassID:
1905   case AMDGPU::AV_256_Align2RegClassID:
1906     return 256;
1907   case AMDGPU::SGPR_512RegClassID:
1908   case AMDGPU::SReg_512RegClassID:
1909   case AMDGPU::VReg_512RegClassID:
1910   case AMDGPU::AReg_512RegClassID:
1911   case AMDGPU::VReg_512_Align2RegClassID:
1912   case AMDGPU::AReg_512_Align2RegClassID:
1913   case AMDGPU::AV_512RegClassID:
1914   case AMDGPU::AV_512_Align2RegClassID:
1915     return 512;
1916   case AMDGPU::SGPR_1024RegClassID:
1917   case AMDGPU::SReg_1024RegClassID:
1918   case AMDGPU::VReg_1024RegClassID:
1919   case AMDGPU::AReg_1024RegClassID:
1920   case AMDGPU::VReg_1024_Align2RegClassID:
1921   case AMDGPU::AReg_1024_Align2RegClassID:
1922   case AMDGPU::AV_1024RegClassID:
1923   case AMDGPU::AV_1024_Align2RegClassID:
1924     return 1024;
1925   default:
1926     llvm_unreachable("Unexpected register class");
1927   }
1928 }
1929 
1930 unsigned getRegBitWidth(const MCRegisterClass &RC) {
1931   return getRegBitWidth(RC.getID());
1932 }
1933 
1934 unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
1935                            unsigned OpNo) {
1936   assert(OpNo < Desc.NumOperands);
1937   unsigned RCID = Desc.OpInfo[OpNo].RegClass;
1938   return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
1939 }
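// For example, an operand constrained to a 128-bit register class is reported
// as 128 / 8 = 16 bytes.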
1940 
1941 bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
1942   if (isInlinableIntLiteral(Literal))
1943     return true;
1944 
1945   uint64_t Val = static_cast<uint64_t>(Literal);
1946   return (Val == DoubleToBits(0.0)) ||
1947          (Val == DoubleToBits(1.0)) ||
1948          (Val == DoubleToBits(-1.0)) ||
1949          (Val == DoubleToBits(0.5)) ||
1950          (Val == DoubleToBits(-0.5)) ||
1951          (Val == DoubleToBits(2.0)) ||
1952          (Val == DoubleToBits(-2.0)) ||
1953          (Val == DoubleToBits(4.0)) ||
1954          (Val == DoubleToBits(-4.0)) ||
1955          (Val == 0x3fc45f306dc9c882 && HasInv2Pi); // 1/(2*pi)
1956 }
1957 
1958 bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
1959   if (isInlinableIntLiteral(Literal))
1960     return true;
1961 
1962   // The actual type of the operand does not seem to matter as long
1963   // as the bits match one of the inline immediate values.  For example:
1964   //
1965   // the bit pattern 0xfffffffe encodes both a quiet -nan and the signed
1966   // integer -2, so it is a legal inline immediate.
1967   //
1968   // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
1969   // floating-point, so it is a legal inline immediate.
1970 
1971   uint32_t Val = static_cast<uint32_t>(Literal);
1972   return (Val == FloatToBits(0.0f)) ||
1973          (Val == FloatToBits(1.0f)) ||
1974          (Val == FloatToBits(-1.0f)) ||
1975          (Val == FloatToBits(0.5f)) ||
1976          (Val == FloatToBits(-0.5f)) ||
1977          (Val == FloatToBits(2.0f)) ||
1978          (Val == FloatToBits(-2.0f)) ||
1979          (Val == FloatToBits(4.0f)) ||
1980          (Val == FloatToBits(-4.0f)) ||
1981          (Val == 0x3e22f983 && HasInv2Pi); // 1/(2*pi)
1982 }
1983 
1984 bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
1985   if (!HasInv2Pi)
1986     return false;
1987 
1988   if (isInlinableIntLiteral(Literal))
1989     return true;
1990 
1991   uint16_t Val = static_cast<uint16_t>(Literal);
1992   return Val == 0x3C00 || // 1.0
1993          Val == 0xBC00 || // -1.0
1994          Val == 0x3800 || // 0.5
1995          Val == 0xB800 || // -0.5
1996          Val == 0x4000 || // 2.0
1997          Val == 0xC000 || // -2.0
1998          Val == 0x4400 || // 4.0
1999          Val == 0xC400 || // -4.0
2000          Val == 0x3118;   // 1/2pi
2001 }
2002 
2003 bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2004   assert(HasInv2Pi);
2005 
2006   if (isInt<16>(Literal) || isUInt<16>(Literal)) {
2007     int16_t Trunc = static_cast<int16_t>(Literal);
2008     return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
2009   }
2010   if (!(Literal & 0xffff))
2011     return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);
2012 
2013   int16_t Lo16 = static_cast<int16_t>(Literal);
2014   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2015   return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
2016 }
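// For example, the packed literal 0x3C003C00 (fp16 1.0 in both halves) is
// inlinable because the halves are equal and 0x3C00 is an inline constant,
// whereas 0x3C004000 (1.0 and 2.0) is not.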
2017 
2018 bool isInlinableIntLiteralV216(int32_t Literal) {
2019   int16_t Lo16 = static_cast<int16_t>(Literal);
2020   if (isInt<16>(Literal) || isUInt<16>(Literal))
2021     return isInlinableIntLiteral(Lo16);
2022 
2023   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2024   if (!(Literal & 0xffff))
2025     return isInlinableIntLiteral(Hi16);
2026   return Lo16 == Hi16 && isInlinableIntLiteral(Lo16);
2027 }
2028 
2029 bool isFoldableLiteralV216(int32_t Literal, bool HasInv2Pi) {
2030   assert(HasInv2Pi);
2031 
2032   int16_t Lo16 = static_cast<int16_t>(Literal);
2033   if (isInt<16>(Literal) || isUInt<16>(Literal))
2034     return true;
2035 
2036   int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
2037   if (!(Literal & 0xffff))
2038     return true;
2039   return Lo16 == Hi16;
2040 }
2041 
2042 bool isArgPassedInSGPR(const Argument *A) {
2043   const Function *F = A->getParent();
2044 
2045   // Arguments to compute shaders are never a source of divergence.
2046   CallingConv::ID CC = F->getCallingConv();
2047   switch (CC) {
2048   case CallingConv::AMDGPU_KERNEL:
2049   case CallingConv::SPIR_KERNEL:
2050     return true;
2051   case CallingConv::AMDGPU_VS:
2052   case CallingConv::AMDGPU_LS:
2053   case CallingConv::AMDGPU_HS:
2054   case CallingConv::AMDGPU_ES:
2055   case CallingConv::AMDGPU_GS:
2056   case CallingConv::AMDGPU_PS:
2057   case CallingConv::AMDGPU_CS:
2058   case CallingConv::AMDGPU_Gfx:
2059     // For non-compute shaders, SGPR inputs are marked with either inreg
2060     // or byval. Everything else is in VGPRs.
2061     return F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::InReg) ||
2062            F->getAttributes().hasParamAttr(A->getArgNo(), Attribute::ByVal);
2063   default:
2064     // TODO: Should calls support inreg for SGPR inputs?
2065     return false;
2066   }
2067 }
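// For example, for an amdgpu_ps shader an "i32 inreg" argument is a uniform
// SGPR input, while a plain "float" argument arrives in a VGPR and may be
// divergent.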
2068 
2069 static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
2070   return isGCN3Encoding(ST) || isGFX10Plus(ST);
2071 }
2072 
2073 static bool hasSMRDSignedImmOffset(const MCSubtargetInfo &ST) {
2074   return isGFX9Plus(ST);
2075 }
2076 
2077 bool isLegalSMRDEncodedUnsignedOffset(const MCSubtargetInfo &ST,
2078                                       int64_t EncodedOffset) {
2079   return hasSMEMByteOffset(ST) ? isUInt<20>(EncodedOffset)
2080                                : isUInt<8>(EncodedOffset);
2081 }
2082 
2083 bool isLegalSMRDEncodedSignedOffset(const MCSubtargetInfo &ST,
2084                                     int64_t EncodedOffset,
2085                                     bool IsBuffer) {
2086   return !IsBuffer &&
2087          hasSMRDSignedImmOffset(ST) &&
2088          isInt<21>(EncodedOffset);
2089 }
2090 
2091 static bool isDwordAligned(uint64_t ByteOffset) {
2092   return (ByteOffset & 3) == 0;
2093 }
2094 
2095 uint64_t convertSMRDOffsetUnits(const MCSubtargetInfo &ST,
2096                                 uint64_t ByteOffset) {
2097   if (hasSMEMByteOffset(ST))
2098     return ByteOffset;
2099 
2100   assert(isDwordAligned(ByteOffset));
2101   return ByteOffset >> 2;
2102 }
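// For example, on subtargets without SMEM byte offsets a byte offset of 400
// is converted to the dword offset 100; on GCN3 encoding and GFX10+ the byte
// offset 400 is used as-is.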
2103 
2104 Optional<int64_t> getSMRDEncodedOffset(const MCSubtargetInfo &ST,
2105                                        int64_t ByteOffset, bool IsBuffer) {
2106   // The signed version is always a byte offset.
2107   if (!IsBuffer && hasSMRDSignedImmOffset(ST)) {
2108     assert(hasSMEMByteOffset(ST));
2109     return isInt<20>(ByteOffset) ? Optional<int64_t>(ByteOffset) : None;
2110   }
2111 
2112   if (!isDwordAligned(ByteOffset) && !hasSMEMByteOffset(ST))
2113     return None;
2114 
2115   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2116   return isLegalSMRDEncodedUnsignedOffset(ST, EncodedOffset)
2117              ? Optional<int64_t>(EncodedOffset)
2118              : None;
2119 }
2120 
2121 Optional<int64_t> getSMRDEncodedLiteralOffset32(const MCSubtargetInfo &ST,
2122                                                 int64_t ByteOffset) {
2123   if (!isCI(ST) || !isDwordAligned(ByteOffset))
2124     return None;
2125 
2126   int64_t EncodedOffset = convertSMRDOffsetUnits(ST, ByteOffset);
2127   return isUInt<32>(EncodedOffset) ? Optional<int64_t>(EncodedOffset) : None;
2128 }
2129 
2130 unsigned getNumFlatOffsetBits(const MCSubtargetInfo &ST, bool Signed) {
2131   // Offsets are 12-bit signed (11 unsigned) on GFX10, 13-bit (12) on GFX9.
2132   if (AMDGPU::isGFX10(ST))
2133     return Signed ? 12 : 11;
2134 
2135   return Signed ? 13 : 12;
2136 }
2137 
2138 // Given Imm, split it into the values to put into the SOffset and ImmOffset
2139 // fields in an MUBUF instruction. Return false if it is not possible (due to a
2140 // hardware bug needing a workaround).
2141 //
2142 // The required alignment ensures that individual address components remain
2143 // aligned if they are aligned to begin with. It also ensures that additional
2144 // offsets within the given alignment can be added to the resulting ImmOffset.
2145 bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
2146                       const GCNSubtarget *Subtarget, Align Alignment) {
2147   const uint32_t MaxImm = alignDown(4095, Alignment.value());
2148   uint32_t Overflow = 0;
2149 
2150   if (Imm > MaxImm) {
2151     if (Imm <= MaxImm + 64) {
2152       // Use an SOffset inline constant for 4..64
2153       Overflow = Imm - MaxImm;
2154       Imm = MaxImm;
2155     } else {
2156       // Try to keep the same value in SOffset for adjacent loads, so that
2157       // the corresponding register contents can be re-used.
2158       //
2159       // Load values with all low-bits (except for alignment bits) set into
2160       // SOffset, so that a larger range of values can be covered using
2161       // s_movk_i32.
2162       //
2163       // Atomic operations fail to work correctly when individual address
2164       // components are unaligned, even if their sum is aligned.
2165       uint32_t High = (Imm + Alignment.value()) & ~4095;
2166       uint32_t Low = (Imm + Alignment.value()) & 4095;
2167       Imm = Low;
2168       Overflow = High - Alignment.value();
2169     }
2170   }
2171 
2172   // There is a hardware bug in SI and CI which prevents address clamping in
2173   // MUBUF instructions from working correctly with SOffsets. The immediate
2174   // offset is unaffected.
2175   if (Overflow > 0 &&
2176       Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
2177     return false;
2178 
2179   ImmOffset = Imm;
2180   SOffset = Overflow;
2181   return true;
2182 }
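// For example, with Alignment = 4 the largest encodable immediate is 4092, so
// an offset of 4100 is split into ImmOffset = 4092 and SOffset = 8 (an inline
// constant) on subtargets unaffected by the SI/CI SOffset clamping bug.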
2183 
2184 SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
2185   *this = getDefaultForCallingConv(F.getCallingConv());
2186 
2187   StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
2188   if (!IEEEAttr.empty())
2189     IEEE = IEEEAttr == "true";
2190 
2191   StringRef DX10ClampAttr
2192     = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
2193   if (!DX10ClampAttr.empty())
2194     DX10Clamp = DX10ClampAttr == "true";
2195 
2196   StringRef DenormF32Attr = F.getFnAttribute("denormal-fp-math-f32").getValueAsString();
2197   if (!DenormF32Attr.empty()) {
2198     DenormalMode DenormMode = parseDenormalFPAttribute(DenormF32Attr);
2199     FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2200     FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2201   }
2202 
2203   StringRef DenormAttr = F.getFnAttribute("denormal-fp-math").getValueAsString();
2204   if (!DenormAttr.empty()) {
2205     DenormalMode DenormMode = parseDenormalFPAttribute(DenormAttr);
2206 
2207     if (DenormF32Attr.empty()) {
2208       FP32InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2209       FP32OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2210     }
2211 
2212     FP64FP16InputDenormals = DenormMode.Input == DenormalMode::IEEE;
2213     FP64FP16OutputDenormals = DenormMode.Output == DenormalMode::IEEE;
2214   }
2215 }
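// For example, a function carrying "amdgpu-ieee"="false" and
// "denormal-fp-math-f32"="preserve-sign,preserve-sign" ends up with IEEE
// cleared and both FP32InputDenormals and FP32OutputDenormals set to false,
// on top of its calling-convention defaults.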
2216 
2217 namespace {
2218 
2219 struct SourceOfDivergence {
2220   unsigned Intr;
2221 };
2222 const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);
2223 
2224 #define GET_SourcesOfDivergence_IMPL
2225 #define GET_Gfx9BufferFormat_IMPL
2226 #define GET_Gfx10PlusBufferFormat_IMPL
2227 #include "AMDGPUGenSearchableTables.inc"
2228 
2229 } // end anonymous namespace
2230 
2231 bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
2232   return lookupSourceOfDivergence(IntrID);
2233 }
2234 
2235 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t BitsPerComp,
2236                                                   uint8_t NumComponents,
2237                                                   uint8_t NumFormat,
2238                                                   const MCSubtargetInfo &STI) {
2239   return isGFX10Plus(STI)
2240              ? getGfx10PlusBufferFormatInfo(BitsPerComp, NumComponents,
2241                                             NumFormat)
2242              : getGfx9BufferFormatInfo(BitsPerComp, NumComponents, NumFormat);
2243 }
2244 
2245 const GcnBufferFormatInfo *getGcnBufferFormatInfo(uint8_t Format,
2246                                                   const MCSubtargetInfo &STI) {
2247   return isGFX10Plus(STI) ? getGfx10PlusBufferFormatInfo(Format)
2248                           : getGfx9BufferFormatInfo(Format);
2249 }
2250 
2251 } // namespace AMDGPU
2252 
2253 raw_ostream &operator<<(raw_ostream &OS,
2254                         const AMDGPU::IsaInfo::TargetIDSetting S) {
2255   switch (S) {
2256   case (AMDGPU::IsaInfo::TargetIDSetting::Unsupported):
2257     OS << "Unsupported";
2258     break;
2259   case (AMDGPU::IsaInfo::TargetIDSetting::Any):
2260     OS << "Any";
2261     break;
2262   case (AMDGPU::IsaInfo::TargetIDSetting::Off):
2263     OS << "Off";
2264     break;
2265   case (AMDGPU::IsaInfo::TargetIDSetting::On):
2266     OS << "On";
2267     break;
2268   }
2269   return OS;
2270 }
2271 
2272 } // namespace llvm
2273