//===-- AMDGPUTargetStreamer.cpp - AMDGPU Target Streamer Methods ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AMDGPU specific target streamer methods.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetStreamer.h"
#include "AMDGPUPTNote.h"
#include "AMDKernelCodeT.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/FormattedStream.h"

using namespace llvm;
using namespace llvm::AMDGPU;

//===----------------------------------------------------------------------===//
// AMDGPUTargetStreamer
//===----------------------------------------------------------------------===//

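// Convert an ISA version to its code object V2 form, where XNACK-enabled
// gfx90x targets were identified by the next (odd) stepping, e.g. gfx900 with
// XNACK was reported as gfx901.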
static void convertIsaVersionV2(uint32_t &Major, uint32_t &Minor,
                                uint32_t &Stepping, bool Sramecc, bool Xnack) {
  if (Major == 9 && Minor == 0) {
    switch (Stepping) {
      case 0:
      case 2:
      case 4:
      case 6:
        if (Xnack)
          Stepping++;
    }
  }
}

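// Both entry points take the HSA metadata as YAML text: V2 parses it into the
// structured HSAMD::Metadata form, while V3 parses it into a msgpack document
// before re-emitting it.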
bool AMDGPUTargetStreamer::EmitHSAMetadataV2(StringRef HSAMetadataString) {
  HSAMD::Metadata HSAMetadata;
  if (HSAMD::fromString(HSAMetadataString, HSAMetadata))
    return false;
  return EmitHSAMetadata(HSAMetadata);
}

bool AMDGPUTargetStreamer::EmitHSAMetadataV3(StringRef HSAMetadataString) {
  msgpack::Document HSAMetadataDoc;
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
    return false;
  return EmitHSAMetadata(HSAMetadataDoc, false);
}

StringRef AMDGPUTargetStreamer::getArchNameFromElfMach(unsigned ElfMach) {
  AMDGPU::GPUKind AK;

  switch (ElfMach) {
  default: llvm_unreachable("Unhandled ELF::EF_AMDGPU type");
  case ELF::EF_AMDGPU_MACH_R600_R600:      AK = GK_R600;    break;
  case ELF::EF_AMDGPU_MACH_R600_R630:      AK = GK_R630;    break;
  case ELF::EF_AMDGPU_MACH_R600_RS880:     AK = GK_RS880;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV670:     AK = GK_RV670;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV710:     AK = GK_RV710;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV730:     AK = GK_RV730;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV770:     AK = GK_RV770;   break;
  case ELF::EF_AMDGPU_MACH_R600_CEDAR:     AK = GK_CEDAR;   break;
  case ELF::EF_AMDGPU_MACH_R600_CYPRESS:   AK = GK_CYPRESS; break;
  case ELF::EF_AMDGPU_MACH_R600_JUNIPER:   AK = GK_JUNIPER; break;
  case ELF::EF_AMDGPU_MACH_R600_REDWOOD:   AK = GK_REDWOOD; break;
  case ELF::EF_AMDGPU_MACH_R600_SUMO:      AK = GK_SUMO;    break;
  case ELF::EF_AMDGPU_MACH_R600_BARTS:     AK = GK_BARTS;   break;
  case ELF::EF_AMDGPU_MACH_R600_CAICOS:    AK = GK_CAICOS;  break;
  case ELF::EF_AMDGPU_MACH_R600_CAYMAN:    AK = GK_CAYMAN;  break;
  case ELF::EF_AMDGPU_MACH_R600_TURKS:     AK = GK_TURKS;   break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX600:  AK = GK_GFX600;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX601:  AK = GK_GFX601;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX602:  AK = GK_GFX602;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX700:  AK = GK_GFX700;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX701:  AK = GK_GFX701;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX702:  AK = GK_GFX702;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX703:  AK = GK_GFX703;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX704:  AK = GK_GFX704;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX705:  AK = GK_GFX705;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX801:  AK = GK_GFX801;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX802:  AK = GK_GFX802;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX803:  AK = GK_GFX803;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX805:  AK = GK_GFX805;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX810:  AK = GK_GFX810;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX900:  AK = GK_GFX900;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX902:  AK = GK_GFX902;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX904:  AK = GK_GFX904;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX906:  AK = GK_GFX906;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX908:  AK = GK_GFX908;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX909:  AK = GK_GFX909;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX90A:  AK = GK_GFX90A;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C:  AK = GK_GFX90C;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010: AK = GK_GFX1010; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011: AK = GK_GFX1011; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012: AK = GK_GFX1012; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030: AK = GK_GFX1030; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031: AK = GK_GFX1031; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032: AK = GK_GFX1032; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033: AK = GK_GFX1033; break;
  case ELF::EF_AMDGPU_MACH_NONE:           AK = GK_NONE;    break;
  }

  StringRef GPUName = getArchNameAMDGCN(AK);
  if (!GPUName.empty())
    return GPUName;
  return getArchNameR600(AK);
}

unsigned AMDGPUTargetStreamer::getElfMach(StringRef GPU) {
  AMDGPU::GPUKind AK = parseArchAMDGCN(GPU);
  if (AK == AMDGPU::GPUKind::GK_NONE)
    AK = parseArchR600(GPU);

  switch (AK) {
  case GK_R600:    return ELF::EF_AMDGPU_MACH_R600_R600;
  case GK_R630:    return ELF::EF_AMDGPU_MACH_R600_R630;
  case GK_RS880:   return ELF::EF_AMDGPU_MACH_R600_RS880;
  case GK_RV670:   return ELF::EF_AMDGPU_MACH_R600_RV670;
  case GK_RV710:   return ELF::EF_AMDGPU_MACH_R600_RV710;
  case GK_RV730:   return ELF::EF_AMDGPU_MACH_R600_RV730;
  case GK_RV770:   return ELF::EF_AMDGPU_MACH_R600_RV770;
  case GK_CEDAR:   return ELF::EF_AMDGPU_MACH_R600_CEDAR;
  case GK_CYPRESS: return ELF::EF_AMDGPU_MACH_R600_CYPRESS;
  case GK_JUNIPER: return ELF::EF_AMDGPU_MACH_R600_JUNIPER;
  case GK_REDWOOD: return ELF::EF_AMDGPU_MACH_R600_REDWOOD;
  case GK_SUMO:    return ELF::EF_AMDGPU_MACH_R600_SUMO;
  case GK_BARTS:   return ELF::EF_AMDGPU_MACH_R600_BARTS;
  case GK_CAICOS:  return ELF::EF_AMDGPU_MACH_R600_CAICOS;
  case GK_CAYMAN:  return ELF::EF_AMDGPU_MACH_R600_CAYMAN;
  case GK_TURKS:   return ELF::EF_AMDGPU_MACH_R600_TURKS;
  case GK_GFX600:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX600;
  case GK_GFX601:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX601;
  case GK_GFX602:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX602;
  case GK_GFX700:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX700;
  case GK_GFX701:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX701;
  case GK_GFX702:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX702;
  case GK_GFX703:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX703;
  case GK_GFX704:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX704;
  case GK_GFX705:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX705;
  case GK_GFX801:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX801;
  case GK_GFX802:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX802;
  case GK_GFX803:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX803;
  case GK_GFX805:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX805;
  case GK_GFX810:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX810;
  case GK_GFX900:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX900;
  case GK_GFX902:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX902;
  case GK_GFX904:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX904;
  case GK_GFX906:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX906;
  case GK_GFX908:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX908;
  case GK_GFX909:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX909;
  case GK_GFX90A:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX90A;
  case GK_GFX90C:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C;
  case GK_GFX1010: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010;
  case GK_GFX1011: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011;
  case GK_GFX1012: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012;
  case GK_GFX1030: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030;
  case GK_GFX1031: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031;
  case GK_GFX1032: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032;
  case GK_GFX1033: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033;
  case GK_NONE:    return ELF::EF_AMDGPU_MACH_NONE;
  }

  llvm_unreachable("unknown GPU");
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetAsmStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetAsmStreamer::AMDGPUTargetAsmStreamer(MCStreamer &S,
                                                 formatted_raw_ostream &OS)
    : AMDGPUTargetStreamer(S), OS(OS) { }

// A hook for emitting anything that must appear at the end of the stream.
// We use it for emitting the accumulated PAL metadata as directives.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetAsmStreamer::finish() {
  std::string S;
  getPALMetadata()->toString(S);
  OS << S;

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDGCNTarget() {
  OS << "\t.amdgcn_target \"" << getTargetID()->toString() << "\"\n";
}

void AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {
  OS << "\t.hsa_code_object_version " <<
        Twine(Major) << "," << Twine(Minor) << '\n';
}

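// Emits the ISA as a .hsa_code_object_isa directive, e.g. (values are
// illustrative only):
//   .hsa_code_object_isa 9,0,0,"AMD","AMDGPU"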
void
AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectISAV2(uint32_t Major,
                                                         uint32_t Minor,
                                                         uint32_t Stepping,
                                                         StringRef VendorName,
                                                         StringRef ArchName) {
  convertIsaVersionV2(Major, Minor, Stepping, TargetID->isSramEccOnOrAny(),
                      TargetID->isXnackOnOrAny());
  OS << "\t.hsa_code_object_isa " << Twine(Major) << "," << Twine(Minor) << ","
     << Twine(Stepping) << ",\"" << VendorName << "\",\"" << ArchName << "\"\n";
}

void
AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
  OS << "\t.amd_kernel_code_t\n";
  dumpAmdKernelCode(&Header, OS, "\t\t");
  OS << "\t.end_amd_kernel_code_t\n";
}

void AMDGPUTargetAsmStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  switch (Type) {
    default: llvm_unreachable("Invalid AMDGPU symbol type");
    case ELF::STT_AMDGPU_HSA_KERNEL:
      OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n';
      break;
  }
}

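// Emits an .amdgpu_lds directive of the form (size and alignment are
// illustrative):
//   .amdgpu_lds symbol, 512, 16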
void AMDGPUTargetAsmStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
}

bool AMDGPUTargetAsmStreamer::EmitISAVersion() {
  OS << "\t.amd_amdgpu_isa \"" << getTargetID()->toString() << "\"\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  OS << '\t' << HSAMD::AssemblerDirectiveBegin << '\n';
  OS << HSAMetadataString << '\n';
  OS << '\t' << HSAMD::AssemblerDirectiveEnd << '\n';
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    msgpack::Document &HSAMetadataDoc, bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  raw_string_ostream StrOS(HSAMetadataString);
  HSAMetadataDoc.toYAML(StrOS);

  OS << '\t' << HSAMD::V3::AssemblerDirectiveBegin << '\n';
  OS << StrOS.str() << '\n';
  OS << '\t' << HSAMD::V3::AssemblerDirectiveEnd << '\n';
  return true;
}

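// Pads the end of the code out to an instruction cache line boundary and then
// emits additional cache lines of s_code_end (s_nop on gfx90a) as padding for
// instruction prefetch (see the prefetch-mode comment below).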
bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;

  // Instruction cache line size in bytes.
  const unsigned Log2CacheLineSize = 6;
  const unsigned CacheLineSize = 1u << Log2CacheLineSize;

  // Extra padding amount in bytes to support prefetch mode 3.
  unsigned FillSize = 3 * CacheLineSize;

  if (AMDGPU::isGFX90A(STI)) {
    Encoded_pad = Encoded_s_nop;
    FillSize = 16 * CacheLineSize;
  }

  OS << "\t.p2alignl " << Log2CacheLineSize << ", " << Encoded_pad << '\n';
  OS << "\t.fill " << (FillSize / 4) << ", 4, " << Encoded_pad << '\n';
  return true;
}

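// Prints the kernel descriptor as a block of .amdhsa_* directives, e.g.
// (kernel name and values are illustrative; the exact directive set depends
// on the subtarget):
//   .amdhsa_kernel my_kernel
//     .amdhsa_group_segment_fixed_size 0
//     ...
//     .amdhsa_next_free_vgpr 32
//     .amdhsa_next_free_sgpr 16
//   .end_amdhsa_kernel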
void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KD, uint64_t NextVGPR, uint64_t NextSGPR,
    bool ReserveVCC, bool ReserveFlatScr) {
  IsaVersion IVersion = getIsaVersion(STI.getCPU());

  OS << "\t.amdhsa_kernel " << KernelName << '\n';

#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME)   \
  STREAM << "\t\t" << DIRECTIVE << " "                                         \
         << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';

  OS << "\t\t.amdhsa_group_segment_fixed_size " << KD.group_segment_fixed_size
     << '\n';
  OS << "\t\t.amdhsa_private_segment_fixed_size "
     << KD.private_segment_fixed_size << '\n';
  OS << "\t\t.amdhsa_kernarg_size " << KD.kernarg_size << '\n';

  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_queue_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
  if (IVersion.Major >= 10)
    PRINT_FIELD(OS, ".amdhsa_wavefront_size32", KD,
                kernel_code_properties,
                amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
  PRINT_FIELD(
      OS, ".amdhsa_system_sgpr_private_segment_wavefront_offset", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  // These directives are required.
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';

  if (AMDGPU::isGFX90A(STI))
    OS << "\t\t.amdhsa_accum_offset " <<
      (AMDHSA_BITS_GET(KD.compute_pgm_rsrc3,
                       amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
      << '\n';

  if (!ReserveVCC)
    OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  if (IVersion.Major >= 7 && !ReserveFlatScr)
    OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';

  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
    switch (*HsaAbiVer) {
    default:
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
      break;
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
      if (getTargetID()->isXnackSupported())
        OS << "\t\t.amdhsa_reserve_xnack_mask "
           << getTargetID()->isXnackOnOrAny() << '\n';
      break;
    }
  }

  PRINT_FIELD(OS, ".amdhsa_float_round_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_dx10_clamp", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
  PRINT_FIELD(OS, ".amdhsa_ieee_mode", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
  if (IVersion.Major >= 9)
    PRINT_FIELD(OS, ".amdhsa_fp16_overflow", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL);
  if (AMDGPU::isGFX90A(STI))
    PRINT_FIELD(OS, ".amdhsa_tg_split", KD,
                compute_pgm_rsrc3,
                amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
  if (IVersion.Major >= 10) {
    PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_FIELD(OS, ".amdhsa_memory_ordered", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_FIELD(OS, ".amdhsa_forward_progress", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
#undef PRINT_FIELD

  OS << "\t.end_amdhsa_kernel\n";
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetELFStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetELFStreamer::AMDGPUTargetELFStreamer(MCStreamer &S,
                                                 const MCSubtargetInfo &STI)
    : AMDGPUTargetStreamer(S), STI(STI), Streamer(S) {}

MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
  return static_cast<MCELFStreamer &>(Streamer);
}

// A hook for emitting anything that must appear at the end of the stream.
// We use it for emitting the accumulated PAL metadata as a .note record.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetELFStreamer::finish() {
  MCAssembler &MCA = getStreamer().getAssembler();
  MCA.setELFHeaderEFlags(getEFlags());

  std::string Blob;
  const char *Vendor = getPALMetadata()->getVendor();
  unsigned Type = getPALMetadata()->getType();
  getPALMetadata()->toBlob(Type, Blob);
  if (Blob.empty())
    return;
  EmitNote(Vendor, MCConstantExpr::create(Blob.size(), getContext()), Type,
           [&](MCELFStreamer &OS) { OS.emitBytes(Blob); });

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

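// Emits one ELF note record into the note section, using the standard layout
// (each field padded to 4-byte alignment):
//   namesz | descsz | type | name (NUL-terminated) | desc
// DescSZ is an MCExpr so callers can describe desc fields whose size is only
// known at layout time (see EmitISAVersion below, which computes it from
// begin/end labels).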
void AMDGPUTargetELFStreamer::EmitNote(
    StringRef Name, const MCExpr *DescSZ, unsigned NoteType,
    function_ref<void(MCELFStreamer &)> EmitDesc) {
  auto &S = getStreamer();
  auto &Context = S.getContext();

  auto NameSZ = Name.size() + 1;

  unsigned NoteFlags = 0;
  // TODO: Apparently, this is currently needed for OpenCL as mentioned in
  // https://reviews.llvm.org/D74995
  if (STI.getTargetTriple().getOS() == Triple::AMDHSA)
    NoteFlags = ELF::SHF_ALLOC;

  S.PushSection();
  S.SwitchSection(
      Context.getELFSection(ElfNote::SectionName, ELF::SHT_NOTE, NoteFlags));
  S.emitInt32(NameSZ);                                        // namesz
  S.emitValue(DescSZ, 4);                                     // descsz
  S.emitInt32(NoteType);                                      // type
  S.emitBytes(Name);                                          // name
  S.emitValueToAlignment(4, 0, 1, 0);                         // padding 0
  EmitDesc(S);                                                // desc
  S.emitValueToAlignment(4, 0, 1, 0);                         // padding 0
  S.PopSection();
}

unsigned AMDGPUTargetELFStreamer::getEFlags() {
  switch (STI.getTargetTriple().getArch()) {
  default:
    llvm_unreachable("Unsupported Arch");
  case Triple::r600:
    return getEFlagsR600();
  case Triple::amdgcn:
    return getEFlagsAMDGCN();
  }
}

unsigned AMDGPUTargetELFStreamer::getEFlagsR600() {
  assert(STI.getTargetTriple().getArch() == Triple::r600);

  return getElfMach(STI.getCPU());
}

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDGCN() {
  assert(STI.getTargetTriple().getArch() == Triple::amdgcn);

  switch (STI.getTargetTriple().getOS()) {
  default:
    // TODO: Why do some tests have "mingw" listed as the OS?
    // llvm_unreachable("Unsupported OS");
  case Triple::UnknownOS:
    return getEFlagsUnknownOS();
  case Triple::AMDHSA:
    return getEFlagsAMDHSA();
  case Triple::AMDPAL:
    return getEFlagsAMDPAL();
  case Triple::Mesa3D:
    return getEFlagsMesa3D();
  }
}

unsigned AMDGPUTargetELFStreamer::getEFlagsUnknownOS() {
  // TODO: Why do some tests have "mingw" listed as the OS?
  // assert(STI.getTargetTriple().getOS() == Triple::UnknownOS);

  return getEFlagsV3();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDHSA() {
  assert(STI.getTargetTriple().getOS() == Triple::AMDHSA);

  if (Optional<uint8_t> HsaAbiVer = getHsaAbiVersion(&STI)) {
    switch (*HsaAbiVer) {
    case ELF::ELFABIVERSION_AMDGPU_HSA_V2:
    case ELF::ELFABIVERSION_AMDGPU_HSA_V3:
      return getEFlagsV3();
    case ELF::ELFABIVERSION_AMDGPU_HSA_V4:
      return getEFlagsV4();
    }
  }

  llvm_unreachable("HSA OS ABI Version identification must be defined");
}

unsigned AMDGPUTargetELFStreamer::getEFlagsAMDPAL() {
  assert(STI.getTargetTriple().getOS() == Triple::AMDPAL);

  return getEFlagsV3();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsMesa3D() {
  assert(STI.getTargetTriple().getOS() == Triple::Mesa3D);

  return getEFlagsV3();
}

unsigned AMDGPUTargetELFStreamer::getEFlagsV3() {
  unsigned EFlagsV3 = 0;

  // mach.
  EFlagsV3 |= getElfMach(STI.getCPU());

  // xnack.
  if (getTargetID()->isXnackOnOrAny())
    EFlagsV3 |= ELF::EF_AMDGPU_FEATURE_XNACK_V3;
  // sramecc.
  if (getTargetID()->isSramEccOnOrAny())
    EFlagsV3 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_V3;

  return EFlagsV3;
}

unsigned AMDGPUTargetELFStreamer::getEFlagsV4() {
  unsigned EFlagsV4 = 0;

  // mach.
  EFlagsV4 |= getElfMach(STI.getCPU());

  // xnack.
  switch (getTargetID()->getXnackSetting()) {
  case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_UNSUPPORTED_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Any:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_ANY_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Off:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_OFF_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::On:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_XNACK_ON_V4;
    break;
  }
  // sramecc.
  switch (getTargetID()->getSramEccSetting()) {
  case AMDGPU::IsaInfo::TargetIDSetting::Unsupported:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_UNSUPPORTED_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Any:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_ANY_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::Off:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_OFF_V4;
    break;
  case AMDGPU::IsaInfo::TargetIDSetting::On:
    EFlagsV4 |= ELF::EF_AMDGPU_FEATURE_SRAMECC_ON_V4;
    break;
  }

  return EFlagsV4;
}

void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget() {}

void AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(8, getContext()),
           ELF::NT_AMD_HSA_CODE_OBJECT_VERSION, [&](MCELFStreamer &OS) {
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
           });
}

void
AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectISAV2(uint32_t Major,
                                                         uint32_t Minor,
                                                         uint32_t Stepping,
                                                         StringRef VendorName,
                                                         StringRef ArchName) {
  uint16_t VendorNameSize = VendorName.size() + 1;
  uint16_t ArchNameSize = ArchName.size() + 1;

  unsigned DescSZ = sizeof(VendorNameSize) + sizeof(ArchNameSize) +
    sizeof(Major) + sizeof(Minor) + sizeof(Stepping) +
    VendorNameSize + ArchNameSize;

  convertIsaVersionV2(Major, Minor, Stepping, TargetID->isSramEccOnOrAny(),
                      TargetID->isXnackOnOrAny());
  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(DescSZ, getContext()),
           ELF::NT_AMD_HSA_ISA_VERSION, [&](MCELFStreamer &OS) {
             OS.emitInt16(VendorNameSize);
             OS.emitInt16(ArchNameSize);
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
             OS.emitInt32(Stepping);
             OS.emitBytes(VendorName);
             OS.emitInt8(0); // NULL terminate VendorName
             OS.emitBytes(ArchName);
             OS.emitInt8(0); // NULL terminate ArchName
           });
}

void
AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
  OS.PopSection();
}

void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  MCSymbolELF *Symbol = cast<MCSymbolELF>(
      getStreamer().getContext().getOrCreateSymbol(SymbolName));
  Symbol->setType(Type);
}

void AMDGPUTargetELFStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
  SymbolELF->setType(ELF::STT_OBJECT);

  if (!SymbolELF->isBindingSet()) {
    SymbolELF->setBinding(ELF::STB_GLOBAL);
    SymbolELF->setExternal(true);
  }

  if (SymbolELF->declareCommon(Size, Alignment.value(), true)) {
    report_fatal_error("Symbol: " + Symbol->getName() +
                       " redeclared as different type");
  }

  SymbolELF->setIndex(ELF::SHN_AMDGPU_LDS);
  SymbolELF->setSize(MCConstantExpr::create(Size, getContext()));
}

bool AMDGPUTargetELFStreamer::EmitISAVersion() {
  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
    MCSymbolRefExpr::create(DescEnd, Context),
    MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_HSA_ISA_NAME,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(getTargetID()->toString());
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
                                              bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  HSAMetadataDoc.writeToBlob(HSAMetadataString);

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV3, DescSZ, ELF::NT_AMDGPU_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
    MCSymbolRefExpr::create(DescEnd, Context),
    MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_HSA_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;

  // Instruction cache line size in bytes.
  const unsigned Log2CacheLineSize = 6;
  const unsigned CacheLineSize = 1u << Log2CacheLineSize;

  // Extra padding amount in bytes to support prefetch mode 3.
  unsigned FillSize = 3 * CacheLineSize;

  if (AMDGPU::isGFX90A(STI)) {
    Encoded_pad = Encoded_s_nop;
    FillSize = 16 * CacheLineSize;
  }

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitValueToAlignment(CacheLineSize, Encoded_pad, 4);
  for (unsigned I = 0; I < FillSize; I += 4)
    OS.emitInt32(Encoded_pad);
  OS.PopSection();
  return true;
}

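// Emits the kernel descriptor object field by field, in the layout of
// amdhsa::kernel_descriptor_t, under a <KernelName>.kd symbol that mirrors
// the binding, other bits, and visibility of the kernel code symbol.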
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(KernelDescriptor), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.emitLabel(KernelDescriptorSymbol);
  Streamer.emitInt32(KernelDescriptor.group_segment_fixed_size);
  Streamer.emitInt32(KernelDescriptor.private_segment_fixed_size);
  Streamer.emitInt32(KernelDescriptor.kernarg_size);

  for (uint8_t Res : KernelDescriptor.reserved0)
    Streamer.emitInt8(Res);

  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.emitValue(MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(
          KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
      MCSymbolRefExpr::create(
          KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
      Context),
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
  for (uint8_t Res : KernelDescriptor.reserved1)
    Streamer.emitInt8(Res);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc3);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc1);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc2);
  Streamer.emitInt16(KernelDescriptor.kernel_code_properties);
  for (uint8_t Res : KernelDescriptor.reserved2)
    Streamer.emitInt8(Res);
}