//===-- AMDGPUTargetStreamer.cpp - AMDGPU Target Streamer Methods ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AMDGPU specific target streamer methods.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetStreamer.h"
#include "AMDGPUPTNote.h"
#include "AMDKernelCodeT.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/FormattedStream.h"

using namespace llvm;
using namespace llvm::AMDGPU;

//===----------------------------------------------------------------------===//
// AMDGPUTargetStreamer
//===----------------------------------------------------------------------===//

bool AMDGPUTargetStreamer::EmitHSAMetadataV2(StringRef HSAMetadataString) {
  HSAMD::Metadata HSAMetadata;
  if (HSAMD::fromString(HSAMetadataString, HSAMetadata))
    return false;
  return EmitHSAMetadata(HSAMetadata);
}

bool AMDGPUTargetStreamer::EmitHSAMetadataV3(StringRef HSAMetadataString) {
  msgpack::Document HSAMetadataDoc;
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
    return false;
  return EmitHSAMetadata(HSAMetadataDoc, false);
}

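// Map an ELF::EF_AMDGPU_MACH_* value back to the corresponding GPU name,
// preferring the AMDGCN name and falling back to the R600 name for the
// pre-GCN targets.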
StringRef AMDGPUTargetStreamer::getArchNameFromElfMach(unsigned ElfMach) {
  AMDGPU::GPUKind AK;

  switch (ElfMach) {
  default: llvm_unreachable("Unhandled ELF::EF_AMDGPU type");
  case ELF::EF_AMDGPU_MACH_R600_R600:      AK = GK_R600;    break;
  case ELF::EF_AMDGPU_MACH_R600_R630:      AK = GK_R630;    break;
  case ELF::EF_AMDGPU_MACH_R600_RS880:     AK = GK_RS880;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV670:     AK = GK_RV670;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV710:     AK = GK_RV710;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV730:     AK = GK_RV730;   break;
  case ELF::EF_AMDGPU_MACH_R600_RV770:     AK = GK_RV770;   break;
  case ELF::EF_AMDGPU_MACH_R600_CEDAR:     AK = GK_CEDAR;   break;
  case ELF::EF_AMDGPU_MACH_R600_CYPRESS:   AK = GK_CYPRESS; break;
  case ELF::EF_AMDGPU_MACH_R600_JUNIPER:   AK = GK_JUNIPER; break;
  case ELF::EF_AMDGPU_MACH_R600_REDWOOD:   AK = GK_REDWOOD; break;
  case ELF::EF_AMDGPU_MACH_R600_SUMO:      AK = GK_SUMO;    break;
  case ELF::EF_AMDGPU_MACH_R600_BARTS:     AK = GK_BARTS;   break;
  case ELF::EF_AMDGPU_MACH_R600_CAICOS:    AK = GK_CAICOS;  break;
  case ELF::EF_AMDGPU_MACH_R600_CAYMAN:    AK = GK_CAYMAN;  break;
  case ELF::EF_AMDGPU_MACH_R600_TURKS:     AK = GK_TURKS;   break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX600:  AK = GK_GFX600;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX601:  AK = GK_GFX601;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX602:  AK = GK_GFX602;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX700:  AK = GK_GFX700;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX701:  AK = GK_GFX701;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX702:  AK = GK_GFX702;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX703:  AK = GK_GFX703;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX704:  AK = GK_GFX704;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX705:  AK = GK_GFX705;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX801:  AK = GK_GFX801;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX802:  AK = GK_GFX802;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX803:  AK = GK_GFX803;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX805:  AK = GK_GFX805;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX810:  AK = GK_GFX810;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX900:  AK = GK_GFX900;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX902:  AK = GK_GFX902;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX904:  AK = GK_GFX904;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX906:  AK = GK_GFX906;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX908:  AK = GK_GFX908;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX909:  AK = GK_GFX909;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX90A:  AK = GK_GFX90A;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C:  AK = GK_GFX90C;  break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010: AK = GK_GFX1010; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011: AK = GK_GFX1011; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012: AK = GK_GFX1012; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030: AK = GK_GFX1030; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031: AK = GK_GFX1031; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032: AK = GK_GFX1032; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033: AK = GK_GFX1033; break;
  case ELF::EF_AMDGPU_MACH_NONE:           AK = GK_NONE;    break;
  }

  StringRef GPUName = getArchNameAMDGCN(AK);
  if (GPUName != "")
    return GPUName;
  return getArchNameR600(AK);
}

unsigned AMDGPUTargetStreamer::getElfMach(StringRef GPU) {
  AMDGPU::GPUKind AK = parseArchAMDGCN(GPU);
  if (AK == AMDGPU::GPUKind::GK_NONE)
    AK = parseArchR600(GPU);

  switch (AK) {
  case GK_R600:    return ELF::EF_AMDGPU_MACH_R600_R600;
  case GK_R630:    return ELF::EF_AMDGPU_MACH_R600_R630;
  case GK_RS880:   return ELF::EF_AMDGPU_MACH_R600_RS880;
  case GK_RV670:   return ELF::EF_AMDGPU_MACH_R600_RV670;
  case GK_RV710:   return ELF::EF_AMDGPU_MACH_R600_RV710;
  case GK_RV730:   return ELF::EF_AMDGPU_MACH_R600_RV730;
  case GK_RV770:   return ELF::EF_AMDGPU_MACH_R600_RV770;
  case GK_CEDAR:   return ELF::EF_AMDGPU_MACH_R600_CEDAR;
  case GK_CYPRESS: return ELF::EF_AMDGPU_MACH_R600_CYPRESS;
  case GK_JUNIPER: return ELF::EF_AMDGPU_MACH_R600_JUNIPER;
  case GK_REDWOOD: return ELF::EF_AMDGPU_MACH_R600_REDWOOD;
  case GK_SUMO:    return ELF::EF_AMDGPU_MACH_R600_SUMO;
  case GK_BARTS:   return ELF::EF_AMDGPU_MACH_R600_BARTS;
  case GK_CAICOS:  return ELF::EF_AMDGPU_MACH_R600_CAICOS;
  case GK_CAYMAN:  return ELF::EF_AMDGPU_MACH_R600_CAYMAN;
  case GK_TURKS:   return ELF::EF_AMDGPU_MACH_R600_TURKS;
  case GK_GFX600:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX600;
  case GK_GFX601:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX601;
  case GK_GFX602:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX602;
  case GK_GFX700:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX700;
  case GK_GFX701:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX701;
  case GK_GFX702:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX702;
  case GK_GFX703:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX703;
  case GK_GFX704:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX704;
  case GK_GFX705:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX705;
  case GK_GFX801:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX801;
  case GK_GFX802:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX802;
  case GK_GFX803:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX803;
  case GK_GFX805:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX805;
  case GK_GFX810:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX810;
  case GK_GFX900:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX900;
  case GK_GFX902:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX902;
  case GK_GFX904:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX904;
  case GK_GFX906:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX906;
  case GK_GFX908:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX908;
  case GK_GFX909:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX909;
  case GK_GFX90A:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX90A;
  case GK_GFX90C:  return ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C;
  case GK_GFX1010: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010;
  case GK_GFX1011: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011;
  case GK_GFX1012: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012;
  case GK_GFX1030: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030;
  case GK_GFX1031: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031;
  case GK_GFX1032: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032;
  case GK_GFX1033: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033;
  case GK_NONE:    return ELF::EF_AMDGPU_MACH_NONE;
  }

  llvm_unreachable("unknown GPU");
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetAsmStreamer
//===----------------------------------------------------------------------===//

AMDGPUTargetAsmStreamer::AMDGPUTargetAsmStreamer(MCStreamer &S,
                                                 formatted_raw_ostream &OS)
    : AMDGPUTargetStreamer(S), OS(OS) { }

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as directives.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetAsmStreamer::finish() {
  std::string S;
  getPALMetadata()->toString(S);
  OS << S;

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {
  OS << "\t.amdgcn_target \"" << Target << "\"\n";
}

void AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {
  OS << "\t.hsa_code_object_version " <<
        Twine(Major) << "," << Twine(Minor) << '\n';
}

void
AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  OS << "\t.hsa_code_object_isa " <<
        Twine(Major) << "," << Twine(Minor) << "," << Twine(Stepping) <<
        ",\"" << VendorName << "\",\"" << ArchName << "\"\n";

}

void
AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
  OS << "\t.amd_kernel_code_t\n";
  dumpAmdKernelCode(&Header, OS, "\t\t");
  OS << "\t.end_amd_kernel_code_t\n";
}

void AMDGPUTargetAsmStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  switch (Type) {
    default: llvm_unreachable("Invalid AMDGPU symbol type");
    case ELF::STT_AMDGPU_HSA_KERNEL:
      OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n' ;
      break;
  }
}

void AMDGPUTargetAsmStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
}

bool AMDGPUTargetAsmStreamer::EmitISAVersion(StringRef IsaVersionString) {
  OS << "\t.amd_amdgpu_isa \"" << IsaVersionString << "\"\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  OS << '\t' << HSAMD::AssemblerDirectiveBegin << '\n';
  OS << HSAMetadataString << '\n';
  OS << '\t' << HSAMD::AssemblerDirectiveEnd << '\n';
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    msgpack::Document &HSAMetadataDoc, bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  raw_string_ostream StrOS(HSAMetadataString);
  HSAMetadataDoc.toYAML(StrOS);

  OS << '\t' << HSAMD::V3::AssemblerDirectiveBegin << '\n';
  OS << StrOS.str() << '\n';
  OS << '\t' << HSAMD::V3::AssemblerDirectiveEnd << '\n';
  return true;
}

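// Pad the end of the code with s_code_end instructions (s_nop on gfx90a),
// aligned to a 64-byte boundary. The emitted directives look like:
//
//   .p2alignl 6, <pad encoding>
//   .fill <FillSize>, 4, <pad encoding>
//
// where <pad encoding> and <FillSize> are the values chosen in the function
// below; the padding is presumably there so instruction prefetch never runs
// past the end of valid code.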
bool AMDGPUTargetAsmStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;
  unsigned FillSize = 48;

  if (AMDGPU::isGFX90A(STI)) {
    Encoded_pad = Encoded_s_nop;
    FillSize = 256;
  }

  OS << "\t.p2alignl 6, " << Encoded_pad << '\n';
  OS << "\t.fill " << FillSize << ", 4, " << Encoded_pad << '\n';
  return true;
}

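// Print the kernel descriptor as an .amdhsa_kernel directive block, roughly:
//
//   .amdhsa_kernel <name>
//     .amdhsa_group_segment_fixed_size <n>
//     ...
//   .end_amdhsa_kernel
//
// with one .amdhsa_* directive per descriptor field. The real output uses tab
// indentation; the layout above is only a sketch.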
void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KD, uint64_t NextVGPR, uint64_t NextSGPR,
    bool ReserveVCC, bool ReserveFlatScr, bool ReserveXNACK) {
  IsaVersion IVersion = getIsaVersion(STI.getCPU());

  OS << "\t.amdhsa_kernel " << KernelName << '\n';

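// Helper for the directives below: extract a bit-field from the given
// descriptor member with AMDHSA_BITS_GET and print it as the value of
// DIRECTIVE.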
#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME)   \
  STREAM << "\t\t" << DIRECTIVE << " "                                         \
         << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';

  OS << "\t\t.amdhsa_group_segment_fixed_size " << KD.group_segment_fixed_size
     << '\n';
  OS << "\t\t.amdhsa_private_segment_fixed_size "
     << KD.private_segment_fixed_size << '\n';

  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_queue_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
  if (IVersion.Major >= 10)
    PRINT_FIELD(OS, ".amdhsa_wavefront_size32", KD,
                kernel_code_properties,
                amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
  PRINT_FIELD(
      OS, ".amdhsa_system_sgpr_private_segment_wavefront_offset", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  // These directives are required.
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';

  if (AMDGPU::isGFX90A(STI))
    OS << "\t\t.amdhsa_accum_offset " <<
      (AMDHSA_BITS_GET(KD.compute_pgm_rsrc3,
                       amdhsa::COMPUTE_PGM_RSRC3_GFX90A_ACCUM_OFFSET) + 1) * 4
      << '\n';

  if (!ReserveVCC)
    OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  if (IVersion.Major >= 7 && !ReserveFlatScr)
    OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
  if (IVersion.Major >= 8 && ReserveXNACK != hasXNACK(STI))
    OS << "\t\t.amdhsa_reserve_xnack_mask " << ReserveXNACK << '\n';

  PRINT_FIELD(OS, ".amdhsa_float_round_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_dx10_clamp", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
  PRINT_FIELD(OS, ".amdhsa_ieee_mode", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
  if (IVersion.Major >= 9)
    PRINT_FIELD(OS, ".amdhsa_fp16_overflow", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL);
  if (AMDGPU::isGFX90A(STI))
    PRINT_FIELD(OS, ".amdhsa_tg_split", KD,
                compute_pgm_rsrc3,
                amdhsa::COMPUTE_PGM_RSRC3_GFX90A_TG_SPLIT);
  if (IVersion.Major >= 10) {
    PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_FIELD(OS, ".amdhsa_memory_ordered", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_FIELD(OS, ".amdhsa_forward_progress", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
#undef PRINT_FIELD

  OS << "\t.end_amdhsa_kernel\n";
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetELFStreamer
//===----------------------------------------------------------------------===//

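// The constructor initializes the ELF header e_flags from the subtarget: the
// EF_AMDGPU_MACH field is derived from the CPU name, and the XNACK and
// SRAM-ECC bits are set when the corresponding target features are enabled.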
AMDGPUTargetELFStreamer::AMDGPUTargetELFStreamer(MCStreamer &S,
                                                 const MCSubtargetInfo &STI)
    : AMDGPUTargetStreamer(S), Streamer(S), Os(STI.getTargetTriple().getOS()) {
  MCAssembler &MCA = getStreamer().getAssembler();
  unsigned EFlags = MCA.getELFHeaderEFlags();

  EFlags &= ~ELF::EF_AMDGPU_MACH;
  EFlags |= getElfMach(STI.getCPU());

  EFlags &= ~ELF::EF_AMDGPU_XNACK;
  if (AMDGPU::hasXNACK(STI))
    EFlags |= ELF::EF_AMDGPU_XNACK;

  EFlags &= ~ELF::EF_AMDGPU_SRAM_ECC;
  if (AMDGPU::hasSRAMECC(STI))
    EFlags |= ELF::EF_AMDGPU_SRAM_ECC;

  MCA.setELFHeaderEFlags(EFlags);
}

MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
  return static_cast<MCELFStreamer &>(Streamer);
}

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as a .note record.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetELFStreamer::finish() {
  std::string Blob;
  const char *Vendor = getPALMetadata()->getVendor();
  unsigned Type = getPALMetadata()->getType();
  getPALMetadata()->toBlob(Type, Blob);
  if (Blob.empty())
    return;
  EmitNote(Vendor, MCConstantExpr::create(Blob.size(), getContext()), Type,
           [&](MCELFStreamer &OS) { OS.emitBytes(Blob); });

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

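// Emit a single record into the SHT_NOTE section:
//
//   namesz | descsz | type | name (padded to 4 bytes) | desc (padded to 4)
//
// Name and type come from the caller; the desc payload is produced by the
// EmitDesc callback.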
void AMDGPUTargetELFStreamer::EmitNote(
    StringRef Name, const MCExpr *DescSZ, unsigned NoteType,
    function_ref<void(MCELFStreamer &)> EmitDesc) {
  auto &S = getStreamer();
  auto &Context = S.getContext();

  auto NameSZ = Name.size() + 1;

  unsigned NoteFlags = 0;
  // TODO Apparently, this is currently needed for OpenCL as mentioned in
  // https://reviews.llvm.org/D74995
  if (Os == Triple::AMDHSA)
    NoteFlags = ELF::SHF_ALLOC;

  S.PushSection();
  S.SwitchSection(
      Context.getELFSection(ElfNote::SectionName, ELF::SHT_NOTE, NoteFlags));
  S.emitInt32(NameSZ);                                      // namesz
  S.emitValue(DescSZ, 4);                                   // descsz
  S.emitInt32(NoteType);                                    // type
  S.emitBytes(Name);                                        // name
  S.emitValueToAlignment(4, 0, 1, 0);                       // padding 0
  EmitDesc(S);                                              // desc
  S.emitValueToAlignment(4, 0, 1, 0);                       // padding 0
  S.PopSection();
}

void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {}

void AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(8, getContext()),
           ElfNote::NT_AMDGPU_HSA_CODE_OBJECT_VERSION, [&](MCELFStreamer &OS) {
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
           });
}

void
AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  uint16_t VendorNameSize = VendorName.size() + 1;
  uint16_t ArchNameSize = ArchName.size() + 1;

  unsigned DescSZ = sizeof(VendorNameSize) + sizeof(ArchNameSize) +
    sizeof(Major) + sizeof(Minor) + sizeof(Stepping) +
    VendorNameSize + ArchNameSize;

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(DescSZ, getContext()),
           ElfNote::NT_AMDGPU_HSA_ISA, [&](MCELFStreamer &OS) {
             OS.emitInt16(VendorNameSize);
             OS.emitInt16(ArchNameSize);
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
             OS.emitInt32(Stepping);
             OS.emitBytes(VendorName);
             OS.emitInt8(0); // NULL terminate VendorName
             OS.emitBytes(ArchName);
             OS.emitInt8(0); // NULL terminate ArchName
           });
}

void
AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
  OS.PopSection();
}

void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  MCSymbolELF *Symbol = cast<MCSymbolELF>(
      getStreamer().getContext().getOrCreateSymbol(SymbolName));
  Symbol->setType(Type);
}

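// Record an LDS variable as a common object symbol with the given size and
// alignment, using the special SHN_AMDGPU_LDS section index to mark it as
// living in group (LDS) memory rather than in a section of the object file.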
void AMDGPUTargetELFStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
  SymbolELF->setType(ELF::STT_OBJECT);

  if (!SymbolELF->isBindingSet()) {
    SymbolELF->setBinding(ELF::STB_GLOBAL);
    SymbolELF->setExternal(true);
  }

  if (SymbolELF->declareCommon(Size, Alignment.value(), true)) {
    report_fatal_error("Symbol: " + Symbol->getName() +
                       " redeclared as different type");
  }

  SymbolELF->setIndex(ELF::SHN_AMDGPU_LDS);
  SymbolELF->setSize(MCConstantExpr::create(Size, getContext()));
}

bool AMDGPUTargetELFStreamer::EmitISAVersion(StringRef IsaVersionString) {
  // Create two labels to mark the beginning and end of the desc field
  // and an MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
    MCSymbolRefExpr::create(DescEnd, Context),
    MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_ISA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(IsaVersionString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
                                              bool Strict) {
  HSAMD::V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  HSAMetadataDoc.writeToBlob(HSAMetadataString);

  // Create two labels to mark the beginning and end of the desc field
  // and an MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV3, DescSZ, ELF::NT_AMDGPU_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  // Create two labels to mark the beginning and end of the desc field
  // and an MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
    MCSymbolRefExpr::create(DescEnd, Context),
    MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_HSA_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

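// Object-file counterpart of AMDGPUTargetAsmStreamer::EmitCodeEnd above:
// align to 64 bytes using the chosen encoding as the fill value, then emit
// FillSize additional 4-byte words of it.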
bool AMDGPUTargetELFStreamer::EmitCodeEnd(const MCSubtargetInfo &STI) {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  const uint32_t Encoded_s_nop = 0xbf800000;
  uint32_t Encoded_pad = Encoded_s_code_end;
  unsigned FillSize = 48;

  if (AMDGPU::isGFX90A(STI)) {
    Encoded_pad = Encoded_s_nop;
    FillSize = 256;
  }

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitValueToAlignment(64, Encoded_pad, 4);
  for (unsigned I = 0; I < FillSize; ++I)
    OS.emitInt32(Encoded_pad);
  OS.PopSection();
  return true;
}

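// Emit the kernel descriptor as raw bytes laid out exactly like
// amdhsa::kernel_descriptor_t, labelled by a <kernel name>.kd object symbol.
// kernel_code_entry_byte_offset is emitted as the difference between the
// kernel code symbol and the descriptor symbol, so it resolves to the offset
// of the entry point from the start of the descriptor.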
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr,
    bool ReserveXNACK) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(KernelDescriptor), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.emitLabel(KernelDescriptorSymbol);
  Streamer.emitInt32(KernelDescriptor.group_segment_fixed_size);
  Streamer.emitInt32(KernelDescriptor.private_segment_fixed_size);
  for (uint8_t Res : KernelDescriptor.reserved0)
    Streamer.emitInt8(Res);
  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.emitValue(MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(
          KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
      MCSymbolRefExpr::create(
          KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
      Context),
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
  for (uint8_t Res : KernelDescriptor.reserved1)
    Streamer.emitInt8(Res);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc3);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc1);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc2);
  Streamer.emitInt16(KernelDescriptor.kernel_code_properties);
  for (uint8_t Res : KernelDescriptor.reserved2)
    Streamer.emitInt8(Res);
}