//===-- AMDGPUTargetStreamer.cpp - AMDGPU Target Streamer Methods --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides AMDGPU specific target streamer methods.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUTargetStreamer.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/AMDGPUMetadataVerifier.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFStreamer.h"
#include "llvm/MC/MCObjectFileInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/TargetParser.h"

namespace llvm {
#include "AMDGPUPTNote.h"
}

using namespace llvm;
using namespace llvm::AMDGPU;
using namespace llvm::AMDGPU::HSAMD;

//===----------------------------------------------------------------------===//
// AMDGPUTargetStreamer
//===----------------------------------------------------------------------===//

bool AMDGPUTargetStreamer::EmitHSAMetadataV2(StringRef HSAMetadataString) {
  HSAMD::Metadata HSAMetadata;
  if (HSAMD::fromString(std::string(HSAMetadataString), HSAMetadata))
    return false;

  return EmitHSAMetadata(HSAMetadata);
}

bool AMDGPUTargetStreamer::EmitHSAMetadataV3(StringRef HSAMetadataString) {
  msgpack::Document HSAMetadataDoc;
  if (!HSAMetadataDoc.fromYAML(HSAMetadataString))
    return false;
  return EmitHSAMetadata(HSAMetadataDoc, false);
}

StringRef AMDGPUTargetStreamer::getArchNameFromElfMach(unsigned ElfMach) {
  AMDGPU::GPUKind AK;

  switch (ElfMach) {
  default: llvm_unreachable("Unhandled ELF::EF_AMDGPU type");
  case ELF::EF_AMDGPU_MACH_R600_R600: AK = GK_R600; break;
  case ELF::EF_AMDGPU_MACH_R600_R630: AK = GK_R630; break;
  case ELF::EF_AMDGPU_MACH_R600_RS880: AK = GK_RS880; break;
  case ELF::EF_AMDGPU_MACH_R600_RV670: AK = GK_RV670; break;
  case ELF::EF_AMDGPU_MACH_R600_RV710: AK = GK_RV710; break;
  case ELF::EF_AMDGPU_MACH_R600_RV730: AK = GK_RV730; break;
  case ELF::EF_AMDGPU_MACH_R600_RV770: AK = GK_RV770; break;
  case ELF::EF_AMDGPU_MACH_R600_CEDAR: AK = GK_CEDAR; break;
  case ELF::EF_AMDGPU_MACH_R600_CYPRESS: AK = GK_CYPRESS; break;
  case ELF::EF_AMDGPU_MACH_R600_JUNIPER: AK = GK_JUNIPER; break;
  case ELF::EF_AMDGPU_MACH_R600_REDWOOD: AK = GK_REDWOOD; break;
  case ELF::EF_AMDGPU_MACH_R600_SUMO: AK = GK_SUMO; break;
  case ELF::EF_AMDGPU_MACH_R600_BARTS: AK = GK_BARTS; break;
  case ELF::EF_AMDGPU_MACH_R600_CAICOS: AK = GK_CAICOS; break;
  case ELF::EF_AMDGPU_MACH_R600_CAYMAN: AK = GK_CAYMAN; break;
  case ELF::EF_AMDGPU_MACH_R600_TURKS: AK = GK_TURKS; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX600: AK = GK_GFX600; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX601: AK = GK_GFX601; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX602: AK = GK_GFX602; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX700: AK = GK_GFX700; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX701: AK = GK_GFX701; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX702: AK = GK_GFX702; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX703: AK = GK_GFX703; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX704: AK = GK_GFX704; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX705: AK = GK_GFX705; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX801: AK = GK_GFX801; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX802: AK = GK_GFX802; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX803: AK = GK_GFX803; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX805: AK = GK_GFX805; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX810: AK = GK_GFX810; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX900: AK = GK_GFX900; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX902: AK = GK_GFX902; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX904: AK = GK_GFX904; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX906: AK = GK_GFX906; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX908: AK = GK_GFX908; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX909: AK = GK_GFX909; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C: AK = GK_GFX90C; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010: AK = GK_GFX1010; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011: AK = GK_GFX1011; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012: AK = GK_GFX1012; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030: AK = GK_GFX1030; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031: AK = GK_GFX1031; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032: AK = GK_GFX1032; break;
  case ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033: AK = GK_GFX1033; break;
  case ELF::EF_AMDGPU_MACH_NONE: AK = GK_NONE; break;
  }

  StringRef GPUName = getArchNameAMDGCN(AK);
  if (GPUName != "")
    return GPUName;
  return getArchNameR600(AK);
}

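// Inverse of getArchNameFromElfMach: map a processor name to the
// EF_AMDGPU_MACH_* e_flags value, trying the AMDGCN names first and falling
// back to the R600 names.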
unsigned AMDGPUTargetStreamer::getElfMach(StringRef GPU) {
  AMDGPU::GPUKind AK = parseArchAMDGCN(GPU);
  if (AK == AMDGPU::GPUKind::GK_NONE)
    AK = parseArchR600(GPU);

  switch (AK) {
  case GK_R600: return ELF::EF_AMDGPU_MACH_R600_R600;
  case GK_R630: return ELF::EF_AMDGPU_MACH_R600_R630;
  case GK_RS880: return ELF::EF_AMDGPU_MACH_R600_RS880;
  case GK_RV670: return ELF::EF_AMDGPU_MACH_R600_RV670;
  case GK_RV710: return ELF::EF_AMDGPU_MACH_R600_RV710;
  case GK_RV730: return ELF::EF_AMDGPU_MACH_R600_RV730;
  case GK_RV770: return ELF::EF_AMDGPU_MACH_R600_RV770;
  case GK_CEDAR: return ELF::EF_AMDGPU_MACH_R600_CEDAR;
  case GK_CYPRESS: return ELF::EF_AMDGPU_MACH_R600_CYPRESS;
  case GK_JUNIPER: return ELF::EF_AMDGPU_MACH_R600_JUNIPER;
  case GK_REDWOOD: return ELF::EF_AMDGPU_MACH_R600_REDWOOD;
  case GK_SUMO: return ELF::EF_AMDGPU_MACH_R600_SUMO;
  case GK_BARTS: return ELF::EF_AMDGPU_MACH_R600_BARTS;
  case GK_CAICOS: return ELF::EF_AMDGPU_MACH_R600_CAICOS;
  case GK_CAYMAN: return ELF::EF_AMDGPU_MACH_R600_CAYMAN;
  case GK_TURKS: return ELF::EF_AMDGPU_MACH_R600_TURKS;
  case GK_GFX600: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX600;
  case GK_GFX601: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX601;
  case GK_GFX602: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX602;
  case GK_GFX700: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX700;
  case GK_GFX701: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX701;
  case GK_GFX702: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX702;
  case GK_GFX703: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX703;
  case GK_GFX704: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX704;
  case GK_GFX705: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX705;
  case GK_GFX801: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX801;
  case GK_GFX802: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX802;
  case GK_GFX803: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX803;
  case GK_GFX805: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX805;
  case GK_GFX810: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX810;
  case GK_GFX900: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX900;
  case GK_GFX902: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX902;
  case GK_GFX904: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX904;
  case GK_GFX906: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX906;
  case GK_GFX908: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX908;
  case GK_GFX909: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX909;
  case GK_GFX90C: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX90C;
  case GK_GFX1010: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1010;
  case GK_GFX1011: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1011;
  case GK_GFX1012: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1012;
  case GK_GFX1030: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1030;
  case GK_GFX1031: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1031;
  case GK_GFX1032: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1032;
  case GK_GFX1033: return ELF::EF_AMDGPU_MACH_AMDGCN_GFX1033;
  case GK_NONE: return ELF::EF_AMDGPU_MACH_NONE;
  }

  llvm_unreachable("unknown GPU");
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetAsmStreamer
//===----------------------------------------------------------------------===//

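// The assembler variant of the target streamer: each hook below is lowered to
// the corresponding textual directive on the formatted output stream.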
AMDGPUTargetAsmStreamer::AMDGPUTargetAsmStreamer(MCStreamer &S,
                                                 formatted_raw_ostream &OS)
    : AMDGPUTargetStreamer(S), OS(OS) { }

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as directives.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetAsmStreamer::finish() {
  std::string S;
  getPALMetadata()->toString(S);
  OS << S;

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

void AMDGPUTargetAsmStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {
  OS << "\t.amdgcn_target \"" << Target << "\"\n";
}

void AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {
  OS << "\t.hsa_code_object_version " <<
        Twine(Major) << "," << Twine(Minor) << '\n';
}

void
AMDGPUTargetAsmStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  OS << "\t.hsa_code_object_isa " <<
        Twine(Major) << "," << Twine(Minor) << "," << Twine(Stepping) <<
        ",\"" << VendorName << "\",\"" << ArchName << "\"\n";
}

void
AMDGPUTargetAsmStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {
  OS << "\t.amd_kernel_code_t\n";
  dumpAmdKernelCode(&Header, OS, "\t\t");
  OS << "\t.end_amd_kernel_code_t\n";
}

void AMDGPUTargetAsmStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  switch (Type) {
  default: llvm_unreachable("Invalid AMDGPU symbol type");
  case ELF::STT_AMDGPU_HSA_KERNEL:
    OS << "\t.amdgpu_hsa_kernel " << SymbolName << '\n';
    break;
  }
}

void AMDGPUTargetAsmStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  OS << "\t.amdgpu_lds " << Symbol->getName() << ", " << Size << ", "
     << Alignment.value() << '\n';
}

bool AMDGPUTargetAsmStreamer::EmitISAVersion(StringRef IsaVersionString) {
  OS << "\t.amd_amdgpu_isa \"" << IsaVersionString << "\"\n";
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  OS << '\t' << AssemblerDirectiveBegin << '\n';
  OS << HSAMetadataString << '\n';
  OS << '\t' << AssemblerDirectiveEnd << '\n';
  return true;
}

bool AMDGPUTargetAsmStreamer::EmitHSAMetadata(
    msgpack::Document &HSAMetadataDoc, bool Strict) {
  V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  raw_string_ostream StrOS(HSAMetadataString);
  HSAMetadataDoc.toYAML(StrOS);

  OS << '\t' << V3::AssemblerDirectiveBegin << '\n';
  OS << StrOS.str() << '\n';
  OS << '\t' << V3::AssemblerDirectiveEnd << '\n';
  return true;
}

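// Pad the end of the code with encoded s_code_end instructions: align to a
// 64-byte boundary using the encoding as fill, then emit 48 more words (192
// bytes), so that fetches past the last real instruction still land on valid
// instruction words.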
bool AMDGPUTargetAsmStreamer::EmitCodeEnd() {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;
  OS << "\t.p2alignl 6, " << Encoded_s_code_end << '\n';
  OS << "\t.fill 48, 4, " << Encoded_s_code_end << '\n';
  return true;
}

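// Print the kernel descriptor as a .amdhsa_kernel directive block. PRINT_FIELD
// extracts one bit-field from the named descriptor register and emits it as
// the corresponding .amdhsa_* sub-directive; directives that only exist on
// newer ISAs are guarded by IsaVersion checks.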
void AMDGPUTargetAsmStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KD, uint64_t NextVGPR, uint64_t NextSGPR,
    bool ReserveVCC, bool ReserveFlatScr, bool ReserveXNACK) {
  IsaVersion IVersion = getIsaVersion(STI.getCPU());

  OS << "\t.amdhsa_kernel " << KernelName << '\n';

#define PRINT_FIELD(STREAM, DIRECTIVE, KERNEL_DESC, MEMBER_NAME, FIELD_NAME)   \
  STREAM << "\t\t" << DIRECTIVE << " "                                         \
         << AMDHSA_BITS_GET(KERNEL_DESC.MEMBER_NAME, FIELD_NAME) << '\n';

  OS << "\t\t.amdhsa_group_segment_fixed_size " << KD.group_segment_fixed_size
     << '\n';
  OS << "\t\t.amdhsa_private_segment_fixed_size "
     << KD.private_segment_fixed_size << '\n';

  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_buffer", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_queue_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_kernarg_segment_ptr", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_dispatch_id", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_flat_scratch_init", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT);
  PRINT_FIELD(OS, ".amdhsa_user_sgpr_private_segment_size", KD,
              kernel_code_properties,
              amdhsa::KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE);
  if (IVersion.Major >= 10)
    PRINT_FIELD(OS, ".amdhsa_wavefront_size32", KD,
                kernel_code_properties,
                amdhsa::KERNEL_CODE_PROPERTY_ENABLE_WAVEFRONT_SIZE32);
  PRINT_FIELD(
      OS, ".amdhsa_system_sgpr_private_segment_wavefront_offset", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_PRIVATE_SEGMENT);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_x", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_y", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_id_z", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z);
  PRINT_FIELD(OS, ".amdhsa_system_sgpr_workgroup_info", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO);
  PRINT_FIELD(OS, ".amdhsa_system_vgpr_workitem_id", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID);

  // These directives are required.
  OS << "\t\t.amdhsa_next_free_vgpr " << NextVGPR << '\n';
  OS << "\t\t.amdhsa_next_free_sgpr " << NextSGPR << '\n';

  if (!ReserveVCC)
    OS << "\t\t.amdhsa_reserve_vcc " << ReserveVCC << '\n';
  if (IVersion.Major >= 7 && !ReserveFlatScr)
    OS << "\t\t.amdhsa_reserve_flat_scratch " << ReserveFlatScr << '\n';
  if (IVersion.Major >= 8 && ReserveXNACK != hasXNACK(STI))
    OS << "\t\t.amdhsa_reserve_xnack_mask " << ReserveXNACK << '\n';

  PRINT_FIELD(OS, ".amdhsa_float_round_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_round_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_32", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32);
  PRINT_FIELD(OS, ".amdhsa_float_denorm_mode_16_64", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64);
  PRINT_FIELD(OS, ".amdhsa_dx10_clamp", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP);
  PRINT_FIELD(OS, ".amdhsa_ieee_mode", KD,
              compute_pgm_rsrc1,
              amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE);
  if (IVersion.Major >= 9)
    PRINT_FIELD(OS, ".amdhsa_fp16_overflow", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FP16_OVFL);
  if (IVersion.Major >= 10) {
    PRINT_FIELD(OS, ".amdhsa_workgroup_processor_mode", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE);
    PRINT_FIELD(OS, ".amdhsa_memory_ordered", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED);
    PRINT_FIELD(OS, ".amdhsa_forward_progress", KD,
                compute_pgm_rsrc1,
                amdhsa::COMPUTE_PGM_RSRC1_FWD_PROGRESS);
  }
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_invalid_op", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_denorm_src", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE);
  PRINT_FIELD(
      OS, ".amdhsa_exception_fp_ieee_div_zero", KD,
      compute_pgm_rsrc2,
      amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_overflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_underflow", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW);
  PRINT_FIELD(OS, ".amdhsa_exception_fp_ieee_inexact", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT);
  PRINT_FIELD(OS, ".amdhsa_exception_int_div_zero", KD,
              compute_pgm_rsrc2,
              amdhsa::COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO);
#undef PRINT_FIELD

  OS << "\t.end_amdhsa_kernel\n";
}

//===----------------------------------------------------------------------===//
// AMDGPUTargetELFStreamer
//===----------------------------------------------------------------------===//

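// The object-emitting variant of the target streamer. The constructor encodes
// the subtarget into the ELF header e_flags: the EF_AMDGPU_MACH field selects
// the processor, and the XNACK / SRAM-ECC bits are set from the subtarget
// features.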
AMDGPUTargetELFStreamer::AMDGPUTargetELFStreamer(MCStreamer &S,
                                                 const MCSubtargetInfo &STI)
    : AMDGPUTargetStreamer(S), Streamer(S), Os(STI.getTargetTriple().getOS()) {
  MCAssembler &MCA = getStreamer().getAssembler();
  unsigned EFlags = MCA.getELFHeaderEFlags();

  EFlags &= ~ELF::EF_AMDGPU_MACH;
  EFlags |= getElfMach(STI.getCPU());

  EFlags &= ~ELF::EF_AMDGPU_XNACK;
  if (AMDGPU::hasXNACK(STI))
    EFlags |= ELF::EF_AMDGPU_XNACK;

  EFlags &= ~ELF::EF_AMDGPU_SRAM_ECC;
  if (AMDGPU::hasSRAMECC(STI))
    EFlags |= ELF::EF_AMDGPU_SRAM_ECC;

  MCA.setELFHeaderEFlags(EFlags);
}

MCELFStreamer &AMDGPUTargetELFStreamer::getStreamer() {
  return static_cast<MCELFStreamer &>(Streamer);
}

// A hook for emitting stuff at the end.
// We use it for emitting the accumulated PAL metadata as a .note record.
// The PAL metadata is reset after it is emitted.
void AMDGPUTargetELFStreamer::finish() {
  std::string Blob;
  const char *Vendor = getPALMetadata()->getVendor();
  unsigned Type = getPALMetadata()->getType();
  getPALMetadata()->toBlob(Type, Blob);
  if (Blob.empty())
    return;
  EmitNote(Vendor, MCConstantExpr::create(Blob.size(), getContext()), Type,
           [&](MCELFStreamer &OS) { OS.emitBytes(Blob); });

  // Reset the pal metadata so its data will not affect a compilation that
  // reuses this object.
  getPALMetadata()->reset();
}

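// Emit one record into the AMDGPU note section: namesz, descsz, type, the name
// padded with zeros to a 4-byte boundary, then the desc payload, also padded
// to 4 bytes. DescSZ is an MCExpr so callers can describe payloads whose size
// is only known after labels are laid out.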
void AMDGPUTargetELFStreamer::EmitNote(
    StringRef Name, const MCExpr *DescSZ, unsigned NoteType,
    function_ref<void(MCELFStreamer &)> EmitDesc) {
  auto &S = getStreamer();
  auto &Context = S.getContext();

  auto NameSZ = Name.size() + 1;

  unsigned NoteFlags = 0;
  // TODO Apparently, this is currently needed for OpenCL as mentioned in
  // https://reviews.llvm.org/D74995
  if (Os == Triple::AMDHSA)
    NoteFlags = ELF::SHF_ALLOC;

  S.PushSection();
  S.SwitchSection(
      Context.getELFSection(ElfNote::SectionName, ELF::SHT_NOTE, NoteFlags));
  S.emitInt32(NameSZ);                // namesz
  S.emitValue(DescSZ, 4);             // descsz
  S.emitInt32(NoteType);              // type
  S.emitBytes(Name);                  // name
  S.emitValueToAlignment(4, 0, 1, 0); // padding 0
  EmitDesc(S);                        // desc
  S.emitValueToAlignment(4, 0, 1, 0); // padding 0
  S.PopSection();
}

void AMDGPUTargetELFStreamer::EmitDirectiveAMDGCNTarget(StringRef Target) {}

void AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectVersion(
    uint32_t Major, uint32_t Minor) {

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(8, getContext()),
           ElfNote::NT_AMDGPU_HSA_CODE_OBJECT_VERSION, [&](MCELFStreamer &OS) {
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
           });
}

void
AMDGPUTargetELFStreamer::EmitDirectiveHSACodeObjectISA(uint32_t Major,
                                                       uint32_t Minor,
                                                       uint32_t Stepping,
                                                       StringRef VendorName,
                                                       StringRef ArchName) {
  uint16_t VendorNameSize = VendorName.size() + 1;
  uint16_t ArchNameSize = ArchName.size() + 1;

  unsigned DescSZ = sizeof(VendorNameSize) + sizeof(ArchNameSize) +
    sizeof(Major) + sizeof(Minor) + sizeof(Stepping) +
    VendorNameSize + ArchNameSize;

  EmitNote(ElfNote::NoteNameV2, MCConstantExpr::create(DescSZ, getContext()),
           ElfNote::NT_AMDGPU_HSA_ISA, [&](MCELFStreamer &OS) {
             OS.emitInt16(VendorNameSize);
             OS.emitInt16(ArchNameSize);
             OS.emitInt32(Major);
             OS.emitInt32(Minor);
             OS.emitInt32(Stepping);
             OS.emitBytes(VendorName);
             OS.emitInt8(0); // NULL terminate VendorName
             OS.emitBytes(ArchName);
             OS.emitInt8(0); // NULL terminate ArchName
           });
}

void
AMDGPUTargetELFStreamer::EmitAMDKernelCodeT(const amd_kernel_code_t &Header) {

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitBytes(StringRef((const char*)&Header, sizeof(Header)));
  OS.PopSection();
}

void AMDGPUTargetELFStreamer::EmitAMDGPUSymbolType(StringRef SymbolName,
                                                   unsigned Type) {
  MCSymbolELF *Symbol = cast<MCSymbolELF>(
      getStreamer().getContext().getOrCreateSymbol(SymbolName));
  Symbol->setType(Type);
}

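// Declare a variable that lives in LDS (group) memory: an STT_OBJECT symbol
// with the requested size and alignment whose section index is the
// processor-specific SHN_AMDGPU_LDS, so no space is allocated for it in the
// object file itself.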
void AMDGPUTargetELFStreamer::emitAMDGPULDS(MCSymbol *Symbol, unsigned Size,
                                            Align Alignment) {
  MCSymbolELF *SymbolELF = cast<MCSymbolELF>(Symbol);
  SymbolELF->setType(ELF::STT_OBJECT);

  if (!SymbolELF->isBindingSet()) {
    SymbolELF->setBinding(ELF::STB_GLOBAL);
    SymbolELF->setExternal(true);
  }

  if (SymbolELF->declareCommon(Size, Alignment.value(), true)) {
    report_fatal_error("Symbol: " + Symbol->getName() +
                       " redeclared as different type");
  }

  SymbolELF->setIndex(ELF::SHN_AMDGPU_LDS);
  SymbolELF->setSize(MCConstantExpr::create(Size, getContext()));
}

bool AMDGPUTargetELFStreamer::EmitISAVersion(StringRef IsaVersionString) {
  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_ISA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(IsaVersionString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(msgpack::Document &HSAMetadataDoc,
                                              bool Strict) {
  V3::MetadataVerifier Verifier(Strict);
  if (!Verifier.verify(HSAMetadataDoc.getRoot()))
    return false;

  std::string HSAMetadataString;
  HSAMetadataDoc.writeToBlob(HSAMetadataString);

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV3, DescSZ, ELF::NT_AMDGPU_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitHSAMetadata(
    const AMDGPU::HSAMD::Metadata &HSAMetadata) {
  std::string HSAMetadataString;
  if (HSAMD::toString(HSAMetadata, HSAMetadataString))
    return false;

  // Create two labels to mark the beginning and end of the desc field
  // and a MCExpr to calculate the size of the desc field.
  auto &Context = getContext();
  auto *DescBegin = Context.createTempSymbol();
  auto *DescEnd = Context.createTempSymbol();
  auto *DescSZ = MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(DescEnd, Context),
      MCSymbolRefExpr::create(DescBegin, Context), Context);

  EmitNote(ElfNote::NoteNameV2, DescSZ, ELF::NT_AMD_AMDGPU_HSA_METADATA,
           [&](MCELFStreamer &OS) {
             OS.emitLabel(DescBegin);
             OS.emitBytes(HSAMetadataString);
             OS.emitLabel(DescEnd);
           });
  return true;
}

bool AMDGPUTargetELFStreamer::EmitCodeEnd() {
  const uint32_t Encoded_s_code_end = 0xbf9f0000;

  MCStreamer &OS = getStreamer();
  OS.PushSection();
  OS.emitValueToAlignment(64, Encoded_s_code_end, 4);
  for (unsigned I = 0; I < 48; ++I)
    OS.emitInt32(Encoded_s_code_end);
  OS.PopSection();
  return true;
}

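// Emit the kernel descriptor as a separate <kernel-name>.kd object symbol that
// mirrors the binding and visibility of the kernel code symbol. The
// kernel_code_entry_byte_offset field is written as the difference between the
// two symbols, so it resolves to the offset of the kernel entry point relative
// to the descriptor.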
void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr,
    bool ReserveXNACK) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(KernelDescriptor), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.emitLabel(KernelDescriptorSymbol);
  Streamer.emitInt32(KernelDescriptor.group_segment_fixed_size);
  Streamer.emitInt32(KernelDescriptor.private_segment_fixed_size);
  for (uint8_t Res : KernelDescriptor.reserved0)
    Streamer.emitInt8(Res);
  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.emitValue(MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(
          KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
      MCSymbolRefExpr::create(
          KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
      Context),
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
  for (uint8_t Res : KernelDescriptor.reserved1)
    Streamer.emitInt8(Res);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc3);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc1);
  Streamer.emitInt32(KernelDescriptor.compute_pgm_rsrc2);
  Streamer.emitInt16(KernelDescriptor.kernel_code_properties);
  for (uint8_t Res : KernelDescriptor.reserved2)
    Streamer.emitInt8(Res);
}