//===--- amdgpu/impl/system.cpp ---------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 #include <libelf.h>
9 
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <map>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
13 
14 #include "internal.h"
15 #include "machine.h"
16 #include "rt.h"
17 
18 #include "msgpack.h"
19 
20 namespace hsa {
21 // Wrap HSA iterate API in a shim that allows passing general callables
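// For example (where process() is a placeholder for any callable body):
//   hsa::executable_iterate_symbols(
//       executable,
//       [&](hsa_executable_t, hsa_executable_symbol_t symbol) -> hsa_status_t {
//         return process(symbol);
//       });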
22 template <typename C>
23 hsa_status_t executable_iterate_symbols(hsa_executable_t executable, C cb) {
24   auto L = [](hsa_executable_t executable, hsa_executable_symbol_t symbol,
25               void *data) -> hsa_status_t {
26     C *unwrapped = static_cast<C *>(data);
27     return (*unwrapped)(executable, symbol);
28   };
29   return hsa_executable_iterate_symbols(executable, L,
30                                         static_cast<void *>(&cb));
31 }
32 } // namespace hsa
33 
34 typedef unsigned char *address;
35 /*
36  * Note descriptors.
37  */
38 typedef struct {
39   uint32_t n_namesz; /* Length of note's name. */
40   uint32_t n_descsz; /* Length of note's value. */
41   uint32_t n_type;   /* Type of note. */
42   // then name
43   // then padding, optional
44   // then desc, at 4 byte alignment (not 8, despite being elf64)
45 } Elf_Note;
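
// A PT_NOTE segment holds a sequence of such records. The next record starts
// at sizeof(Elf_Note) + alignUp(n_namesz, 4) + alignUp(n_descsz, 4) bytes from
// the current one, which is how find_metadata() below walks the segment.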
46 
47 // The following include file and following structs/enums
48 // have been replicated on a per-use basis below. For example,
49 // llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields,
50 // but we may care only about kernargSegmentSize_ for now, so
51 // we just include that field in our KernelMD implementation. We
52 // chose this approach to replicate in order to avoid forcing
53 // a dependency on LLVM_INCLUDE_DIR just to compile the runtime.
54 // #include "llvm/Support/AMDGPUMetadata.h"
55 // typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
56 // typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
57 // typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
58 // using llvm::AMDGPU::HSAMD::AccessQualifier;
59 // using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
60 // using llvm::AMDGPU::HSAMD::ValueKind;
61 // using llvm::AMDGPU::HSAMD::ValueType;
62 
63 class KernelArgMD {
64 public:
65   enum class ValueKind {
66     HiddenGlobalOffsetX,
67     HiddenGlobalOffsetY,
68     HiddenGlobalOffsetZ,
69     HiddenNone,
70     HiddenPrintfBuffer,
71     HiddenDefaultQueue,
72     HiddenCompletionAction,
73     HiddenMultiGridSyncArg,
74     HiddenHostcallBuffer,
75     Unknown
76   };
77 
78   KernelArgMD()
79       : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
80         align_(0), valueKind_(ValueKind::Unknown) {}
81 
82   // fields
83   std::string name_;
84   std::string typeName_;
85   uint32_t size_;
86   uint32_t offset_;
87   uint32_t align_;
88   ValueKind valueKind_;
89 };
90 
91 class KernelMD {
92 public:
93   KernelMD() : kernargSegmentSize_(0ull) {}
94 
95   // fields
96   uint64_t kernargSegmentSize_;
97 };
98 
99 static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
100     //    Including only those fields that are relevant to the runtime.
101     //    {"ByValue", KernelArgMD::ValueKind::ByValue},
102     //    {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
103     //    {"DynamicSharedPointer",
104     //    KernelArgMD::ValueKind::DynamicSharedPointer},
105     //    {"Sampler", KernelArgMD::ValueKind::Sampler},
106     //    {"Image", KernelArgMD::ValueKind::Image},
107     //    {"Pipe", KernelArgMD::ValueKind::Pipe},
108     //    {"Queue", KernelArgMD::ValueKind::Queue},
109     {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
110     {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
111     {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
112     {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
113     {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
114     {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
115     {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
116     {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
117     {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
118     // v3
119     //    {"by_value", KernelArgMD::ValueKind::ByValue},
120     //    {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
121     //    {"dynamic_shared_pointer",
122     //    KernelArgMD::ValueKind::DynamicSharedPointer},
123     //    {"sampler", KernelArgMD::ValueKind::Sampler},
124     //    {"image", KernelArgMD::ValueKind::Image},
125     //    {"pipe", KernelArgMD::ValueKind::Pipe},
126     //    {"queue", KernelArgMD::ValueKind::Queue},
127     {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
128     {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
129     {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
130     {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
131     {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
132     {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
133     {"hidden_completion_action",
134      KernelArgMD::ValueKind::HiddenCompletionAction},
135     {"hidden_multigrid_sync_arg",
136      KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
137     {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
138 };
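
// ArgValueKind keeps both the older (v2-era, PascalCase) and the code object
// v3 (snake_case) spellings of ".value_kind"; populate_kernelArgMD() below
// looks up whichever form appears in the msgpack metadata.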
139 
140 ATLMachine g_atl_machine;
141 
142 namespace core {
143 
144 // Implement memory_pool iteration function
145 static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
146                                          void *data) {
147   ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
148   hsa_status_t err = HSA_STATUS_SUCCESS;
149   // Check if the memory_pool is allowed to allocate, i.e. do not return group
150   // memory
151   bool alloc_allowed = false;
152   err = hsa_amd_memory_pool_get_info(
153       memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
154       &alloc_allowed);
155   if (err != HSA_STATUS_SUCCESS) {
156     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
157            "Alloc allowed in memory pool check", get_error_string(err));
158     return err;
159   }
160   if (alloc_allowed) {
161     uint32_t global_flag = 0;
162     err = hsa_amd_memory_pool_get_info(
163         memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
164     if (err != HSA_STATUS_SUCCESS) {
165       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
166              "Get memory pool info", get_error_string(err));
167       return err;
168     }
169     if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
170       ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
171       proc->addMemory(new_mem);
172     } else {
173       ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
174       proc->addMemory(new_mem);
175     }
176   }
177 
178   return err;
179 }
180 
181 static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
182   hsa_status_t err = HSA_STATUS_SUCCESS;
183   hsa_device_type_t device_type;
184   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
185   if (err != HSA_STATUS_SUCCESS) {
186     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
187            "Get device type info", get_error_string(err));
188     return err;
189   }
190   switch (device_type) {
191   case HSA_DEVICE_TYPE_CPU: {
192     ATLCPUProcessor new_proc(agent);
193     err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
194                                              &new_proc);
195     if (err != HSA_STATUS_SUCCESS) {
196       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
197              "Iterate all memory pools", get_error_string(err));
198       return err;
199     }
200     g_atl_machine.addProcessor(new_proc);
201   } break;
202   case HSA_DEVICE_TYPE_GPU: {
203     hsa_profile_t profile;
204     err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
205     if (err != HSA_STATUS_SUCCESS) {
206       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
207              "Query the agent profile", get_error_string(err));
208       return err;
209     }
    atmi_devtype_t gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
213     ATLGPUProcessor new_proc(agent, gpu_type);
214     err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
215                                              &new_proc);
216     if (err != HSA_STATUS_SUCCESS) {
217       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
218              "Iterate all memory pools", get_error_string(err));
219       return err;
220     }
221     g_atl_machine.addProcessor(new_proc);
222   } break;
223   case HSA_DEVICE_TYPE_DSP: {
224     err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
225   } break;
226   }
227 
228   return err;
229 }
230 
231 static hsa_status_t init_compute_and_memory() {
232   hsa_status_t err;
233 
234   /* Iterate over the agents and pick the gpu agent */
235   err = hsa_iterate_agents(get_agent_info, NULL);
236   if (err == HSA_STATUS_INFO_BREAK) {
237     err = HSA_STATUS_SUCCESS;
238   }
239   if (err != HSA_STATUS_SUCCESS) {
240     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Getting a gpu agent",
241            get_error_string(err));
242     return err;
243   }
244 
245   /* Init all devices or individual device types? */
246   std::vector<ATLCPUProcessor> &cpu_procs =
247       g_atl_machine.processors<ATLCPUProcessor>();
248   std::vector<ATLGPUProcessor> &gpu_procs =
249       g_atl_machine.processors<ATLGPUProcessor>();
250   /* For CPU memory pools, add other devices that can access them directly
251    * or indirectly */
252   for (auto &cpu_proc : cpu_procs) {
253     for (auto &cpu_mem : cpu_proc.memories()) {
254       hsa_amd_memory_pool_t pool = cpu_mem.memory();
255       for (auto &gpu_proc : gpu_procs) {
256         hsa_agent_t agent = gpu_proc.agent();
257         hsa_amd_memory_pool_access_t access;
258         hsa_amd_agent_memory_pool_get_info(
259             agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
260         if (access != 0) {
          // A nonzero value means access is not "never allowed"; it is either
          // allowed by default or can be enabled on request. Either way the
          // GPU agent can reach this CPU pool, so record it on the processor.
263           gpu_proc.addMemory(cpu_mem);
264         }
265       }
266     }
267   }
268 
269   /* FIXME: are the below combinations of procs and memory pools needed?
270    * all to all compare procs with their memory pools and add those memory
271    * pools that are accessible by the target procs */
272   for (auto &gpu_proc : gpu_procs) {
273     for (auto &gpu_mem : gpu_proc.memories()) {
274       hsa_amd_memory_pool_t pool = gpu_mem.memory();
275       for (auto &cpu_proc : cpu_procs) {
276         hsa_agent_t agent = cpu_proc.agent();
277         hsa_amd_memory_pool_access_t access;
278         hsa_amd_agent_memory_pool_get_info(
279             agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
280         if (access != 0) {
          // A nonzero value means access is not "never allowed"; it is either
          // allowed by default or can be enabled on request. Either way the
          // CPU agent can reach this GPU pool, so record it on the processor.
283           cpu_proc.addMemory(gpu_mem);
284         }
285       }
286     }
287   }
288 
289   size_t num_procs = cpu_procs.size() + gpu_procs.size();
290   int num_iGPUs = 0;
291   int num_dGPUs = 0;
292   for (uint32_t i = 0; i < gpu_procs.size(); i++) {
293     if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
294       num_iGPUs++;
295     else
296       num_dGPUs++;
297   }
298   assert(num_iGPUs + num_dGPUs == gpu_procs.size() &&
299          "Number of dGPUs and iGPUs do not add up");
300   DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
301   DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
302   DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
303   DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());
304 
305   int cpus_begin = 0;
306   int cpus_end = cpu_procs.size();
307   int gpus_begin = cpu_procs.size();
308   int gpus_end = cpu_procs.size() + gpu_procs.size();
309   int proc_index = 0;
310   for (int i = cpus_begin; i < cpus_end; i++) {
311     std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
312     int fine_memories_size = 0;
313     int coarse_memories_size = 0;
314     DEBUG_PRINT("CPU memory types:\t");
315     for (auto &memory : memories) {
316       atmi_memtype_t type = memory.type();
317       if (type == ATMI_MEMTYPE_FINE_GRAINED) {
318         fine_memories_size++;
319         DEBUG_PRINT("Fine\t");
320       } else {
321         coarse_memories_size++;
322         DEBUG_PRINT("Coarse\t");
323       }
324     }
325     DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
326     DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
327     proc_index++;
328   }
329   proc_index = 0;
330   for (int i = gpus_begin; i < gpus_end; i++) {
331     std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
332     int fine_memories_size = 0;
333     int coarse_memories_size = 0;
334     DEBUG_PRINT("GPU memory types:\t");
335     for (auto &memory : memories) {
336       atmi_memtype_t type = memory.type();
337       if (type == ATMI_MEMTYPE_FINE_GRAINED) {
338         fine_memories_size++;
339         DEBUG_PRINT("Fine\t");
340       } else {
341         coarse_memories_size++;
342         DEBUG_PRINT("Coarse\t");
343       }
344     }
345     DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
346     DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
347     proc_index++;
348   }
349   if (num_procs > 0)
350     return HSA_STATUS_SUCCESS;
351   else
352     return HSA_STATUS_ERROR_NOT_INITIALIZED;
353 }
354 
355 hsa_status_t init_hsa() {
356   DEBUG_PRINT("Initializing HSA...");
357   hsa_status_t err = hsa_init();
358   if (err != HSA_STATUS_SUCCESS) {
359     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
360            "Initializing the hsa runtime", get_error_string(err));
361     return err;
362   }
365 
366   err = init_compute_and_memory();
369   if (err != HSA_STATUS_SUCCESS) {
370     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
371            "After initializing compute and memory", get_error_string(err));
372     return err;
373   }
374 
375   DEBUG_PRINT("done\n");
376   return HSA_STATUS_SUCCESS;
377 }
378 
379 hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
380 #if (ROCM_VERSION_MAJOR >= 3) ||                                               \
381     (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
382   if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
383 #else
384   if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
385 #endif
386     hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
387     // memory_fault.agent
388     // memory_fault.virtual_address
389     // memory_fault.fault_reason_mask
390     // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address);
391     std::stringstream stream;
392     stream << std::hex << (uintptr_t)memory_fault.virtual_address;
393     std::string addr("0x" + stream.str());
394 
395     std::string err_string = "[GPU Memory Error] Addr: " + addr;
396     err_string += " Reason: ";
    // fault_reason_mask is a bitfield of hsa_amd_memory_fault_reason_t values
    if (!(memory_fault.fault_reason_mask & 0x3f)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask & 0x1)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & 0x2)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & 0x4)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & 0x8)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & 0x10)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & 0x20)
        err_string += "Can't determine the exact fault address. ";
    }
413     fprintf(stderr, "%s\n", err_string.c_str());
414     return HSA_STATUS_ERROR;
415   }
416   return HSA_STATUS_SUCCESS;
417 }
418 
419 hsa_status_t atl_init_gpu_context() {
420   hsa_status_t err;
421   err = init_hsa();
422   if (err != HSA_STATUS_SUCCESS)
423     return HSA_STATUS_ERROR;
424 
425   err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
426   if (err != HSA_STATUS_SUCCESS) {
427     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
428            "Registering the system for memory faults", get_error_string(err));
429     return HSA_STATUS_ERROR;
430   }
431 
432   return HSA_STATUS_SUCCESS;
433 }
434 
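// Hidden (implicit) kernel arguments are appended by the compiler after the
// explicit arguments. They are identified here so that
// get_code_object_custom_metadata() can separate the explicit kernarg size
// from the compiler-emitted hidden arguments.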
435 static bool isImplicit(KernelArgMD::ValueKind value_kind) {
436   switch (value_kind) {
437   case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
438   case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
439   case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
440   case KernelArgMD::ValueKind::HiddenNone:
441   case KernelArgMD::ValueKind::HiddenPrintfBuffer:
442   case KernelArgMD::ValueKind::HiddenDefaultQueue:
443   case KernelArgMD::ValueKind::HiddenCompletionAction:
444   case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
445   case KernelArgMD::ValueKind::HiddenHostcallBuffer:
446     return true;
447   default:
448     return false;
449   }
450 }
451 
452 static std::pair<unsigned char *, unsigned char *>
453 find_metadata(void *binary, size_t binSize) {
454   std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};
455 
  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (!e || elf_kind(e) != ELF_K_ELF) {
458     return failure;
459   }
460 
461   size_t numpHdrs;
462   if (elf_getphdrnum(e, &numpHdrs) != 0) {
463     return failure;
464   }
465 
466   Elf64_Phdr *pHdrs = elf64_getphdr(e);
467   for (size_t i = 0; i < numpHdrs; ++i) {
468     Elf64_Phdr pHdr = pHdrs[i];
469 
470     // Look for the runtime metadata note
471     if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
472       // Iterate over the notes in this segment
473       address ptr = (address)binary + pHdr.p_offset;
474       address segmentEnd = ptr + pHdr.p_filesz;
475 
476       while (ptr < segmentEnd) {
477         Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
478         address name = (address)&note[1];
479 
480         if (note->n_type == 7 || note->n_type == 8) {
481           return failure;
482         } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
483                    note->n_namesz == sizeof "AMD" &&
484                    !memcmp(name, "AMD", note->n_namesz)) {
485           // code object v2 uses yaml metadata, no longer supported
486           return failure;
487         } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
488                    note->n_namesz == sizeof "AMDGPU" &&
489                    !memcmp(name, "AMDGPU", note->n_namesz)) {
490 
491           // n_descsz = 485
492           // value is padded to 4 byte alignment, may want to move end up to
493           // match
494           size_t offset = sizeof(uint32_t) * 3 /* fields */
495                           + sizeof("AMDGPU")   /* name */
496                           + 1 /* padding to 4 byte alignment */;
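          // i.e. 12 bytes of header + 7 bytes for "AMDGPU\0" + 1 byte of
          // padding puts the descriptor 20 bytes past the note header.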
497 
498           // Including the trailing padding means both pointers are 4 bytes
499           // aligned, which may be useful later.
500           unsigned char *metadata_start = (unsigned char *)ptr + offset;
501           unsigned char *metadata_end =
502               metadata_start + core::alignUp(note->n_descsz, 4);
503           return {metadata_start, metadata_end};
504         }
505         ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
506                core::alignUp(note->n_descsz, sizeof(int));
507       }
508     }
509   }
510 
511   return failure;
512 }
513 
514 namespace {
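// msgpack lookup helpers. Each returns 0 on success and nonzero on failure so
// that callers can accumulate the results into a single error count.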
515 int map_lookup_array(msgpack::byte_range message, const char *needle,
516                      msgpack::byte_range *res, uint64_t *size) {
517   unsigned count = 0;
518   struct s : msgpack::functors_defaults<s> {
519     s(unsigned &count, uint64_t *size) : count(count), size(size) {}
520     unsigned &count;
521     uint64_t *size;
522     const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
523       count++;
524       *size = N;
525       return bytes.end;
526     }
527   };
528 
529   msgpack::foreach_map(message,
530                        [&](msgpack::byte_range key, msgpack::byte_range value) {
531                          if (msgpack::message_is_string(key, needle)) {
532                            // If the message is an array, record number of
533                            // elements in *size
534                            msgpack::handle_msgpack<s>(value, {count, size});
535                            // return the whole array
536                            *res = value;
537                          }
538                        });
539   // Only claim success if exactly one key/array pair matched
540   return count != 1;
541 }
542 
543 int map_lookup_string(msgpack::byte_range message, const char *needle,
544                       std::string *res) {
545   unsigned count = 0;
546   struct s : public msgpack::functors_defaults<s> {
547     s(unsigned &count, std::string *res) : count(count), res(res) {}
548     unsigned &count;
549     std::string *res;
550     void handle_string(size_t N, const unsigned char *str) {
551       count++;
552       *res = std::string(str, str + N);
553     }
554   };
555   msgpack::foreach_map(message,
556                        [&](msgpack::byte_range key, msgpack::byte_range value) {
557                          if (msgpack::message_is_string(key, needle)) {
558                            msgpack::handle_msgpack<s>(value, {count, res});
559                          }
560                        });
561   return count != 1;
562 }
563 
564 int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
565                         uint64_t *res) {
566   unsigned count = 0;
567   msgpack::foreach_map(message,
568                        [&](msgpack::byte_range key, msgpack::byte_range value) {
569                          if (msgpack::message_is_string(key, needle)) {
570                            msgpack::foronly_unsigned(value, [&](uint64_t x) {
571                              count++;
572                              *res = x;
573                            });
574                          }
575                        });
576   return count != 1;
577 }
578 
579 int array_lookup_element(msgpack::byte_range message, uint64_t elt,
580                          msgpack::byte_range *res) {
581   int rc = 1;
582   uint64_t i = 0;
583   msgpack::foreach_array(message, [&](msgpack::byte_range value) {
584     if (i == elt) {
585       *res = value;
586       rc = 0;
587     }
588     i++;
589   });
590   return rc;
591 }
592 
593 int populate_kernelArgMD(msgpack::byte_range args_element,
594                          KernelArgMD *kernelarg) {
595   using namespace msgpack;
596   int error = 0;
597   foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
598     if (message_is_string(key, ".name")) {
599       foronly_string(value, [&](size_t N, const unsigned char *str) {
600         kernelarg->name_ = std::string(str, str + N);
601       });
602     } else if (message_is_string(key, ".type_name")) {
603       foronly_string(value, [&](size_t N, const unsigned char *str) {
604         kernelarg->typeName_ = std::string(str, str + N);
605       });
606     } else if (message_is_string(key, ".size")) {
607       foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
608     } else if (message_is_string(key, ".offset")) {
609       foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
610     } else if (message_is_string(key, ".value_kind")) {
611       foronly_string(value, [&](size_t N, const unsigned char *str) {
612         std::string s = std::string(str, str + N);
613         auto itValueKind = ArgValueKind.find(s);
614         if (itValueKind != ArgValueKind.end()) {
615           kernelarg->valueKind_ = itValueKind->second;
616         }
617       });
618     }
619   });
620   return error;
621 }
622 } // namespace
623 
624 static hsa_status_t get_code_object_custom_metadata(
625     void *binary, size_t binSize,
626     std::map<std::string, atl_kernel_info_t> &KernelInfoTable) {
  // Parse the code object metadata, whose msgpack keys differ from v2.
  // Also, the kernel name is not the same as the symbol name (the symbol
  // carries a ".kd" suffix), so a symbol -> kernel-name mapping is needed.
630 
631   std::pair<unsigned char *, unsigned char *> metadata =
632       find_metadata(binary, binSize);
633   if (!metadata.first) {
634     return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
635   }
636 
637   uint64_t kernelsSize = 0;
638   int msgpack_errors = 0;
639   msgpack::byte_range kernel_array;
640   msgpack_errors =
641       map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
642                        &kernel_array, &kernelsSize);
643   if (msgpack_errors != 0) {
644     printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
645            "kernels lookup in program metadata");
646     return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
647   }
648 
649   for (size_t i = 0; i < kernelsSize; i++) {
650     assert(msgpack_errors == 0);
651     std::string kernelName;
652     std::string symbolName;
653 
654     msgpack::byte_range element;
655     msgpack_errors += array_lookup_element(kernel_array, i, &element);
656     if (msgpack_errors != 0) {
657       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
658              "element lookup in kernel metadata");
659       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
660     }
661 
662     msgpack_errors += map_lookup_string(element, ".name", &kernelName);
663     msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
664     if (msgpack_errors != 0) {
665       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
666              "strings lookup in kernel metadata");
667       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
668     }
669 
670     // Make sure that kernelName + ".kd" == symbolName
671     if ((kernelName + ".kd") != symbolName) {
672       printf("[%s:%d] Kernel name mismatching symbol: %s != %s + .kd\n",
673              __FILE__, __LINE__, symbolName.c_str(), kernelName.c_str());
674       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
675     }
676 
677     atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}};
678 
679     uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count;
680     msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count);
681     if (msgpack_errors != 0) {
682       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
683              "sgpr count metadata lookup in kernel metadata");
684       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
685     }
686 
687     info.sgpr_count = sgpr_count;
688 
689     msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count);
690     if (msgpack_errors != 0) {
691       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
692              "vgpr count metadata lookup in kernel metadata");
693       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
694     }
695 
696     info.vgpr_count = vgpr_count;
697 
698     msgpack_errors +=
699         map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count);
700     if (msgpack_errors != 0) {
701       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
702              "sgpr spill count metadata lookup in kernel metadata");
703       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
704     }
705 
706     info.sgpr_spill_count = sgpr_spill_count;
707 
708     msgpack_errors +=
709         map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count);
710     if (msgpack_errors != 0) {
711       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
712              "vgpr spill count metadata lookup in kernel metadata");
713       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
714     }
715 
716     info.vgpr_spill_count = vgpr_spill_count;
717 
718     size_t kernel_explicit_args_size = 0;
719     uint64_t kernel_segment_size;
720     msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
721                                           &kernel_segment_size);
722     if (msgpack_errors != 0) {
723       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
724              "kernarg segment size metadata lookup in kernel metadata");
725       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
726     }
727 
728     bool hasHiddenArgs = false;
729     if (kernel_segment_size > 0) {
730       uint64_t argsSize;
731       size_t offset = 0;
732 
733       msgpack::byte_range args_array;
734       msgpack_errors +=
735           map_lookup_array(element, ".args", &args_array, &argsSize);
736       if (msgpack_errors != 0) {
737         printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
738                "kernel args metadata lookup in kernel metadata");
739         return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
740       }
741 
742       info.num_args = argsSize;
743 
744       for (size_t i = 0; i < argsSize; ++i) {
745         KernelArgMD lcArg;
746 
747         msgpack::byte_range args_element;
748         msgpack_errors += array_lookup_element(args_array, i, &args_element);
749         if (msgpack_errors != 0) {
750           printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
751                  "iterate args map in kernel args metadata");
752           return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
753         }
754 
755         msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
756         if (msgpack_errors != 0) {
757           printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "populate kernel arg metadata from args map");
759           return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
760         }
761         // populate info with sizes and offsets
762         info.arg_sizes.push_back(lcArg.size_);
763         // v3 has offset field and not align field
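        // The gap between the previous argument's end and this offset is
        // alignment padding; it is counted into the explicit kernarg size.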
764         size_t new_offset = lcArg.offset_;
765         size_t padding = new_offset - offset;
766         offset = new_offset;
767         info.arg_offsets.push_back(lcArg.offset_);
768         DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(),
769                     lcArg.size_, lcArg.offset_);
770         offset += lcArg.size_;
771 
772         // check if the arg is a hidden/implicit arg
773         // this logic assumes that all hidden args are 8-byte aligned
774         if (!isImplicit(lcArg.valueKind_)) {
775           kernel_explicit_args_size += lcArg.size_;
776         } else {
777           hasHiddenArgs = true;
778         }
779         kernel_explicit_args_size += padding;
780       }
781     }
782 
    // Add space for the implicit (hidden) args, e.g. the global offsets x, y,
    // z and the pipe pointer. ATMI does not count the compiler-emitted
    // implicit args; it discounts them (when present) and appends its own
    // atmi_implicit_args_t instead.
786     info.kernel_segment_size =
787         (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
788         sizeof(atmi_implicit_args_t);
789     DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
790                 kernel_segment_size, info.kernel_segment_size);
791 
792     // kernel received, now add it to the kernel info table
793     KernelInfoTable[kernelName] = info;
794   }
795 
796   return HSA_STATUS_SUCCESS;
797 }
798 
799 static hsa_status_t
800 populate_InfoTables(hsa_executable_symbol_t symbol,
801                     std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
802                     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable) {
803   hsa_symbol_kind_t type;
804 
805   uint32_t name_length;
806   hsa_status_t err;
807   err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
808                                        &type);
809   if (err != HSA_STATUS_SUCCESS) {
810     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
811            "Symbol info extraction", get_error_string(err));
812     return err;
813   }
814   DEBUG_PRINT("Exec Symbol type: %d\n", type);
815   if (type == HSA_SYMBOL_KIND_KERNEL) {
816     err = hsa_executable_symbol_get_info(
817         symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
818     if (err != HSA_STATUS_SUCCESS) {
819       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
820              "Symbol info extraction", get_error_string(err));
821       return err;
822     }
823     char *name = reinterpret_cast<char *>(malloc(name_length + 1));
824     err = hsa_executable_symbol_get_info(symbol,
825                                          HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
826     if (err != HSA_STATUS_SUCCESS) {
827       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
828              "Symbol info extraction", get_error_string(err));
829       return err;
830     }
831     // remove the suffix .kd from symbol name.
832     name[name_length - 3] = 0;
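    // e.g. a (hypothetical) symbol "foo.kd" yields the kernel name "foo"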
833 
834     atl_kernel_info_t info;
835     std::string kernelName(name);
836     // by now, the kernel info table should already have an entry
837     // because the non-ROCr custom code object parsing is called before
838     // iterating over the code object symbols using ROCr
    if (KernelInfoTable.find(kernelName) == KernelInfoTable.end()) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding the entry kernel info table",
             get_error_string(HSA_STATUS_ERROR_INVALID_CODE_OBJECT));
      exit(1);
    }
847     // found, so assign and update
848     info = KernelInfoTable[kernelName];
849 
850     /* Extract dispatch information from the symbol */
851     err = hsa_executable_symbol_get_info(
852         symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
853         &(info.kernel_object));
854     if (err != HSA_STATUS_SUCCESS) {
855       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
856              "Extracting the symbol from the executable",
857              get_error_string(err));
858       return err;
859     }
860     err = hsa_executable_symbol_get_info(
861         symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
862         &(info.group_segment_size));
863     if (err != HSA_STATUS_SUCCESS) {
864       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
865              "Extracting the group segment size from the executable",
866              get_error_string(err));
867       return err;
868     }
869     err = hsa_executable_symbol_get_info(
870         symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
871         &(info.private_segment_size));
872     if (err != HSA_STATUS_SUCCESS) {
873       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
874              "Extracting the private segment from the executable",
875              get_error_string(err));
876       return err;
877     }
878 
879     DEBUG_PRINT(
880         "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
881         "kernarg\n",
882         kernelName.c_str(), info.kernel_object, info.group_segment_size,
883         info.private_segment_size, info.kernel_segment_size);
884 
885     // assign it back to the kernel info table
886     KernelInfoTable[kernelName] = info;
887     free(name);
888   } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
889     err = hsa_executable_symbol_get_info(
890         symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
891     if (err != HSA_STATUS_SUCCESS) {
892       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
893              "Symbol info extraction", get_error_string(err));
894       return err;
895     }
896     char *name = reinterpret_cast<char *>(malloc(name_length + 1));
897     err = hsa_executable_symbol_get_info(symbol,
898                                          HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
899     if (err != HSA_STATUS_SUCCESS) {
900       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
901              "Symbol info extraction", get_error_string(err));
902       return err;
903     }
904     name[name_length] = 0;
905 
906     atl_symbol_info_t info;
907 
908     err = hsa_executable_symbol_get_info(
909         symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
910     if (err != HSA_STATUS_SUCCESS) {
911       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
912              "Symbol info address extraction", get_error_string(err));
913       return err;
914     }
915 
916     err = hsa_executable_symbol_get_info(
917         symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
918     if (err != HSA_STATUS_SUCCESS) {
919       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
920              "Symbol info size extraction", get_error_string(err));
921       return err;
922     }
923 
924     DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
925                 info.size);
926     SymbolInfoTable[std::string(name)] = info;
927     free(name);
928   } else {
929     DEBUG_PRINT("Symbol is an indirect function\n");
930   }
931   return HSA_STATUS_SUCCESS;
932 }
933 
934 hsa_status_t RegisterModuleFromMemory(
935     std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
936     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
937     void *module_bytes, size_t module_size, hsa_agent_t agent,
938     hsa_status_t (*on_deserialized_data)(void *data, size_t size,
939                                          void *cb_state),
940     void *cb_state, std::vector<hsa_executable_t> &HSAExecutables) {
941   hsa_status_t err;
942   hsa_executable_t executable = {0};
943   hsa_profile_t agent_profile;
944 
945   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
946   if (err != HSA_STATUS_SUCCESS) {
947     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
948            "Query the agent profile", get_error_string(err));
949     return HSA_STATUS_ERROR;
950   }
951   // FIXME: Assume that every profile is FULL until we understand how to build
952   // GCN with base profile
953   agent_profile = HSA_PROFILE_FULL;
954   /* Create the empty executable.  */
955   err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
956                               &executable);
957   if (err != HSA_STATUS_SUCCESS) {
958     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
959            "Create the executable", get_error_string(err));
960     return HSA_STATUS_ERROR;
961   }
962 
963   bool module_load_success = false;
964   do // Existing control flow used continue, preserve that for this patch
965   {
966     {
967       // Some metadata info is not available through ROCr API, so use custom
968       // code object metadata parsing to collect such metadata info
969 
970       err = get_code_object_custom_metadata(module_bytes, module_size,
971                                             KernelInfoTable);
972       if (err != HSA_STATUS_SUCCESS) {
973         DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
974                     "Getting custom code object metadata",
975                     get_error_string(err));
976         continue;
977       }
978 
979       // Deserialize code object.
980       hsa_code_object_t code_object = {0};
981       err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
982                                         &code_object);
983       if (err != HSA_STATUS_SUCCESS) {
984         DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
985                     "Code Object Deserialization", get_error_string(err));
986         continue;
987       }
988       assert(0 != code_object.handle);
989 
990       // Mutating the device image here avoids another allocation & memcpy
991       void *code_object_alloc_data =
992           reinterpret_cast<void *>(code_object.handle);
993       hsa_status_t atmi_err =
994           on_deserialized_data(code_object_alloc_data, module_size, cb_state);
995       if (atmi_err != HSA_STATUS_SUCCESS) {
996         printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
997                "Error in deserialized_data callback",
998                get_error_string(atmi_err));
999         return atmi_err;
1000       }
1001 
1002       /* Load the code object.  */
1003       err =
1004           hsa_executable_load_code_object(executable, agent, code_object, NULL);
1005       if (err != HSA_STATUS_SUCCESS) {
1006         DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
1007                     "Loading the code object", get_error_string(err));
1008         continue;
1009       }
1010 
1011       // cannot iterate over symbols until executable is frozen
1012     }
1013     module_load_success = true;
1014   } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
1016   if (module_load_success) {
1017     /* Freeze the executable; it can now be queried for symbols.  */
1018     err = hsa_executable_freeze(executable, "");
1019     if (err != HSA_STATUS_SUCCESS) {
1020       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
1021              "Freeze the executable", get_error_string(err));
1022       return HSA_STATUS_ERROR;
1023     }
1024 
1025     err = hsa::executable_iterate_symbols(
1026         executable,
1027         [&](hsa_executable_t, hsa_executable_symbol_t symbol) -> hsa_status_t {
1028           return populate_InfoTables(symbol, KernelInfoTable, SymbolInfoTable);
1029         });
1030     if (err != HSA_STATUS_SUCCESS) {
1031       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterating over symbols for executable", get_error_string(err));
1033       return HSA_STATUS_ERROR;
1034     }
1035 
1036     // save the executable and destroy during finalize
1037     HSAExecutables.push_back(executable);
1038     return HSA_STATUS_SUCCESS;
1039   } else {
1040     return HSA_STATUS_ERROR;
1041   }
1042 }
1043 
1044 } // namespace core
1045