1 /*===--------------------------------------------------------------------------
2  *              ATMI (Asynchronous Task and Memory Interface)
3  *
4  * This file is distributed under the MIT License. See LICENSE.txt for details.
5  *===------------------------------------------------------------------------*/
6 #include <libelf.h>
7 
8 #include <cassert>
9 #include <sstream>
10 #include <string>
11 
12 #include "internal.h"
13 #include "machine.h"
14 #include "rt.h"
15 
16 #include "msgpack.h"
17 
18 namespace hsa {
19 // Wrap HSA iterate API in a shim that allows passing general callables
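// (e.g. lambdas with captures) instead of a bare C function pointer + void*.
// See RegisterModuleFromMemory() below, which passes a capturing lambda that
// forwards each symbol to populate_InfoTables().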
20 template <typename C>
21 hsa_status_t executable_iterate_symbols(hsa_executable_t executable, C cb) {
22   auto L = [](hsa_executable_t executable, hsa_executable_symbol_t symbol,
23               void *data) -> hsa_status_t {
24     C *unwrapped = static_cast<C *>(data);
25     return (*unwrapped)(executable, symbol);
26   };
27   return hsa_executable_iterate_symbols(executable, L,
28                                         static_cast<void *>(&cb));
29 }
30 } // namespace hsa
31 
32 typedef unsigned char *address;
33 /*
34  * Note descriptors.
35  */
36 typedef struct {
37   uint32_t n_namesz; /* Length of note's name. */
38   uint32_t n_descsz; /* Length of note's value. */
39   uint32_t n_type;   /* Type of note. */
40   // then name
41   // then padding, optional
42   // then desc, at 4 byte alignment (not 8, despite being elf64)
43 } Elf_Note;
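
// A note record in a PT_NOTE segment is laid out as:
//   [Elf_Note header][name: n_namesz bytes, NUL included][pad to 4 bytes]
//   [desc: n_descsz bytes][pad to 4 bytes]
// find_metadata() below walks the records using exactly this stride.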
44 
// The structs/enums declared in the include file referenced below have been
// replicated here on a per-use basis. For example,
// llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, but we only care
// about kernargSegmentSize_ for now, so our KernelMD carries just that field.
// Replicating the declarations avoids forcing a dependency on LLVM_INCLUDE_DIR
// just to compile the runtime.
52 // #include "llvm/Support/AMDGPUMetadata.h"
53 // typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
54 // typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
55 // typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
56 // using llvm::AMDGPU::HSAMD::AccessQualifier;
57 // using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
58 // using llvm::AMDGPU::HSAMD::ValueKind;
59 // using llvm::AMDGPU::HSAMD::ValueType;
60 
61 class KernelArgMD {
62 public:
63   enum class ValueKind {
64     HiddenGlobalOffsetX,
65     HiddenGlobalOffsetY,
66     HiddenGlobalOffsetZ,
67     HiddenNone,
68     HiddenPrintfBuffer,
69     HiddenDefaultQueue,
70     HiddenCompletionAction,
71     HiddenMultiGridSyncArg,
72     HiddenHostcallBuffer,
73     Unknown
74   };
75 
76   KernelArgMD()
77       : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
78         align_(0), valueKind_(ValueKind::Unknown) {}
79 
80   // fields
81   std::string name_;
82   std::string typeName_;
83   uint32_t size_;
84   uint32_t offset_;
85   uint32_t align_;
86   ValueKind valueKind_;
87 };
88 
89 class KernelMD {
90 public:
91   KernelMD() : kernargSegmentSize_(0ull) {}
92 
93   // fields
94   uint64_t kernargSegmentSize_;
95 };
96 
97 static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
98     //    Including only those fields that are relevant to the runtime.
99     //    {"ByValue", KernelArgMD::ValueKind::ByValue},
100     //    {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
101     //    {"DynamicSharedPointer",
102     //    KernelArgMD::ValueKind::DynamicSharedPointer},
103     //    {"Sampler", KernelArgMD::ValueKind::Sampler},
104     //    {"Image", KernelArgMD::ValueKind::Image},
105     //    {"Pipe", KernelArgMD::ValueKind::Pipe},
106     //    {"Queue", KernelArgMD::ValueKind::Queue},
107     {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
108     {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
109     {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
110     {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
111     {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
112     {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
113     {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
114     {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
115     {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
116     // v3
117     //    {"by_value", KernelArgMD::ValueKind::ByValue},
118     //    {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
119     //    {"dynamic_shared_pointer",
120     //    KernelArgMD::ValueKind::DynamicSharedPointer},
121     //    {"sampler", KernelArgMD::ValueKind::Sampler},
122     //    {"image", KernelArgMD::ValueKind::Image},
123     //    {"pipe", KernelArgMD::ValueKind::Pipe},
124     //    {"queue", KernelArgMD::ValueKind::Queue},
125     {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
126     {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
127     {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
128     {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
129     {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
130     {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
131     {"hidden_completion_action",
132      KernelArgMD::ValueKind::HiddenCompletionAction},
133     {"hidden_multigrid_sync_arg",
134      KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
135     {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
136 };
137 
138 ATLMachine g_atl_machine;
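// Global machine model: every CPU and GPU processor discovered through
// hsa_iterate_agents() together with the memory pools each one can allocate
// from or access. Populated by init_compute_and_memory() below.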
139 
140 namespace core {
141 
142 // Implement memory_pool iteration function
143 static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
144                                          void *data) {
145   ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
146   hsa_status_t err = HSA_STATUS_SUCCESS;
147   // Check if the memory_pool is allowed to allocate, i.e. do not return group
148   // memory
149   bool alloc_allowed = false;
150   err = hsa_amd_memory_pool_get_info(
151       memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
152       &alloc_allowed);
153   if (err != HSA_STATUS_SUCCESS) {
154     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
155            "Alloc allowed in memory pool check", get_error_string(err));
156     return err;
157   }
158   if (alloc_allowed) {
159     uint32_t global_flag = 0;
160     err = hsa_amd_memory_pool_get_info(
161         memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
162     if (err != HSA_STATUS_SUCCESS) {
163       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
164              "Get memory pool info", get_error_string(err));
165       return err;
166     }
167     if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
168       ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
169       proc->addMemory(new_mem);
170     } else {
171       ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
172       proc->addMemory(new_mem);
173     }
174   }
175 
176   return err;
177 }
178 
179 static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
180   hsa_status_t err = HSA_STATUS_SUCCESS;
181   hsa_device_type_t device_type;
182   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
183   if (err != HSA_STATUS_SUCCESS) {
184     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
185            "Get device type info", get_error_string(err));
186     return err;
187   }
188   switch (device_type) {
189   case HSA_DEVICE_TYPE_CPU: {
190     ATLCPUProcessor new_proc(agent);
191     err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
192                                              &new_proc);
193     if (err != HSA_STATUS_SUCCESS) {
194       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
195              "Iterate all memory pools", get_error_string(err));
196       return err;
197     }
198     g_atl_machine.addProcessor(new_proc);
199   } break;
200   case HSA_DEVICE_TYPE_GPU: {
201     hsa_profile_t profile;
202     err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
203     if (err != HSA_STATUS_SUCCESS) {
204       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
205              "Query the agent profile", get_error_string(err));
206       return err;
207     }
208     atmi_devtype_t gpu_type;
209     gpu_type =
210         (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
211     ATLGPUProcessor new_proc(agent, gpu_type);
212     err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
213                                              &new_proc);
214     if (err != HSA_STATUS_SUCCESS) {
215       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
216              "Iterate all memory pools", get_error_string(err));
217       return err;
218     }
219     g_atl_machine.addProcessor(new_proc);
220   } break;
221   case HSA_DEVICE_TYPE_DSP: {
222     err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
223   } break;
224   }
225 
226   return err;
227 }
228 
229 static hsa_status_t init_compute_and_memory() {
230   hsa_status_t err;
231 
  /* Iterate over all agents and record the CPU and GPU processors */
233   err = hsa_iterate_agents(get_agent_info, NULL);
234   if (err == HSA_STATUS_INFO_BREAK) {
235     err = HSA_STATUS_SUCCESS;
236   }
237   if (err != HSA_STATUS_SUCCESS) {
238     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Getting a gpu agent",
239            get_error_string(err));
240     return err;
241   }
242 
243   /* Init all devices or individual device types? */
244   std::vector<ATLCPUProcessor> &cpu_procs =
245       g_atl_machine.processors<ATLCPUProcessor>();
246   std::vector<ATLGPUProcessor> &gpu_procs =
247       g_atl_machine.processors<ATLGPUProcessor>();
248   /* For CPU memory pools, add other devices that can access them directly
249    * or indirectly */
250   for (auto &cpu_proc : cpu_procs) {
251     for (auto &cpu_mem : cpu_proc.memories()) {
252       hsa_amd_memory_pool_t pool = cpu_mem.memory();
253       for (auto &gpu_proc : gpu_procs) {
254         hsa_agent_t agent = gpu_proc.agent();
255         hsa_amd_memory_pool_access_t access;
256         hsa_amd_agent_memory_pool_get_info(
257             agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
258         if (access != 0) {
259           // this means not NEVER, but could be YES or NO
260           // add this memory pool to the proc
261           gpu_proc.addMemory(cpu_mem);
262         }
263       }
264     }
265   }
266 
267   /* FIXME: are the below combinations of procs and memory pools needed?
268    * all to all compare procs with their memory pools and add those memory
269    * pools that are accessible by the target procs */
270   for (auto &gpu_proc : gpu_procs) {
271     for (auto &gpu_mem : gpu_proc.memories()) {
272       hsa_amd_memory_pool_t pool = gpu_mem.memory();
273       for (auto &cpu_proc : cpu_procs) {
274         hsa_agent_t agent = cpu_proc.agent();
275         hsa_amd_memory_pool_access_t access;
276         hsa_amd_agent_memory_pool_get_info(
277             agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
278         if (access != 0) {
279           // this means not NEVER, but could be YES or NO
280           // add this memory pool to the proc
281           cpu_proc.addMemory(gpu_mem);
282         }
283       }
284     }
285   }
286 
287   size_t num_procs = cpu_procs.size() + gpu_procs.size();
288   int num_iGPUs = 0;
289   int num_dGPUs = 0;
290   for (uint32_t i = 0; i < gpu_procs.size(); i++) {
291     if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
292       num_iGPUs++;
293     else
294       num_dGPUs++;
295   }
  assert(static_cast<size_t>(num_iGPUs + num_dGPUs) == gpu_procs.size() &&
         "Number of dGPUs and iGPUs do not add up");
298   DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
299   DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
300   DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
301   DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());
302 
303   int cpus_begin = 0;
304   int cpus_end = cpu_procs.size();
305   int gpus_begin = cpu_procs.size();
306   int gpus_end = cpu_procs.size() + gpu_procs.size();
307   int proc_index = 0;
308   for (int i = cpus_begin; i < cpus_end; i++) {
309     std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
310     int fine_memories_size = 0;
311     int coarse_memories_size = 0;
312     DEBUG_PRINT("CPU memory types:\t");
313     for (auto &memory : memories) {
314       atmi_memtype_t type = memory.type();
315       if (type == ATMI_MEMTYPE_FINE_GRAINED) {
316         fine_memories_size++;
317         DEBUG_PRINT("Fine\t");
318       } else {
319         coarse_memories_size++;
320         DEBUG_PRINT("Coarse\t");
321       }
322     }
323     DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
324     DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
325     proc_index++;
326   }
327   proc_index = 0;
328   for (int i = gpus_begin; i < gpus_end; i++) {
329     std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
330     int fine_memories_size = 0;
331     int coarse_memories_size = 0;
332     DEBUG_PRINT("GPU memory types:\t");
333     for (auto &memory : memories) {
334       atmi_memtype_t type = memory.type();
335       if (type == ATMI_MEMTYPE_FINE_GRAINED) {
336         fine_memories_size++;
337         DEBUG_PRINT("Fine\t");
338       } else {
339         coarse_memories_size++;
340         DEBUG_PRINT("Coarse\t");
341       }
342     }
343     DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
344     DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
345     proc_index++;
346   }
347   if (num_procs > 0)
348     return HSA_STATUS_SUCCESS;
349   else
350     return HSA_STATUS_ERROR_NOT_INITIALIZED;
351 }
352 
353 hsa_status_t init_hsa() {
354   DEBUG_PRINT("Initializing HSA...");
355   hsa_status_t err = hsa_init();
356   if (err != HSA_STATUS_SUCCESS) {
357     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
358            "Initializing the hsa runtime", get_error_string(err));
359     return err;
360   }

  err = init_compute_and_memory();
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Initializing compute and memory", get_error_string(err));
    return err;
  }
372 
373   DEBUG_PRINT("done\n");
374   return HSA_STATUS_SUCCESS;
375 }
376 
377 hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
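  // Registered with ROCr as the system event handler (see
  // atl_init_gpu_context() below). On a GPU memory fault, decode the faulting
  // address and the reason mask into a human-readable message and return an
  // error; all other event types are ignored.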
378 #if (ROCM_VERSION_MAJOR >= 3) ||                                               \
379     (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
380   if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
381 #else
382   if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
383 #endif
384     hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
385     // memory_fault.agent
386     // memory_fault.virtual_address
387     // memory_fault.fault_reason_mask
388     // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address);
389     std::stringstream stream;
390     stream << std::hex << (uintptr_t)memory_fault.virtual_address;
391     std::string addr("0x" + stream.str());
392 
393     std::string err_string = "[GPU Memory Error] Addr: " + addr;
394     err_string += " Reason: ";
    // fault_reason_mask is a bitmask of hsa_amd_memory_fault_reason_t flags.
    const uint32_t known_reasons =
        HSA_AMD_MEMORY_FAULT_PAGE_NOT_PRESENT |
        HSA_AMD_MEMORY_FAULT_READ_ONLY | HSA_AMD_MEMORY_FAULT_NX |
        HSA_AMD_MEMORY_FAULT_HOST_ONLY | HSA_AMD_MEMORY_FAULT_DRAMECC |
        HSA_AMD_MEMORY_FAULT_IMPRECISE;
    if (!(memory_fault.fault_reason_mask & known_reasons)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask &
          HSA_AMD_MEMORY_FAULT_PAGE_NOT_PRESENT)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_READ_ONLY)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_NX)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_HOST_ONLY)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_DRAMECC)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_IMPRECISE)
        err_string += "Can't determine the exact fault address. ";
    }
411     fprintf(stderr, "%s\n", err_string.c_str());
412     return HSA_STATUS_ERROR;
413   }
414   return HSA_STATUS_SUCCESS;
415 }
416 
417 hsa_status_t atl_init_gpu_context() {
418   hsa_status_t err;
419   err = init_hsa();
420   if (err != HSA_STATUS_SUCCESS)
421     return HSA_STATUS_ERROR;
422 
423   err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
424   if (err != HSA_STATUS_SUCCESS) {
425     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
426            "Registering the system for memory faults", get_error_string(err));
427     return HSA_STATUS_ERROR;
428   }
429 
430   return HSA_STATUS_SUCCESS;
431 }
432 
433 static bool isImplicit(KernelArgMD::ValueKind value_kind) {
434   switch (value_kind) {
435   case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
436   case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
437   case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
438   case KernelArgMD::ValueKind::HiddenNone:
439   case KernelArgMD::ValueKind::HiddenPrintfBuffer:
440   case KernelArgMD::ValueKind::HiddenDefaultQueue:
441   case KernelArgMD::ValueKind::HiddenCompletionAction:
442   case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
443   case KernelArgMD::ValueKind::HiddenHostcallBuffer:
444     return true;
445   default:
446     return false;
447   }
448 }
449 
450 static std::pair<unsigned char *, unsigned char *>
451 find_metadata(void *binary, size_t binSize) {
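  // Scan the ELF program headers for a PT_NOTE segment that carries the
  // NT_AMDGPU_METADATA (n_type == 32, name "AMDGPU") note and return the
  // [begin, end) byte range of its msgpack blob. Code object v2 (YAML
  // metadata) and anything else unexpected yields {nullptr, nullptr}.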
452   std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};
453 
454   Elf *e = elf_memory(static_cast<char *>(binary), binSize);
455   if (elf_kind(e) != ELF_K_ELF) {
456     return failure;
457   }
458 
459   size_t numpHdrs;
460   if (elf_getphdrnum(e, &numpHdrs) != 0) {
461     return failure;
462   }
463 
464   Elf64_Phdr *pHdrs = elf64_getphdr(e);
465   for (size_t i = 0; i < numpHdrs; ++i) {
466     Elf64_Phdr pHdr = pHdrs[i];
467 
468     // Look for the runtime metadata note
469     if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
470       // Iterate over the notes in this segment
471       address ptr = (address)binary + pHdr.p_offset;
472       address segmentEnd = ptr + pHdr.p_filesz;
473 
474       while (ptr < segmentEnd) {
475         Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
476         address name = (address)&note[1];
477 
478         if (note->n_type == 7 || note->n_type == 8) {
479           return failure;
480         } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
481                    note->n_namesz == sizeof "AMD" &&
482                    !memcmp(name, "AMD", note->n_namesz)) {
483           // code object v2 uses yaml metadata, no longer supported
484           return failure;
485         } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
486                    note->n_namesz == sizeof "AMDGPU" &&
487                    !memcmp(name, "AMDGPU", note->n_namesz)) {
488 
          // The note's desc (the msgpack blob) is padded to 4 byte alignment;
          // metadata_end below deliberately includes that trailing padding.
492           size_t offset = sizeof(uint32_t) * 3 /* fields */
493                           + sizeof("AMDGPU")   /* name */
494                           + 1 /* padding to 4 byte alignment */;
495 
496           // Including the trailing padding means both pointers are 4 bytes
497           // aligned, which may be useful later.
498           unsigned char *metadata_start = (unsigned char *)ptr + offset;
499           unsigned char *metadata_end =
500               metadata_start + core::alignUp(note->n_descsz, 4);
501           return {metadata_start, metadata_end};
502         }
503         ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
504                core::alignUp(note->n_descsz, sizeof(int));
505       }
506     }
507   }
508 
509   return failure;
510 }
511 
512 namespace {
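// msgpack lookup helpers. Each one returns 0 on success (exactly one matching
// key or element was found) and non-zero otherwise, so callers can accumulate
// the return values into a single error counter.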
513 int map_lookup_array(msgpack::byte_range message, const char *needle,
514                      msgpack::byte_range *res, uint64_t *size) {
515   unsigned count = 0;
516   struct s : msgpack::functors_defaults<s> {
517     s(unsigned &count, uint64_t *size) : count(count), size(size) {}
518     unsigned &count;
519     uint64_t *size;
520     const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
521       count++;
522       *size = N;
523       return bytes.end;
524     }
525   };
526 
527   msgpack::foreach_map(message,
528                        [&](msgpack::byte_range key, msgpack::byte_range value) {
529                          if (msgpack::message_is_string(key, needle)) {
530                            // If the message is an array, record number of
531                            // elements in *size
532                            msgpack::handle_msgpack<s>(value, {count, size});
533                            // return the whole array
534                            *res = value;
535                          }
536                        });
537   // Only claim success if exactly one key/array pair matched
538   return count != 1;
539 }
540 
541 int map_lookup_string(msgpack::byte_range message, const char *needle,
542                       std::string *res) {
543   unsigned count = 0;
544   struct s : public msgpack::functors_defaults<s> {
545     s(unsigned &count, std::string *res) : count(count), res(res) {}
546     unsigned &count;
547     std::string *res;
548     void handle_string(size_t N, const unsigned char *str) {
549       count++;
550       *res = std::string(str, str + N);
551     }
552   };
553   msgpack::foreach_map(message,
554                        [&](msgpack::byte_range key, msgpack::byte_range value) {
555                          if (msgpack::message_is_string(key, needle)) {
556                            msgpack::handle_msgpack<s>(value, {count, res});
557                          }
558                        });
559   return count != 1;
560 }
561 
562 int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
563                         uint64_t *res) {
564   unsigned count = 0;
565   msgpack::foreach_map(message,
566                        [&](msgpack::byte_range key, msgpack::byte_range value) {
567                          if (msgpack::message_is_string(key, needle)) {
568                            msgpack::foronly_unsigned(value, [&](uint64_t x) {
569                              count++;
570                              *res = x;
571                            });
572                          }
573                        });
574   return count != 1;
575 }
576 
577 int array_lookup_element(msgpack::byte_range message, uint64_t elt,
578                          msgpack::byte_range *res) {
579   int rc = 1;
580   uint64_t i = 0;
581   msgpack::foreach_array(message, [&](msgpack::byte_range value) {
582     if (i == elt) {
583       *res = value;
584       rc = 0;
585     }
586     i++;
587   });
588   return rc;
589 }
590 
591 int populate_kernelArgMD(msgpack::byte_range args_element,
592                          KernelArgMD *kernelarg) {
593   using namespace msgpack;
594   int error = 0;
595   foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
596     if (message_is_string(key, ".name")) {
597       foronly_string(value, [&](size_t N, const unsigned char *str) {
598         kernelarg->name_ = std::string(str, str + N);
599       });
600     } else if (message_is_string(key, ".type_name")) {
601       foronly_string(value, [&](size_t N, const unsigned char *str) {
602         kernelarg->typeName_ = std::string(str, str + N);
603       });
604     } else if (message_is_string(key, ".size")) {
605       foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
606     } else if (message_is_string(key, ".offset")) {
607       foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
608     } else if (message_is_string(key, ".value_kind")) {
609       foronly_string(value, [&](size_t N, const unsigned char *str) {
610         std::string s = std::string(str, str + N);
611         auto itValueKind = ArgValueKind.find(s);
612         if (itValueKind != ArgValueKind.end()) {
613           kernelarg->valueKind_ = itValueKind->second;
614         }
615       });
616     }
617   });
618   return error;
619 }
620 } // namespace
621 
622 static hsa_status_t get_code_object_custom_metadata(
623     void *binary, size_t binSize,
624     std::map<std::string, atl_kernel_info_t> &KernelInfoTable) {
  // Parse the code object, which uses different metadata keys than v2. Note
  // that the kernel name is not the same as the symbol name, so a
  // symbol -> name mapping is needed.
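  //
  // Flow: locate the msgpack metadata blob with find_metadata(), look up the
  // "amdhsa.kernels" array, and for each kernel record the register counts,
  // kernarg segment size and per-argument sizes/offsets in KernelInfoTable,
  // keyed by the kernel name (without the ".kd" suffix).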
628 
629   std::pair<unsigned char *, unsigned char *> metadata =
630       find_metadata(binary, binSize);
631   if (!metadata.first) {
632     return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
633   }
634 
635   uint64_t kernelsSize = 0;
636   int msgpack_errors = 0;
637   msgpack::byte_range kernel_array;
638   msgpack_errors =
639       map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
640                        &kernel_array, &kernelsSize);
641   if (msgpack_errors != 0) {
642     printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
643            "kernels lookup in program metadata");
644     return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
645   }
646 
647   for (size_t i = 0; i < kernelsSize; i++) {
648     assert(msgpack_errors == 0);
649     std::string kernelName;
650     std::string symbolName;
651 
652     msgpack::byte_range element;
653     msgpack_errors += array_lookup_element(kernel_array, i, &element);
654     if (msgpack_errors != 0) {
655       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
656              "element lookup in kernel metadata");
657       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
658     }
659 
660     msgpack_errors += map_lookup_string(element, ".name", &kernelName);
661     msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
662     if (msgpack_errors != 0) {
663       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
664              "strings lookup in kernel metadata");
665       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
666     }
667 
668     // Make sure that kernelName + ".kd" == symbolName
669     if ((kernelName + ".kd") != symbolName) {
670       printf("[%s:%d] Kernel name mismatching symbol: %s != %s + .kd\n",
671              __FILE__, __LINE__, symbolName.c_str(), kernelName.c_str());
672       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
673     }
674 
675     atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}};
676 
677     uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count;
678     msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count);
679     if (msgpack_errors != 0) {
680       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
681              "sgpr count metadata lookup in kernel metadata");
682       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
683     }
684 
685     info.sgpr_count = sgpr_count;
686 
687     msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count);
688     if (msgpack_errors != 0) {
689       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
690              "vgpr count metadata lookup in kernel metadata");
691       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
692     }
693 
694     info.vgpr_count = vgpr_count;
695 
696     msgpack_errors +=
697         map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count);
698     if (msgpack_errors != 0) {
699       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
700              "sgpr spill count metadata lookup in kernel metadata");
701       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
702     }
703 
704     info.sgpr_spill_count = sgpr_spill_count;
705 
706     msgpack_errors +=
707         map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count);
708     if (msgpack_errors != 0) {
709       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
710              "vgpr spill count metadata lookup in kernel metadata");
711       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
712     }
713 
714     info.vgpr_spill_count = vgpr_spill_count;
715 
716     size_t kernel_explicit_args_size = 0;
717     uint64_t kernel_segment_size;
718     msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
719                                           &kernel_segment_size);
720     if (msgpack_errors != 0) {
721       printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
722              "kernarg segment size metadata lookup in kernel metadata");
723       return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
724     }
725 
726     bool hasHiddenArgs = false;
727     if (kernel_segment_size > 0) {
728       uint64_t argsSize;
729       size_t offset = 0;
730 
731       msgpack::byte_range args_array;
732       msgpack_errors +=
733           map_lookup_array(element, ".args", &args_array, &argsSize);
734       if (msgpack_errors != 0) {
735         printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
736                "kernel args metadata lookup in kernel metadata");
737         return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
738       }
739 
740       info.num_args = argsSize;
741 
742       for (size_t i = 0; i < argsSize; ++i) {
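        // Each v3 arg record carries an explicit .offset rather than an
        // alignment, so track the running offset; padding between the previous
        // arg's end and this offset is charged to the explicit kernarg size.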
743         KernelArgMD lcArg;
744 
745         msgpack::byte_range args_element;
746         msgpack_errors += array_lookup_element(args_array, i, &args_element);
747         if (msgpack_errors != 0) {
748           printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
749                  "iterate args map in kernel args metadata");
750           return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
751         }
752 
753         msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
754         if (msgpack_errors != 0) {
755           printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
756                  "iterate args map in kernel args metadata");
757           return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
758         }
759         // populate info with sizes and offsets
760         info.arg_sizes.push_back(lcArg.size_);
761         // v3 has offset field and not align field
762         size_t new_offset = lcArg.offset_;
763         size_t padding = new_offset - offset;
764         offset = new_offset;
765         info.arg_offsets.push_back(lcArg.offset_);
766         DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(),
767                     lcArg.size_, lcArg.offset_);
768         offset += lcArg.size_;
769 
770         // check if the arg is a hidden/implicit arg
771         // this logic assumes that all hidden args are 8-byte aligned
772         if (!isImplicit(lcArg.valueKind_)) {
773           kernel_explicit_args_size += lcArg.size_;
774         } else {
775           hasHiddenArgs = true;
776         }
777         kernel_explicit_args_size += padding;
778       }
779     }
780 
    // Account for the implicit (hidden) args, e.g. global offset x/y/z and the
    // pipe pointer. ATMI does not use the compiler-set implicit args: when the
    // metadata reports hidden args, they are discounted and only the explicit
    // args plus ATMI's own atmi_implicit_args_t are counted.
784     info.kernel_segment_size =
785         (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
786         sizeof(atmi_implicit_args_t);
787     DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
788                 kernel_segment_size, info.kernel_segment_size);
789 
790     // kernel received, now add it to the kernel info table
791     KernelInfoTable[kernelName] = info;
792   }
793 
794   return HSA_STATUS_SUCCESS;
795 }
796 
797 static hsa_status_t
798 populate_InfoTables(hsa_executable_symbol_t symbol,
799                     std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
800                     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable) {
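  // Called for every symbol of the frozen executable. For kernel symbols,
  // augment the KernelInfoTable entry created earlier by
  // get_code_object_custom_metadata() with the kernel object handle and the
  // group/private segment sizes; for variable symbols, record the address and
  // size in SymbolInfoTable.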
801   hsa_symbol_kind_t type;
802 
803   uint32_t name_length;
804   hsa_status_t err;
805   err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
806                                        &type);
807   if (err != HSA_STATUS_SUCCESS) {
808     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
809            "Symbol info extraction", get_error_string(err));
810     return err;
811   }
812   DEBUG_PRINT("Exec Symbol type: %d\n", type);
813   if (type == HSA_SYMBOL_KIND_KERNEL) {
814     err = hsa_executable_symbol_get_info(
815         symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
816     if (err != HSA_STATUS_SUCCESS) {
817       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
818              "Symbol info extraction", get_error_string(err));
819       return err;
820     }
821     char *name = reinterpret_cast<char *>(malloc(name_length + 1));
822     err = hsa_executable_symbol_get_info(symbol,
823                                          HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
824     if (err != HSA_STATUS_SUCCESS) {
825       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
826              "Symbol info extraction", get_error_string(err));
827       return err;
828     }
829     // remove the suffix .kd from symbol name.
830     name[name_length - 3] = 0;
831 
832     atl_kernel_info_t info;
833     std::string kernelName(name);
834     // by now, the kernel info table should already have an entry
835     // because the non-ROCr custom code object parsing is called before
836     // iterating over the code object symbols using ROCr
    if (KernelInfoTable.find(kernelName) == KernelInfoTable.end()) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding the kernel info table entry",
             get_error_string(HSA_STATUS_ERROR_INVALID_CODE_OBJECT));
      exit(1);
    }
845     // found, so assign and update
846     info = KernelInfoTable[kernelName];
847 
848     /* Extract dispatch information from the symbol */
849     err = hsa_executable_symbol_get_info(
850         symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
851         &(info.kernel_object));
852     if (err != HSA_STATUS_SUCCESS) {
853       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
854              "Extracting the symbol from the executable",
855              get_error_string(err));
856       return err;
857     }
858     err = hsa_executable_symbol_get_info(
859         symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
860         &(info.group_segment_size));
861     if (err != HSA_STATUS_SUCCESS) {
862       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
863              "Extracting the group segment size from the executable",
864              get_error_string(err));
865       return err;
866     }
867     err = hsa_executable_symbol_get_info(
868         symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
869         &(info.private_segment_size));
870     if (err != HSA_STATUS_SUCCESS) {
871       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
872              "Extracting the private segment from the executable",
873              get_error_string(err));
874       return err;
875     }
876 
877     DEBUG_PRINT(
878         "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
879         "kernarg\n",
880         kernelName.c_str(), info.kernel_object, info.group_segment_size,
881         info.private_segment_size, info.kernel_segment_size);
882 
883     // assign it back to the kernel info table
884     KernelInfoTable[kernelName] = info;
885     free(name);
886   } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
887     err = hsa_executable_symbol_get_info(
888         symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
889     if (err != HSA_STATUS_SUCCESS) {
890       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
891              "Symbol info extraction", get_error_string(err));
892       return err;
893     }
894     char *name = reinterpret_cast<char *>(malloc(name_length + 1));
895     err = hsa_executable_symbol_get_info(symbol,
896                                          HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
897     if (err != HSA_STATUS_SUCCESS) {
898       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
899              "Symbol info extraction", get_error_string(err));
900       return err;
901     }
902     name[name_length] = 0;
903 
904     atl_symbol_info_t info;
905 
906     err = hsa_executable_symbol_get_info(
907         symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
908     if (err != HSA_STATUS_SUCCESS) {
909       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
910              "Symbol info address extraction", get_error_string(err));
911       return err;
912     }
913 
914     err = hsa_executable_symbol_get_info(
915         symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
916     if (err != HSA_STATUS_SUCCESS) {
917       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
918              "Symbol info size extraction", get_error_string(err));
919       return err;
920     }
921 
922     DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
923                 info.size);
924     SymbolInfoTable[std::string(name)] = info;
925     free(name);
926   } else {
927     DEBUG_PRINT("Symbol is an indirect function\n");
928   }
929   return HSA_STATUS_SUCCESS;
930 }
931 
932 hsa_status_t RegisterModuleFromMemory(
933     std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
934     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
935     void *module_bytes, size_t module_size, hsa_agent_t agent,
936     hsa_status_t (*on_deserialized_data)(void *data, size_t size,
937                                          void *cb_state),
938     void *cb_state, std::vector<hsa_executable_t> &HSAExecutables) {
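  // Loading a module proceeds in several steps: parse the custom (non-ROCr)
  // code object metadata into KernelInfoTable, deserialize the code object,
  // hand the deserialized image back to the caller via on_deserialized_data,
  // load it into a freshly created executable, freeze the executable, and
  // finally iterate its symbols to fill the kernel and symbol info tables.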
939   hsa_status_t err;
940   hsa_executable_t executable = {0};
941   hsa_profile_t agent_profile;
942 
943   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
944   if (err != HSA_STATUS_SUCCESS) {
945     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
946            "Query the agent profile", get_error_string(err));
947     return HSA_STATUS_ERROR;
948   }
949   // FIXME: Assume that every profile is FULL until we understand how to build
950   // GCN with base profile
951   agent_profile = HSA_PROFILE_FULL;
952   /* Create the empty executable.  */
953   err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
954                               &executable);
955   if (err != HSA_STATUS_SUCCESS) {
956     printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
957            "Create the executable", get_error_string(err));
958     return HSA_STATUS_ERROR;
959   }
960 
961   bool module_load_success = false;
962   do // Existing control flow used continue, preserve that for this patch
963   {
964     {
965       // Some metadata info is not available through ROCr API, so use custom
966       // code object metadata parsing to collect such metadata info
967 
968       err = get_code_object_custom_metadata(module_bytes, module_size,
969                                             KernelInfoTable);
970       if (err != HSA_STATUS_SUCCESS) {
971         DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
972                     "Getting custom code object metadata",
973                     get_error_string(err));
974         continue;
975       }
976 
977       // Deserialize code object.
978       hsa_code_object_t code_object = {0};
979       err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
980                                         &code_object);
981       if (err != HSA_STATUS_SUCCESS) {
982         DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
983                     "Code Object Deserialization", get_error_string(err));
984         continue;
985       }
986       assert(0 != code_object.handle);
987 
988       // Mutating the device image here avoids another allocation & memcpy
989       void *code_object_alloc_data =
990           reinterpret_cast<void *>(code_object.handle);
991       hsa_status_t atmi_err =
992           on_deserialized_data(code_object_alloc_data, module_size, cb_state);
993       if (atmi_err != HSA_STATUS_SUCCESS) {
994         printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
995                "Error in deserialized_data callback",
996                get_error_string(atmi_err));
997         return atmi_err;
998       }
999 
1000       /* Load the code object.  */
1001       err =
1002           hsa_executable_load_code_object(executable, agent, code_object, NULL);
1003       if (err != HSA_STATUS_SUCCESS) {
1004         DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
1005                     "Loading the code object", get_error_string(err));
1006         continue;
1007       }
1008 
1009       // cannot iterate over symbols until executable is frozen
1010     }
1011     module_load_success = true;
1012   } while (0);
1013   DEBUG_PRINT("Modules loaded successful? %d\n", module_load_success);
1014   if (module_load_success) {
1015     /* Freeze the executable; it can now be queried for symbols.  */
1016     err = hsa_executable_freeze(executable, "");
1017     if (err != HSA_STATUS_SUCCESS) {
1018       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
1019              "Freeze the executable", get_error_string(err));
1020       return HSA_STATUS_ERROR;
1021     }
1022 
1023     err = hsa::executable_iterate_symbols(
1024         executable,
1025         [&](hsa_executable_t, hsa_executable_symbol_t symbol) -> hsa_status_t {
1026           return populate_InfoTables(symbol, KernelInfoTable, SymbolInfoTable);
1027         });
1028     if (err != HSA_STATUS_SUCCESS) {
1029       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
1030              "Iterating over symbols for execuatable", get_error_string(err));
1031       return HSA_STATUS_ERROR;
1032     }
1033 
1034     // save the executable and destroy during finalize
1035     HSAExecutables.push_back(executable);
1036     return HSA_STATUS_SUCCESS;
1037   } else {
1038     return HSA_STATUS_ERROR;
1039   }
1040 }
1041 
1042 } // namespace core
1043