/*===--------------------------------------------------------------------------
 *              ATMI (Asynchronous Task and Memory Interface)
 *
 * This file is distributed under the MIT License. See LICENSE.txt for details.
 *===------------------------------------------------------------------------*/
#include <gelf.h>
#include <libelf.h>

#include <cassert>
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "internal.h"
#include "machine.h"
#include "rt.h"

#include "msgpack.h"

typedef unsigned char *address;
/*
 * Note descriptors.
 */
typedef struct {
  uint32_t n_namesz; /* Length of note's name. */
  uint32_t n_descsz; /* Length of note's value. */
  uint32_t n_type;   /* Type of note. */
  // then name
  // then padding, optional
  // then desc, at 4 byte alignment (not 8, despite being elf64)
} Elf_Note;
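
// Layout of one note entry as walked by find_metadata() below. The sizes are
// an invented example, not real metadata:
//
//   offset  0: n_namesz = 7            // strlen("AMDGPU") + NUL
//   offset  4: n_descsz = 485          // size of the msgpack blob
//   offset  8: n_type   = 32           // NT_AMDGPU_METADATA
//   offset 12: "AMDGPU\0" + 1 pad byte // name, padded to 4-byte alignment
//   offset 20: 485 bytes of desc, padded to 4; the next note starts at 508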

// The LLVM header below and the structs/enums it declares have been
// replicated here on a per-use basis. For example,
// llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, but we currently
// care only about kernargSegmentSize_, so our KernelMD replica carries just
// that field. Replicating the declarations avoids a dependency on
// LLVM_INCLUDE_DIR just to compile the runtime.
// #include "llvm/Support/AMDGPUMetadata.h"
// typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
// using llvm::AMDGPU::HSAMD::AccessQualifier;
// using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
// using llvm::AMDGPU::HSAMD::ValueKind;
// using llvm::AMDGPU::HSAMD::ValueType;

class KernelArgMD {
public:
  enum class ValueKind {
    HiddenGlobalOffsetX,
    HiddenGlobalOffsetY,
    HiddenGlobalOffsetZ,
    HiddenNone,
    HiddenPrintfBuffer,
    HiddenDefaultQueue,
    HiddenCompletionAction,
    HiddenMultiGridSyncArg,
    HiddenHostcallBuffer,
    Unknown
  };

  KernelArgMD()
      : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
        align_(0), valueKind_(ValueKind::Unknown) {}

  // fields
  std::string name_;
  std::string typeName_;
  uint32_t size_;
  uint32_t offset_;
  uint32_t align_;
  ValueKind valueKind_;
};

class KernelMD {
public:
  KernelMD() : kernargSegmentSize_(0ull) {}

  // fields
  uint64_t kernargSegmentSize_;
};

static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
    //    Including only those fields that are relevant to the runtime.
    //    {"ByValue", KernelArgMD::ValueKind::ByValue},
    //    {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"DynamicSharedPointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"Sampler", KernelArgMD::ValueKind::Sampler},
    //    {"Image", KernelArgMD::ValueKind::Image},
    //    {"Pipe", KernelArgMD::ValueKind::Pipe},
    //    {"Queue", KernelArgMD::ValueKind::Queue},
    {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
    {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
    {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
    // v3
    //    {"by_value", KernelArgMD::ValueKind::ByValue},
    //    {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"dynamic_shared_pointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"sampler", KernelArgMD::ValueKind::Sampler},
    //    {"image", KernelArgMD::ValueKind::Image},
    //    {"pipe", KernelArgMD::ValueKind::Pipe},
    //    {"queue", KernelArgMD::ValueKind::Queue},
    {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
    {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"hidden_completion_action",
     KernelArgMD::ValueKind::HiddenCompletionAction},
    {"hidden_multigrid_sync_arg",
     KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
};
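
// Illustrative lookup against the table above (a sketch, not code that is
// compiled here):
//
//   auto it = ArgValueKind.find(value_kind_string);
//   KernelArgMD::ValueKind vk = (it == ArgValueKind.end())
//                                   ? KernelArgMD::ValueKind::Unknown
//                                   : it->second;
//
// Strings that are not listed (e.g. the commented-out "global_buffer") fall
// back to Unknown, which isImplicit() below treats as an explicit argument.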

// global variables. TODO: Get rid of these
atmi_machine_t g_atmi_machine;
ATLMachine g_atl_machine;

hsa_region_t atl_gpu_kernarg_region;
std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools;
hsa_region_t atl_cpu_kernarg_region;

static std::vector<hsa_executable_t> g_executables;

std::map<std::string, std::string> KernelNameMap;
std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;

bool g_atmi_initialized = false;
bool g_atmi_hostcall_required = false;

struct timespec context_init_time;
int context_init_time_init = 0;

/*
   atlc holds all of the runtime's internal global state.
   The structure atl_context_t is defined in atl_internal.h.
   Most references use the global structure atlc directly;
   the pointer atlc_p is equivalent, so atlc_p->x means atlc.x.
*/

atl_context_t atlc = {.struct_initialized = false};
atl_context_t *atlc_p = NULL;

namespace core {
/* Machine Info */
atmi_machine_t *Runtime::GetMachineInfo() {
  if (!atlc.g_hsa_initialized)
    return NULL;
  return &g_atmi_machine;
}

static void atl_set_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = true;
}

static void atl_reset_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = false;
}

bool atl_is_atmi_initialized() { return g_atmi_initialized; }

void allow_access_to_all_gpu_agents(void *ptr) {
  hsa_status_t err;
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  std::vector<hsa_agent_t> agents;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    agents.push_back(gpu_procs[i].agent());
  }
  err = hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Allow agents ptr access", get_error_string(err));
    exit(1);
  }
}
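
// Typical caller pattern (a hedged sketch; assumes host_buf was allocated
// from a fine-grained pool elsewhere, e.g. via hsa_amd_memory_pool_allocate):
//
//   void *host_buf = ...;                      // fine-grained host memory
//   core::allow_access_to_all_gpu_agents(host_buf);
//   // every GPU agent in g_atl_machine may now read/write host_buf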

atmi_status_t Runtime::Initialize() {
  atmi_devtype_t devtype = ATMI_DEVTYPE_GPU;
  if (atl_is_atmi_initialized())
    return ATMI_STATUS_SUCCESS;

  if (devtype == ATMI_DEVTYPE_ALL || devtype & ATMI_DEVTYPE_GPU) {
    atmi_status_t status = atl_init_gpu_context();
    if (status != ATMI_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "GPU context init",
             get_atmi_error_string(status));
      exit(1);
    }
  }

  atl_set_atmi_initialized();
  return ATMI_STATUS_SUCCESS;
}
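
// Expected client call sequence (illustrative only; the exact way a Runtime
// instance is reached depends on the declarations in rt.h):
//
//   if (Runtime::Initialize() != ATMI_STATUS_SUCCESS) { /* bail */ }
//   // ... register modules, launch work ...
//   Runtime::Finalize();
//
// Initialize() is idempotent: a second call returns ATMI_STATUS_SUCCESS
// without redoing the GPU context setup.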

atmi_status_t Runtime::Finalize() {
  hsa_status_t err;

  for (uint32_t i = 0; i < g_executables.size(); i++) {
    err = hsa_executable_destroy(g_executables[i]);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Destroying executable", get_error_string(err));
      exit(1);
    }
  }

  for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) {
    SymbolInfoTable[i].clear();
  }
  SymbolInfoTable.clear();
  for (uint32_t i = 0; i < KernelInfoTable.size(); i++) {
    KernelInfoTable[i].clear();
  }
  KernelInfoTable.clear();

  atl_reset_atmi_initialized();
  err = hsa_shut_down();
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Shutting down HSA",
           get_error_string(err));
    exit(1);
  }

  return ATMI_STATUS_SUCCESS;
}

static void atmi_init_context_structs() {
  atlc_p = &atlc;
  atlc.struct_initialized = true; /* This only gets called one time */
  atlc.g_hsa_initialized = false;
  atlc.g_gpu_initialized = false;
  atlc.g_tasks_initialized = false;
}

// Implement memory_pool iteration function
static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
                                         void *data) {
  ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
  hsa_status_t err = HSA_STATUS_SUCCESS;
  // Check if the memory_pool is allowed to allocate, i.e. do not return group
  // memory
  bool alloc_allowed = false;
  err = hsa_amd_memory_pool_get_info(
      memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
      &alloc_allowed);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Alloc allowed in memory pool check", get_error_string(err));
    exit(1);
  }
  if (alloc_allowed) {
    uint32_t global_flag = 0;
    err = hsa_amd_memory_pool_get_info(
        memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Get memory pool info", get_error_string(err));
      exit(1);
    }
    if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
      proc->addMemory(new_mem);
      if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) {
        DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle);
        atl_gpu_kernarg_pools.push_back(memory_pool);
      }
    } else {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
      proc->addMemory(new_mem);
    }
  }

  return err;
}

static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
  hsa_status_t err = HSA_STATUS_SUCCESS;
  hsa_device_type_t device_type;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Get device type info", get_error_string(err));
    exit(1);
  }
  switch (device_type) {
  case HSA_DEVICE_TYPE_CPU: {
    ATLCPUProcessor new_proc(agent);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterate all memory pools", get_error_string(err));
      exit(1);
    }
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_GPU: {
    hsa_profile_t profile;
    err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Query the agent profile", get_error_string(err));
      exit(1);
    }
    atmi_devtype_t gpu_type;
    gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
    ATLGPUProcessor new_proc(agent, gpu_type);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterate all memory pools", get_error_string(err));
      exit(1);
    }
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_DSP: {
    err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  } break;
  }

  return err;
}

hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (segment != HSA_REGION_SEGMENT_GLOBAL) {
    return HSA_STATUS_SUCCESS;
  }
  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }
  return HSA_STATUS_SUCCESS;
}
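
// This callback is designed for hsa_agent_iterate_regions, where returning
// HSA_STATUS_INFO_BREAK stops the iteration at the first match. A minimal
// sketch of the idiom (it mirrors the use in init_compute_and_memory below):
//
//   hsa_region_t region = {(uint64_t)-1};
//   hsa_status_t r =
//       hsa_agent_iterate_regions(agent, get_fine_grained_region, &region);
//   if (r == HSA_STATUS_INFO_BREAK)
//     r = HSA_STATUS_SUCCESS;       // early break means a region was found
//   // region.handle still equal to (uint64_t)-1 means no match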

/* Determines if a memory region can be used for kernarg allocations.  */
static hsa_status_t get_kernarg_memory_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (HSA_REGION_SEGMENT_GLOBAL != segment) {
    return HSA_STATUS_SUCCESS;
  }

  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }

  return HSA_STATUS_SUCCESS;
}
static hsa_status_t init_compute_and_memory() {
  hsa_status_t err;

  /* Iterate over the agents and pick the gpu agent */
  err = hsa_iterate_agents(get_agent_info, NULL);
  if (err == HSA_STATUS_INFO_BREAK) {
    err = HSA_STATUS_SUCCESS;
  }
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Getting a gpu agent",
           get_error_string(err));
    exit(1);
  }

  /* Init all devices or individual device types? */
  std::vector<ATLCPUProcessor> &cpu_procs =
      g_atl_machine.processors<ATLCPUProcessor>();
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  /* For CPU memory pools, add other devices that can access them directly
   * or indirectly */
  for (auto &cpu_proc : cpu_procs) {
    for (auto &cpu_mem : cpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = cpu_mem.memory();
      for (auto &gpu_proc : gpu_procs) {
        hsa_agent_t agent = gpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          gpu_proc.addMemory(cpu_mem);
        }
      }
    }
  }

  /* FIXME: are the below combinations of procs and memory pools needed?
   * all to all compare procs with their memory pools and add those memory
   * pools that are accessible by the target procs */
  for (auto &gpu_proc : gpu_procs) {
    for (auto &gpu_mem : gpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = gpu_mem.memory();
      for (auto &cpu_proc : cpu_procs) {
        hsa_agent_t agent = cpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          cpu_proc.addMemory(gpu_mem);
        }
      }
    }
  }

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size();
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size();

  size_t num_procs = cpu_procs.size() + gpu_procs.size();
  // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs *
  // sizeof(atmi_device_t));
  atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>(
      malloc(num_procs * sizeof(atmi_device_t)));
  int num_iGPUs = 0;
  int num_dGPUs = 0;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
      num_iGPUs++;
    else
      num_dGPUs++;
  }
  assert(num_iGPUs + num_dGPUs == gpu_procs.size() &&
         "Number of dGPUs and iGPUs do not add up");
  DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
  DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
  DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
  DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs;
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs;

  int cpus_begin = 0;
  int cpus_end = cpu_procs.size();
  int gpus_begin = cpu_procs.size();
  int gpus_end = cpu_procs.size() + gpu_procs.size();
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin];
  int proc_index = 0;
  for (int i = cpus_begin; i < cpus_end; i++) {
    all_devices[i].type = cpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("CPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  for (int i = gpus_begin; i < gpus_end; i++) {
    all_devices[i].type = gpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("GPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  atl_cpu_kernarg_region.handle = (uint64_t)-1;
  if (cpu_procs.size() > 0) {
    err = hsa_agent_iterate_regions(
        cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region);
    if (err == HSA_STATUS_INFO_BREAK) {
      err = HSA_STATUS_SUCCESS;
    }
    err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding a CPU kernarg memory region handle",
             get_error_string(err));
      exit(1);
    }
  }
  /* Find a memory region that supports kernel arguments.  */
  atl_gpu_kernarg_region.handle = (uint64_t)-1;
  if (gpu_procs.size() > 0) {
    hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region,
                              &atl_gpu_kernarg_region);
    err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding a kernarg memory region", get_error_string(err));
      exit(1);
    }
  }
  if (num_procs > 0)
    return HSA_STATUS_SUCCESS;
  else
    return HSA_STATUS_ERROR_NOT_INITIALIZED;
}

hsa_status_t init_hsa() {
  if (atlc.g_hsa_initialized == false) {
    DEBUG_PRINT("Initializing HSA...");
    hsa_status_t err = hsa_init();
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Initializing the hsa runtime", get_error_string(err));
      exit(1);
    }

    err = init_compute_and_memory();
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "After initializing compute and memory", get_error_string(err));
      return err;
    }

    int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
    KernelInfoTable.resize(gpu_count);
    SymbolInfoTable.resize(gpu_count);
    for (uint32_t i = 0; i < SymbolInfoTable.size(); i++)
      SymbolInfoTable[i].clear();
    for (uint32_t i = 0; i < KernelInfoTable.size(); i++)
      KernelInfoTable[i].clear();
    atlc.g_hsa_initialized = true;
    DEBUG_PRINT("done\n");
  }
  return HSA_STATUS_SUCCESS;
}

void init_tasks() {
  if (atlc.g_tasks_initialized)
    return;
  std::vector<hsa_agent_t> gpu_agents;
  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    atmi_place_t place = ATMI_PLACE_GPU(0, gpu);
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
    gpu_agents.push_back(proc.agent());
  }
  atlc.g_tasks_initialized = true;
}

hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
#if (ROCM_VERSION_MAJOR >= 3) ||                                               \
    (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
  if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
#else
  if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
#endif
    hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
    // memory_fault.agent
    // memory_fault.virtual_address
    // memory_fault.fault_reason_mask
    std::stringstream stream;
    stream << std::hex << (uintptr_t)memory_fault.virtual_address;
    std::string addr("0x" + stream.str());

    std::string err_string = "[GPU Memory Error] Addr: " + addr;
    err_string += " Reason: ";
    // The fault reasons are single-bit flags (1 << 0 through 1 << 5); the
    // previous hex literals (0x00000010 etc.) only looked like binary masks.
    if (!(memory_fault.fault_reason_mask & 0x3F)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask &
          HSA_AMD_MEMORY_FAULT_PAGE_NOT_PRESENT)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_READ_ONLY)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_NX)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_HOST_ONLY)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_DRAMECC)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & HSA_AMD_MEMORY_FAULT_IMPRECISE)
        err_string += "Can't determine the exact fault address. ";
    }
    fprintf(stderr, "%s\n", err_string.c_str());
    return HSA_STATUS_ERROR;
  }
  return HSA_STATUS_SUCCESS;
}

atmi_status_t atl_init_gpu_context() {
  if (!atlc.struct_initialized)
    atmi_init_context_structs();
  if (atlc.g_gpu_initialized)
    return ATMI_STATUS_SUCCESS;

  hsa_status_t err;
  err = init_hsa();
  if (err != HSA_STATUS_SUCCESS)
    return ATMI_STATUS_ERROR;

  if (context_init_time_init == 0) {
    clock_gettime(CLOCK_MONOTONIC_RAW, &context_init_time);
    context_init_time_init = 1;
  }

  err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Registering the system for memory faults", get_error_string(err));
    exit(1);
  }

  init_tasks();
  atlc.g_gpu_initialized = true;
  return ATMI_STATUS_SUCCESS;
}

static bool isImplicit(KernelArgMD::ValueKind value_kind) {
  switch (value_kind) {
  case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
  case KernelArgMD::ValueKind::HiddenNone:
  case KernelArgMD::ValueKind::HiddenPrintfBuffer:
  case KernelArgMD::ValueKind::HiddenDefaultQueue:
  case KernelArgMD::ValueKind::HiddenCompletionAction:
  case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
  case KernelArgMD::ValueKind::HiddenHostcallBuffer:
    return true;
  default:
    return false;
  }
}

static std::pair<unsigned char *, unsigned char *>
find_metadata(void *binary, size_t binSize) {
  std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};

  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (elf_kind(e) != ELF_K_ELF) {
    return failure;
  }

  size_t numpHdrs;
  if (elf_getphdrnum(e, &numpHdrs) != 0) {
    return failure;
  }

  for (size_t i = 0; i < numpHdrs; ++i) {
    GElf_Phdr pHdr;
    if (gelf_getphdr(e, i, &pHdr) != &pHdr) {
      continue;
    }
    // Look for the runtime metadata note
    if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
      // Iterate over the notes in this segment
      address ptr = (address)binary + pHdr.p_offset;
      address segmentEnd = ptr + pHdr.p_filesz;

      while (ptr < segmentEnd) {
        Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
        address name = (address)&note[1];

        if (note->n_type == 7 || note->n_type == 8) {
          return failure;
        } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
                   note->n_namesz == sizeof "AMD" &&
                   !memcmp(name, "AMD", note->n_namesz)) {
          // code object v2 uses yaml metadata, no longer supported
          return failure;
        } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
                   note->n_namesz == sizeof "AMDGPU" &&
                   !memcmp(name, "AMDGPU", note->n_namesz)) {
          // n_descsz is the size of the msgpack blob; the desc payload is
          // padded to 4-byte alignment, so round the end pointer up to match.
          size_t offset = sizeof(uint32_t) * 3 /* fields */
                          + sizeof("AMDGPU")   /* name */
                          + 1 /* padding to 4 byte alignment */;

          // Including the trailing padding means both pointers are 4 bytes
          // aligned, which may be useful later.
          unsigned char *metadata_start = (unsigned char *)ptr + offset;
          unsigned char *metadata_end =
              metadata_start + core::alignUp(note->n_descsz, 4);
          return {metadata_start, metadata_end};
        }
        ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
               core::alignUp(note->n_descsz, sizeof(int));
      }
    }
  }

  return failure;
}
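
// How find_metadata is consumed (this mirrors the call in
// get_code_object_custom_metadata below; 'image' and 'size' are placeholder
// names):
//
//   auto md = find_metadata(image, size);
//   if (!md.first)
//     return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; // no metadata note found
//   // [md.first, md.second) is the msgpack-encoded NT_AMDGPU_METADATA blob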

namespace {
int map_lookup_array(msgpack::byte_range message, const char *needle,
                     msgpack::byte_range *res, uint64_t *size) {
  unsigned count = 0;
  struct s : msgpack::functors_defaults<s> {
    s(unsigned &count, uint64_t *size) : count(count), size(size) {}
    unsigned &count;
    uint64_t *size;
    const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
      count++;
      *size = N;
      return bytes.end;
    }
  };

  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           // If the message is an array, record number of
                           // elements in *size
                           msgpack::handle_msgpack<s>(value, {count, size});
                           // return the whole array
                           *res = value;
                         }
                       });
  // Only claim success if exactly one key/array pair matched
  return count != 1;
}
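
// Example use (a sketch; assumes 'metadata' spans a msgpack map such as
// {"amdhsa.kernels": [ ... ]}):
//
//   msgpack::byte_range kernels;
//   uint64_t n = 0;
//   if (map_lookup_array(metadata, "amdhsa.kernels", &kernels, &n) != 0)
//     /* key absent, duplicated, or not an array */;
//
// Note the inverted convention used by all of these helpers: 0 means
// success, nonzero means failure.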

int map_lookup_string(msgpack::byte_range message, const char *needle,
                      std::string *res) {
  unsigned count = 0;
  struct s : public msgpack::functors_defaults<s> {
    s(unsigned &count, std::string *res) : count(count), res(res) {}
    unsigned &count;
    std::string *res;
    void handle_string(size_t N, const unsigned char *str) {
      count++;
      *res = std::string(str, str + N);
    }
  };
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::handle_msgpack<s>(value, {count, res});
                         }
                       });
  return count != 1;
}

int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
                        uint64_t *res) {
  unsigned count = 0;
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::foronly_unsigned(value, [&](uint64_t x) {
                             count++;
                             *res = x;
                           });
                         }
                       });
  return count != 1;
}

int array_lookup_element(msgpack::byte_range message, uint64_t elt,
                         msgpack::byte_range *res) {
  int rc = 1;
  uint64_t i = 0;
  msgpack::foreach_array(message, [&](msgpack::byte_range value) {
    if (i == elt) {
      *res = value;
      rc = 0;
    }
    i++;
  });
  return rc;
}

int populate_kernelArgMD(msgpack::byte_range args_element,
                         KernelArgMD *kernelarg) {
  using namespace msgpack;
  int error = 0;
  foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
    if (message_is_string(key, ".name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->name_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".type_name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->typeName_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".size")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
    } else if (message_is_string(key, ".offset")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
    } else if (message_is_string(key, ".value_kind")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        std::string s = std::string(str, str + N);
        auto itValueKind = ArgValueKind.find(s);
        if (itValueKind != ArgValueKind.end()) {
          kernelarg->valueKind_ = itValueKind->second;
        }
      });
    }
  });
  return error;
}
} // namespace
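
// The per-argument map consumed by populate_kernelArgMD looks roughly like
// this (rendered as YAML for readability; the values are invented):
//
//   - .name:       x
//     .type_name:  'int*'
//     .size:       8
//     .offset:     0
//     .value_kind: global_buffer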

static hsa_status_t get_code_object_custom_metadata(void *binary,
                                                    size_t binSize, int gpu) {
  // parse code object with different keys from v2
  // also, the kernel name is not the same as the symbol name -- so a
  // symbol->name map is needed
  std::pair<unsigned char *, unsigned char *> metadata =
      find_metadata(binary, binSize);
  if (!metadata.first) {
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  uint64_t kernelsSize = 0;
  int msgpack_errors = 0;
  msgpack::byte_range kernel_array;
  msgpack_errors =
      map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
                       &kernel_array, &kernelsSize);
  if (msgpack_errors != 0) {
    printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
           "kernels lookup in program metadata");
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  for (size_t i = 0; i < kernelsSize; i++) {
    assert(msgpack_errors == 0);
    std::string kernelName;
    std::string symbolName;

    msgpack::byte_range element;
    msgpack_errors += array_lookup_element(kernel_array, i, &element);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "element lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    msgpack_errors += map_lookup_string(element, ".name", &kernelName);
    msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "strings lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}};

    uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count;
    msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "sgpr count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.sgpr_count = sgpr_count;

    msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "vgpr count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.vgpr_count = vgpr_count;

    msgpack_errors +=
        map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "sgpr spill count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.sgpr_spill_count = sgpr_spill_count;

    msgpack_errors +=
        map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "vgpr spill count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.vgpr_spill_count = vgpr_spill_count;

    size_t kernel_explicit_args_size = 0;
    uint64_t kernel_segment_size;
    msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
                                          &kernel_segment_size);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "kernarg segment size metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    // create a map from symbol to name
    DEBUG_PRINT("Kernel symbol %s; Name: %s; Size: %lu\n", symbolName.c_str(),
                kernelName.c_str(), kernel_segment_size);
    KernelNameMap[symbolName] = kernelName;

    bool hasHiddenArgs = false;
    if (kernel_segment_size > 0) {
      uint64_t argsSize;
      size_t offset = 0;

      msgpack::byte_range args_array;
      msgpack_errors +=
          map_lookup_array(element, ".args", &args_array, &argsSize);
      if (msgpack_errors != 0) {
        printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
               "kernel args metadata lookup in kernel metadata");
        return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
      }

      info.num_args = argsSize;

      for (size_t j = 0; j < argsSize; ++j) {
        KernelArgMD lcArg;

        msgpack::byte_range args_element;
        msgpack_errors += array_lookup_element(args_array, j, &args_element);
        if (msgpack_errors != 0) {
          printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "iterate args map in kernel args metadata");
          return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
        }

        msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
        if (msgpack_errors != 0) {
          printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "populate arg map from kernel args metadata");
          return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
        }
        // populate info with sizes and offsets
        info.arg_sizes.push_back(lcArg.size_);
        // v3 has an offset field instead of an align field
        size_t new_offset = lcArg.offset_;
        size_t padding = new_offset - offset;
        offset = new_offset;
        info.arg_offsets.push_back(lcArg.offset_);
        DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", j, lcArg.name_.c_str(),
                    lcArg.size_, lcArg.offset_);
        offset += lcArg.size_;

        // check if the arg is a hidden/implicit arg
        // this logic assumes that all hidden args are 8-byte aligned
        if (!isImplicit(lcArg.valueKind_)) {
          kernel_explicit_args_size += lcArg.size_;
        } else {
          hasHiddenArgs = true;
        }
        kernel_explicit_args_size += padding;
      }
    }

    // Add the size of the implicit args (e.g. global offsets x/y/z, printf
    // buffer, pipe pointer). ATMI does not count the compiler-set implicit
    // args; it discounts them and appends its own atmi_implicit_args_t
    // instead.
    info.kernel_segment_size =
        (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
        sizeof(atmi_implicit_args_t);
    DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
                kernel_segment_size, info.kernel_segment_size);

    // kernel received, now add it to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
  }

  return HSA_STATUS_SUCCESS;
}
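
// After this pass, KernelInfoTable[gpu] maps each kernel name to a partially
// filled atl_kernel_info_t (register counts, arg sizes/offsets, kernarg
// segment size); kernel_object and the group/private segment sizes are added
// later by populate_InfoTables. Illustrative lookup ("vector_add" is a
// made-up kernel name):
//
//   atl_kernel_info_t ki = KernelInfoTable[gpu]["vector_add"];
//   uint32_t kernarg_bytes = ki.kernel_segment_size;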

static hsa_status_t populate_InfoTables(hsa_executable_t executable,
                                        hsa_executable_symbol_t symbol,
                                        void *data) {
  int gpu = *static_cast<int *>(data);
  hsa_symbol_kind_t type;

  uint32_t name_length;
  hsa_status_t err;
  err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
                                       &type);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Symbol info extraction", get_error_string(err));
    exit(1);
  }
  DEBUG_PRINT("Exec Symbol type: %d\n", type);
  if (type == HSA_SYMBOL_KIND_KERNEL) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      exit(1);
    }
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      exit(1);
    }
    name[name_length] = 0;

    if (KernelNameMap.find(std::string(name)) == KernelNameMap.end()) {
      // did not find the kernel name in the kernel map; this can happen only
      // if the ROCr API for getting symbol info (name) disagrees with the
      // custom code object parsing above
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Invalid kernel name",
             get_error_string(HSA_STATUS_ERROR_INVALID_CODE_OBJECT));
      exit(1);
    }
    atl_kernel_info_t info;
    std::string kernelName = KernelNameMap[std::string(name)];
    // by now, the kernel info table should already have an entry
    // because the non-ROCr custom code object parsing is called before
    // iterating over the code object symbols using ROCr
    if (KernelInfoTable[gpu].find(kernelName) == KernelInfoTable[gpu].end()) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding the entry in the kernel info table",
             get_error_string(HSA_STATUS_ERROR_INVALID_CODE_OBJECT));
      exit(1);
    }
    // found, so assign and update
    info = KernelInfoTable[gpu][kernelName];

    /* Extract dispatch information from the symbol */
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
        &(info.kernel_object));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the symbol from the executable",
             get_error_string(err));
      exit(1);
    }
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
        &(info.group_segment_size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the group segment size from the executable",
             get_error_string(err));
      exit(1);
    }
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
        &(info.private_segment_size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the private segment from the executable",
             get_error_string(err));
      exit(1);
    }

    DEBUG_PRINT(
        "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
        "kernarg\n",
        kernelName.c_str(), info.kernel_object, info.group_segment_size,
        info.private_segment_size, info.kernel_segment_size);

    // assign it back to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
    free(name);
  } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      exit(1);
    }
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      exit(1);
    }
    name[name_length] = 0;

    atl_symbol_info_t info;

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info address extraction", get_error_string(err));
      exit(1);
    }

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info size extraction", get_error_string(err));
      exit(1);
    }

    atmi_mem_place_t place = ATMI_MEM_PLACE(ATMI_DEVTYPE_GPU, gpu, 0);
    DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
                info.size);
    register_allocation(reinterpret_cast<void *>(info.addr), (size_t)info.size,
                        place);
    SymbolInfoTable[gpu][std::string(name)] = info;
    if (strcmp(name, "needs_hostcall_buffer") == 0)
      g_atmi_hostcall_required = true;
    free(name);
  } else {
    DEBUG_PRINT("Symbol is an indirect function\n");
  }
  return HSA_STATUS_SUCCESS;
}

atmi_status_t Runtime::RegisterModuleFromMemory(
    void *module_bytes, size_t module_size, atmi_place_t place,
    atmi_status_t (*on_deserialized_data)(void *data, size_t size,
                                          void *cb_state),
    void *cb_state) {
  hsa_status_t err;
  int gpu = place.device_id;
  assert(gpu >= 0);

  DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu);
  ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
  hsa_agent_t agent = proc.agent();
  hsa_executable_t executable = {0};
  hsa_profile_t agent_profile;

  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Query the agent profile", get_error_string(err));
    exit(1);
  }
  // FIXME: Assume that every profile is FULL until we understand how to build
  // GCN with base profile
  agent_profile = HSA_PROFILE_FULL;
  /* Create the empty executable.  */
  err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
                              &executable);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Create the executable", get_error_string(err));
    exit(1);
  }

  bool module_load_success = false;
  do // Existing control flow used continue, preserve that for this patch
  {
    {
      // Some metadata is not available through the ROCr API, so use the
      // custom code object metadata parsing to collect it

      err = get_code_object_custom_metadata(module_bytes, module_size, gpu);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Getting custom code object metadata",
                    get_error_string(err));
        continue;
      }

      // Deserialize code object.
      hsa_code_object_t code_object = {0};
      err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
                                        &code_object);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Code Object Deserialization", get_error_string(err));
        continue;
      }
      assert(0 != code_object.handle);

      // Mutating the device image here avoids another allocation & memcpy
      void *code_object_alloc_data =
          reinterpret_cast<void *>(code_object.handle);
      atmi_status_t atmi_err =
          on_deserialized_data(code_object_alloc_data, module_size, cb_state);
      if (atmi_err != ATMI_STATUS_SUCCESS) {
        printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
               "Error in deserialized_data callback",
               get_atmi_error_string(atmi_err));
        exit(1);
      }

      /* Load the code object.  */
      err =
          hsa_executable_load_code_object(executable, agent, code_object, NULL);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Loading the code object", get_error_string(err));
        continue;
      }

      // cannot iterate over symbols until executable is frozen
    }
    module_load_success = true;
  } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
  if (module_load_success) {
    /* Freeze the executable; it can now be queried for symbols.  */
    err = hsa_executable_freeze(executable, "");
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Freeze the executable", get_error_string(err));
      exit(1);
    }

    err = hsa_executable_iterate_symbols(executable, populate_InfoTables,
                                         static_cast<void *>(&gpu));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterating over symbols for executable", get_error_string(err));
      exit(1);
    }

    // save the executable and destroy it during finalize
    g_executables.push_back(executable);
    return ATMI_STATUS_SUCCESS;
  } else {
    return ATMI_STATUS_ERROR;
  }
}
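
// Hedged usage sketch for module registration; 'image', 'size', and the
// no-op callback are invented for illustration, and the exact way a Runtime
// instance is obtained depends on the declarations in rt.h:
//
//   atmi_status_t no_op(void *, size_t, void *) { return ATMI_STATUS_SUCCESS; }
//   ...
//   atmi_place_t place = ATMI_PLACE_GPU(0, 0);
//   runtime.RegisterModuleFromMemory(image, size, place, no_op, nullptr);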

} // namespace core