/*===--------------------------------------------------------------------------
 *              ATMI (Asynchronous Task and Memory Interface)
 *
 * This file is distributed under the MIT License. See LICENSE.txt for details.
 *===------------------------------------------------------------------------*/
#include <gelf.h>
#include <libelf.h>

#include <cassert>
#include <cstdarg>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "internal.h"
#include "machine.h"
#include "rt.h"

#include "msgpack.h"

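// Bail out of code-object parsing with an invalid-code-object error whenever a
// msgpack lookup below reports a failure.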
#define msgpackErrorCheck(msg, status)                                         \
  if (status != 0) {                                                           \
    printf("[%s:%d] %s failed\n", __FILE__, __LINE__, #msg);                   \
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;                               \
  } else {                                                                     \
  }

typedef unsigned char *address;
/*
 * Note descriptors.
 */
typedef struct {
  uint32_t n_namesz; /* Length of note's name. */
  uint32_t n_descsz; /* Length of note's value. */
  uint32_t n_type;   /* Type of note. */
  // then name
  // then padding, optional
  // then desc, at 4 byte alignment (not 8, despite being elf64)
} Elf_Note;

// The following include file and the structs/enums it declares have been
// replicated on a per-use basis below. For example,
// llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, but for now we
// care only about kernargSegmentSize_, so our KernelMD implementation carries
// just that field. We chose to replicate rather than include so that compiling
// the runtime does not force a dependency on LLVM_INCLUDE_DIR.
// #include "llvm/Support/AMDGPUMetadata.h"
// typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
// using llvm::AMDGPU::HSAMD::AccessQualifier;
// using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
// using llvm::AMDGPU::HSAMD::ValueKind;
// using llvm::AMDGPU::HSAMD::ValueType;

class KernelArgMD {
public:
  enum class ValueKind {
    HiddenGlobalOffsetX,
    HiddenGlobalOffsetY,
    HiddenGlobalOffsetZ,
    HiddenNone,
    HiddenPrintfBuffer,
    HiddenDefaultQueue,
    HiddenCompletionAction,
    HiddenMultiGridSyncArg,
    HiddenHostcallBuffer,
    Unknown
  };

  KernelArgMD()
      : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
        align_(0), valueKind_(ValueKind::Unknown) {}

  // fields
  std::string name_;
  std::string typeName_;
  uint32_t size_;
  uint32_t offset_;
  uint32_t align_;
  ValueKind valueKind_;
};

class KernelMD {
public:
  KernelMD() : kernargSegmentSize_(0ull) {}

  // fields
  uint64_t kernargSegmentSize_;
};

static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
    //    Including only those fields that are relevant to the runtime.
    //    {"ByValue", KernelArgMD::ValueKind::ByValue},
    //    {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"DynamicSharedPointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"Sampler", KernelArgMD::ValueKind::Sampler},
    //    {"Image", KernelArgMD::ValueKind::Image},
    //    {"Pipe", KernelArgMD::ValueKind::Pipe},
    //    {"Queue", KernelArgMD::ValueKind::Queue},
    {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
    {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
    {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
    // v3
    //    {"by_value", KernelArgMD::ValueKind::ByValue},
    //    {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"dynamic_shared_pointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"sampler", KernelArgMD::ValueKind::Sampler},
    //    {"image", KernelArgMD::ValueKind::Image},
    //    {"pipe", KernelArgMD::ValueKind::Pipe},
    //    {"queue", KernelArgMD::ValueKind::Queue},
    {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
    {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"hidden_completion_action",
     KernelArgMD::ValueKind::HiddenCompletionAction},
    {"hidden_multigrid_sync_arg",
     KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
};

// public variables -- TODO(ashwinma) move these to a runtime object?
atmi_machine_t g_atmi_machine;
ATLMachine g_atl_machine;

hsa_region_t atl_gpu_kernarg_region;
std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools;
hsa_region_t atl_cpu_kernarg_region;

static std::vector<hsa_executable_t> g_executables;

std::map<std::string, std::string> KernelNameMap;
std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;

bool g_atmi_initialized = false;
bool g_atmi_hostcall_required = false;

struct timespec context_init_time;
int context_init_time_init = 0;

/*
   atlc holds all of the internal global state.
   The structure atl_context_t is defined in atl_internal.h.
   Most references use the global structure atlc directly; the pointer
   atlc_p is equivalent, i.e. atlc_p->field is the same as atlc.field.
*/

atl_context_t atlc = {.struct_initialized = false};
atl_context_t *atlc_p = NULL;

hsa_signal_t IdentityCopySignal;

namespace core {
/* Machine Info */
atmi_machine_t *Runtime::GetMachineInfo() {
  if (!atlc.g_hsa_initialized)
    return NULL;
  return &g_atmi_machine;
}

void atl_set_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = true;
}

void atl_reset_atmi_initialized() {
  // FIXME: thread safe? locks?
  g_atmi_initialized = false;
}

bool atl_is_atmi_initialized() { return g_atmi_initialized; }

void allow_access_to_all_gpu_agents(void *ptr) {
  hsa_status_t err;
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  std::vector<hsa_agent_t> agents;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    agents.push_back(gpu_procs[i].agent());
  }
  err = hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr);
  ErrorCheck(Allow agents ptr access, err);
}

atmi_status_t Runtime::Initialize() {
  atmi_devtype_t devtype = ATMI_DEVTYPE_GPU;
  if (atl_is_atmi_initialized())
    return ATMI_STATUS_SUCCESS;

  if (devtype == ATMI_DEVTYPE_ALL || devtype & ATMI_DEVTYPE_GPU) {
    ATMIErrorCheck(GPU context init, atl_init_gpu_context());
  }

  atl_set_atmi_initialized();
  return ATMI_STATUS_SUCCESS;
}

atmi_status_t Runtime::Finalize() {
  // TODO(ashwinma): Finalize all processors, queues, signals, kernarg memory
  // regions
  hsa_status_t err;

  for (uint32_t i = 0; i < g_executables.size(); i++) {
    err = hsa_executable_destroy(g_executables[i]);
    ErrorCheck(Destroying executable, err);
  }

  // Finalize queues
  for (auto &p : g_atl_machine.processors<ATLCPUProcessor>()) {
    p.destroyQueues();
  }
  for (auto &p : g_atl_machine.processors<ATLGPUProcessor>()) {
    p.destroyQueues();
  }

  for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) {
    SymbolInfoTable[i].clear();
  }
  SymbolInfoTable.clear();
  for (uint32_t i = 0; i < KernelInfoTable.size(); i++) {
    KernelInfoTable[i].clear();
  }
  KernelInfoTable.clear();

  atl_reset_atmi_initialized();
  err = hsa_shut_down();
  ErrorCheck(Shutting down HSA, err);

  return ATMI_STATUS_SUCCESS;
}

void atmi_init_context_structs() {
  atlc_p = &atlc;
  atlc.struct_initialized = true; /* This only gets called one time */
  atlc.g_hsa_initialized = false;
  atlc.g_gpu_initialized = false;
  atlc.g_tasks_initialized = false;
}

// Callback for hsa_amd_agent_iterate_memory_pools: records each allocatable
// pool on the given processor, classifying it as fine- or coarse-grained and
// remembering pools flagged for kernarg use.
static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
                                         void *data) {
  ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
  hsa_status_t err = HSA_STATUS_SUCCESS;
  // Check if the memory_pool is allowed to allocate, i.e. do not return group
  // memory
  bool alloc_allowed = false;
  err = hsa_amd_memory_pool_get_info(
      memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
      &alloc_allowed);
  ErrorCheck(Alloc allowed in memory pool check, err);
  if (alloc_allowed) {
    uint32_t global_flag = 0;
    err = hsa_amd_memory_pool_get_info(
        memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
    ErrorCheck(Get memory pool info, err);
    if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
      proc->addMemory(new_mem);
      if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) {
        DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle);
        atl_gpu_kernarg_pools.push_back(memory_pool);
      }
    } else {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
      proc->addMemory(new_mem);
    }
  }

  return err;
}

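// Callback for hsa_iterate_agents: wraps each CPU/GPU agent in an
// ATLProcessor, gathers its memory pools, and registers it with
// g_atl_machine.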
static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
  hsa_status_t err = HSA_STATUS_SUCCESS;
  hsa_device_type_t device_type;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
  ErrorCheck(Get device type info, err);
  switch (device_type) {
  case HSA_DEVICE_TYPE_CPU: {
    ATLCPUProcessor new_proc(agent);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    ErrorCheck(Iterate all memory pools, err);
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_GPU: {
    hsa_profile_t profile;
    err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
    ErrorCheck(Query the agent profile, err);
    atmi_devtype_t gpu_type;
    gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
    ATLGPUProcessor new_proc(agent, gpu_type);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    ErrorCheck(Iterate all memory pools, err);
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_DSP: {
    err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  } break;
  }

  return err;
}

hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (segment != HSA_REGION_SEGMENT_GLOBAL) {
    return HSA_STATUS_SUCCESS;
  }
  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }
  return HSA_STATUS_SUCCESS;
}

/* Determines if a memory region can be used for kernarg allocations.  */
static hsa_status_t get_kernarg_memory_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (HSA_REGION_SEGMENT_GLOBAL != segment) {
    return HSA_STATUS_SUCCESS;
  }

  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }

  return HSA_STATUS_SUCCESS;
}

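// Enumerates agents and memory pools, fills g_atl_machine / g_atmi_machine
// with per-device information, and locates the CPU fine-grained region and
// the GPU kernarg region used for subsequent allocations.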
static hsa_status_t init_compute_and_memory() {
  hsa_status_t err;

  /* Iterate over the agents and pick the gpu agent */
  err = hsa_iterate_agents(get_agent_info, NULL);
  if (err == HSA_STATUS_INFO_BREAK) {
    err = HSA_STATUS_SUCCESS;
  }
  ErrorCheck(Getting a gpu agent, err);
  if (err != HSA_STATUS_SUCCESS)
    return err;

  /* Init all devices or individual device types? */
  std::vector<ATLCPUProcessor> &cpu_procs =
      g_atl_machine.processors<ATLCPUProcessor>();
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  /* For CPU memory pools, add other devices that can access them directly
   * or indirectly */
  for (auto &cpu_proc : cpu_procs) {
    for (auto &cpu_mem : cpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = cpu_mem.memory();
      for (auto &gpu_proc : gpu_procs) {
        hsa_agent_t agent = gpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          gpu_proc.addMemory(cpu_mem);
        }
      }
    }
  }

  /* FIXME: are the below combinations of procs and memory pools needed?
   * all to all compare procs with their memory pools and add those memory
   * pools that are accessible by the target procs */
  for (auto &gpu_proc : gpu_procs) {
    for (auto &gpu_mem : gpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = gpu_mem.memory();
      for (auto &cpu_proc : cpu_procs) {
        hsa_agent_t agent = cpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // this means not NEVER, but could be YES or NO
          // add this memory pool to the proc
          cpu_proc.addMemory(gpu_mem);
        }
      }
    }
  }

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size();
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size();

  size_t num_procs = cpu_procs.size() + gpu_procs.size();
  // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs *
  // sizeof(atmi_device_t));
  atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>(
      malloc(num_procs * sizeof(atmi_device_t)));
  int num_iGPUs = 0;
  int num_dGPUs = 0;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
      num_iGPUs++;
    else
      num_dGPUs++;
  }
  assert(num_iGPUs + num_dGPUs == gpu_procs.size() &&
         "Number of dGPUs and iGPUs do not add up");
  DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
  DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
  DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
  DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs;
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs;

  int cpus_begin = 0;
  int cpus_end = cpu_procs.size();
  int gpus_begin = cpu_procs.size();
  int gpus_end = cpu_procs.size() + gpu_procs.size();
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin];
  int proc_index = 0;
  for (int i = cpus_begin; i < cpus_end; i++) {
    all_devices[i].type = cpu_procs[proc_index].type();
    all_devices[i].core_count = cpu_procs[proc_index].num_cus();

    std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("CPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    all_devices[i].memory_count = memories.size();
    proc_index++;
  }
  proc_index = 0;
  for (int i = gpus_begin; i < gpus_end; i++) {
    all_devices[i].type = gpu_procs[proc_index].type();
    all_devices[i].core_count = gpu_procs[proc_index].num_cus();

    std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("GPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    all_devices[i].memory_count = memories.size();
    proc_index++;
  }
  proc_index = 0;
  atl_cpu_kernarg_region.handle = (uint64_t)-1;
  if (cpu_procs.size() > 0) {
    err = hsa_agent_iterate_regions(
        cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region);
    if (err == HSA_STATUS_INFO_BREAK) {
      err = HSA_STATUS_SUCCESS;
    }
    err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    ErrorCheck(Finding a CPU kernarg memory region handle, err);
  }
  /* Find a memory region that supports kernel arguments.  */
  atl_gpu_kernarg_region.handle = (uint64_t)-1;
  if (gpu_procs.size() > 0) {
    hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region,
                              &atl_gpu_kernarg_region);
    err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    ErrorCheck(Finding a kernarg memory region, err);
  }
  if (num_procs > 0)
    return HSA_STATUS_SUCCESS;
  else
    return HSA_STATUS_ERROR_NOT_INITIALIZED;
}

hsa_status_t init_hsa() {
  if (atlc.g_hsa_initialized == false) {
    DEBUG_PRINT("Initializing HSA...");
    hsa_status_t err = hsa_init();
    ErrorCheck(Initializing the hsa runtime, err);
    if (err != HSA_STATUS_SUCCESS)
      return err;

    err = init_compute_and_memory();
    if (err != HSA_STATUS_SUCCESS)
      return err;
    ErrorCheck(After initializing compute and memory, err);

    int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
    KernelInfoTable.resize(gpu_count);
    SymbolInfoTable.resize(gpu_count);
    for (uint32_t i = 0; i < SymbolInfoTable.size(); i++)
      SymbolInfoTable[i].clear();
    for (uint32_t i = 0; i < KernelInfoTable.size(); i++)
      KernelInfoTable[i].clear();
    atlc.g_hsa_initialized = true;
    DEBUG_PRINT("done\n");
  }
  return HSA_STATUS_SUCCESS;
}

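// One-time task-layer setup: collects the GPU agents and creates the HSA
// signal used for identity copies.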
void init_tasks() {
  if (atlc.g_tasks_initialized != false)
    return;
  hsa_status_t err;
  std::vector<hsa_agent_t> gpu_agents;
  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    atmi_place_t place = ATMI_PLACE_GPU(0, gpu);
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
    gpu_agents.push_back(proc.agent());
  }
  err = hsa_signal_create(0, 0, NULL, &IdentityCopySignal);
  ErrorCheck(Creating a HSA signal, err);
  atlc.g_tasks_initialized = true;
}

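// System event handler registered via hsa_amd_register_system_event_handler:
// decodes GPU memory-fault events into a readable message and returns
// HSA_STATUS_ERROR so the fault is not silently ignored.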
hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
#if (ROCM_VERSION_MAJOR >= 3) ||                                               \
    (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
  if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
#else
  if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
#endif
    hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
    // memory_fault.agent
    // memory_fault.virtual_address
    // memory_fault.fault_reason_mask
    // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address);
    std::stringstream stream;
    stream << std::hex << (uintptr_t)memory_fault.virtual_address;
    std::string addr("0x" + stream.str());

    std::string err_string = "[GPU Memory Error] Addr: " + addr;
    err_string += " Reason: ";
    // The bits below follow hsa_amd_memory_fault_reason_t (bit 0 = page not
    // present, bit 1 = read-only, bit 2 = NX, bit 3 = host access only,
    // bit 4 = ECC failure, bit 5 = imprecise fault address).
    if (!(memory_fault.fault_reason_mask & 0x0000003F)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask & 0x00000001)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & 0x00000002)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & 0x00000004)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & 0x00000008)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & 0x00000010)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & 0x00000020)
        err_string += "Can't determine the exact fault address. ";
    }
    fprintf(stderr, "%s\n", err_string.c_str());
    return HSA_STATUS_ERROR;
  }
  return HSA_STATUS_SUCCESS;
}

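// Lazily brings up the GPU context: initializes HSA, creates per-GPU queues,
// registers the memory-fault handler, and runs the task-layer setup.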
atmi_status_t atl_init_gpu_context() {
  if (atlc.struct_initialized == false)
    atmi_init_context_structs();
  if (atlc.g_gpu_initialized != false)
    return ATMI_STATUS_SUCCESS;

  hsa_status_t err;
  err = init_hsa();
  if (err != HSA_STATUS_SUCCESS)
    return ATMI_STATUS_ERROR;

  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    atmi_place_t place = ATMI_PLACE_GPU(0, gpu);
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
    int num_gpu_queues = core::Runtime::getInstance().getNumGPUQueues();
    if (num_gpu_queues == -1) {
      num_gpu_queues = proc.num_cus();
      num_gpu_queues = (num_gpu_queues > 8) ? 8 : num_gpu_queues;
    }
    proc.createQueues(num_gpu_queues);
  }

  if (context_init_time_init == 0) {
    clock_gettime(CLOCK_MONOTONIC_RAW, &context_init_time);
    context_init_time_init = 1;
  }

  err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
  ErrorCheck(Registering the system for memory faults, err);

  init_tasks();
  atlc.g_gpu_initialized = true;
  return ATMI_STATUS_SUCCESS;
}

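// Returns true for compiler-generated ("hidden") kernel arguments that do not
// correspond to user-visible parameters.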
bool isImplicit(KernelArgMD::ValueKind value_kind) {
  switch (value_kind) {
  case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
  case KernelArgMD::ValueKind::HiddenNone:
  case KernelArgMD::ValueKind::HiddenPrintfBuffer:
  case KernelArgMD::ValueKind::HiddenDefaultQueue:
  case KernelArgMD::ValueKind::HiddenCompletionAction:
  case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
  case KernelArgMD::ValueKind::HiddenHostcallBuffer:
    return true;
  default:
    return false;
  }
}

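// Scans the ELF program headers of the code object for the PT_NOTE segment
// containing the NT_AMDGPU_METADATA note (type 32, name "AMDGPU") and returns
// the [begin, end) range of its msgpack-encoded payload, or {nullptr, nullptr}
// on failure.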
static std::pair<unsigned char *, unsigned char *>
find_metadata(void *binary, size_t binSize) {
  std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};

  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (elf_kind(e) != ELF_K_ELF) {
    return failure;
  }

  size_t numpHdrs;
  if (elf_getphdrnum(e, &numpHdrs) != 0) {
    return failure;
  }

  for (size_t i = 0; i < numpHdrs; ++i) {
    GElf_Phdr pHdr;
    if (gelf_getphdr(e, i, &pHdr) != &pHdr) {
      continue;
    }
    // Look for the runtime metadata note
    if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
      // Iterate over the notes in this segment
      address ptr = (address)binary + pHdr.p_offset;
      address segmentEnd = ptr + pHdr.p_filesz;

      while (ptr < segmentEnd) {
        Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
        address name = (address)&note[1];

        if (note->n_type == 7 || note->n_type == 8) {
          return failure;
        } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
                   note->n_namesz == sizeof "AMD" &&
                   !memcmp(name, "AMD", note->n_namesz)) {
          // code object v2 uses yaml metadata, no longer supported
          return failure;
        } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
                   note->n_namesz == sizeof "AMDGPU" &&
                   !memcmp(name, "AMDGPU", note->n_namesz)) {

          // value is padded to 4 byte alignment, may want to move end up to
          // match
          size_t offset = sizeof(uint32_t) * 3 /* fields */
                          + sizeof("AMDGPU")   /* name */
                          + 1 /* padding to 4 byte alignment */;

          // Including the trailing padding means both pointers are 4 bytes
          // aligned, which may be useful later.
          unsigned char *metadata_start = (unsigned char *)ptr + offset;
          unsigned char *metadata_end =
              metadata_start + core::alignUp(note->n_descsz, 4);
          return {metadata_start, metadata_end};
        }
        ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
               core::alignUp(note->n_descsz, sizeof(int));
      }
    }
  }

  return failure;
}

namespace {
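// Small msgpack lookup helpers for the metadata map: each returns 0 on
// success and non-zero if the requested key or array index was not found.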
int map_lookup_array(msgpack::byte_range message, const char *needle,
                     msgpack::byte_range *res, uint64_t *size) {
  unsigned count = 0;
  struct s : msgpack::functors_defaults<s> {
    s(unsigned &count, uint64_t *size) : count(count), size(size) {}
    unsigned &count;
    uint64_t *size;
    const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
      count++;
      *size = N;
      return bytes.end;
    }
  };

  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           // If the message is an array, record number of
                           // elements in *size
                           msgpack::handle_msgpack<s>(value, {count, size});
                           // return the whole array
                           *res = value;
                         }
                       });
  // Only claim success if exactly one key/array pair matched
  return count != 1;
}

int map_lookup_string(msgpack::byte_range message, const char *needle,
                      std::string *res) {
  unsigned count = 0;
  struct s : public msgpack::functors_defaults<s> {
    s(unsigned &count, std::string *res) : count(count), res(res) {}
    unsigned &count;
    std::string *res;
    void handle_string(size_t N, const unsigned char *str) {
      count++;
      *res = std::string(str, str + N);
    }
  };
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::handle_msgpack<s>(value, {count, res});
                         }
                       });
  return count != 1;
}

int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
                        uint64_t *res) {
  unsigned count = 0;
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::foronly_unsigned(value, [&](uint64_t x) {
                             count++;
                             *res = x;
                           });
                         }
                       });
  return count != 1;
}

int array_lookup_element(msgpack::byte_range message, uint64_t elt,
                         msgpack::byte_range *res) {
  int rc = 1;
  uint64_t i = 0;
  msgpack::foreach_array(message, [&](msgpack::byte_range value) {
    if (i == elt) {
      *res = value;
      rc = 0;
    }
    i++;
  });
  return rc;
}

int populate_kernelArgMD(msgpack::byte_range args_element,
                         KernelArgMD *kernelarg) {
  using namespace msgpack;
  int error = 0;
  foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
    if (message_is_string(key, ".name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->name_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".type_name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->typeName_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".size")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
    } else if (message_is_string(key, ".offset")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
    } else if (message_is_string(key, ".value_kind")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        std::string s = std::string(str, str + N);
        auto itValueKind = ArgValueKind.find(s);
        if (itValueKind != ArgValueKind.end()) {
          kernelarg->valueKind_ = itValueKind->second;
        }
      });
    }
  });
  return error;
}
} // namespace

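// The msgpack blob located by find_metadata() is a map. An illustrative
// sketch of the subset of keys read below (the values shown are made up for
// illustration only):
//
//   "amdhsa.kernels": [
//     { ".name": "foo", ".symbol": "foo.kd", ".language": "OpenCL C",
//       ".kernarg_segment_size": 64,
//       ".args": [ { ".name": "a", ".size": 8, ".offset": 0,
//                    ".value_kind": "global_buffer" }, ... ] },
//     ...
//   ]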
static hsa_status_t get_code_object_custom_metadata(void *binary,
                                                    size_t binSize, int gpu) {
  // Parse the code object metadata (code object v3 uses different keys from
  // v2). The kernel name is not the same as the symbol name, so a
  // symbol->name map is also built here.

  std::pair<unsigned char *, unsigned char *> metadata =
      find_metadata(binary, binSize);
  if (!metadata.first) {
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  uint64_t kernelsSize = 0;
  int msgpack_errors = 0;
  msgpack::byte_range kernel_array;
  msgpack_errors =
      map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
                       &kernel_array, &kernelsSize);
  msgpackErrorCheck(kernels lookup in program metadata, msgpack_errors);

  for (size_t i = 0; i < kernelsSize; i++) {
    assert(msgpack_errors == 0);
    std::string kernelName;
    std::string languageName;
    std::string symbolName;

    msgpack::byte_range element;
    msgpack_errors += array_lookup_element(kernel_array, i, &element);
    msgpackErrorCheck(element lookup in kernel metadata, msgpack_errors);

    msgpack_errors += map_lookup_string(element, ".name", &kernelName);
    msgpack_errors += map_lookup_string(element, ".language", &languageName);
    msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
    msgpackErrorCheck(strings lookup in kernel metadata, msgpack_errors);

    atl_kernel_info_t info = {0, 0, 0, 0, 0, {}, {}, {}};
    size_t kernel_explicit_args_size = 0;
    uint64_t kernel_segment_size;
    msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
                                          &kernel_segment_size);
    msgpackErrorCheck(kernarg segment size metadata lookup in kernel metadata,
                      msgpack_errors);

    // create a map from symbol to name
    DEBUG_PRINT("Kernel symbol %s; Name: %s; Size: %lu\n", symbolName.c_str(),
                kernelName.c_str(), kernel_segment_size);
    KernelNameMap[symbolName] = kernelName;

    bool hasHiddenArgs = false;
    if (kernel_segment_size > 0) {
      uint64_t argsSize;
      size_t offset = 0;

      msgpack::byte_range args_array;
      msgpack_errors +=
          map_lookup_array(element, ".args", &args_array, &argsSize);
      msgpackErrorCheck(kernel args metadata lookup in kernel metadata,
                        msgpack_errors);

      info.num_args = argsSize;

      for (size_t i = 0; i < argsSize; ++i) {
        KernelArgMD lcArg;

        msgpack::byte_range args_element;
        msgpack_errors += array_lookup_element(args_array, i, &args_element);
        msgpackErrorCheck(iterate args map in kernel args metadata,
                          msgpack_errors);

        msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
        msgpackErrorCheck(iterate args map in kernel args metadata,
                          msgpack_errors);

        // TODO(ashwinma): should the below population actions be done only for
        // non-implicit args?
        // populate info with sizes and offsets
        info.arg_sizes.push_back(lcArg.size_);
        // v3 has an offset field instead of an align field
        size_t new_offset = lcArg.offset_;
        size_t padding = new_offset - offset;
        offset = new_offset;
        info.arg_offsets.push_back(lcArg.offset_);
        DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(),
                    lcArg.size_, lcArg.offset_);
        offset += lcArg.size_;

        // check if the arg is a hidden/implicit arg
        // this logic assumes that all hidden args are 8-byte aligned
        if (!isImplicit(lcArg.valueKind_)) {
          kernel_explicit_args_size += lcArg.size_;
        } else {
          hasHiddenArgs = true;
        }
        kernel_explicit_args_size += padding;
      }
    }

    // Account for the implicit args (e.g. global offset x/y/z, pipe pointer).
    // ATMI does not count the compiler-generated implicit args; it discounts
    // them and appends its own atmi_implicit_args_t instead.
    info.kernel_segment_size =
        (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
        sizeof(atmi_implicit_args_t);
    DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
                kernel_segment_size, info.kernel_segment_size);

    // kernel received, now add it to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
  }

  return HSA_STATUS_SUCCESS;
}

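// Callback for hsa_executable_iterate_symbols: for kernels, fills in dispatch
// information (kernel object handle, group/private segment sizes) on the
// entry already created in KernelInfoTable; for variables, records address
// and size in SymbolInfoTable and registers the allocation.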
static hsa_status_t populate_InfoTables(hsa_executable_t executable,
                                        hsa_executable_symbol_t symbol,
                                        void *data) {
  int gpu = *static_cast<int *>(data);
  hsa_symbol_kind_t type;

  uint32_t name_length;
  hsa_status_t err;
  err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
                                       &type);
  ErrorCheck(Symbol info extraction, err);
  DEBUG_PRINT("Exec Symbol type: %d\n", type);
  if (type == HSA_SYMBOL_KIND_KERNEL) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    ErrorCheck(Symbol info extraction, err);
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    ErrorCheck(Symbol info extraction, err);
    name[name_length] = 0;

    if (KernelNameMap.find(std::string(name)) == KernelNameMap.end()) {
      // did not find kernel name in the kernel map; this can happen only
      // if the ROCr API for getting symbol info (name) is different from
      // the comgr method of getting symbol info
      ErrorCheck(Invalid kernel name, HSA_STATUS_ERROR_INVALID_CODE_OBJECT);
    }
    atl_kernel_info_t info;
    std::string kernelName = KernelNameMap[std::string(name)];
    // by now, the kernel info table should already have an entry
    // because the non-ROCr custom code object parsing is called before
    // iterating over the code object symbols using ROCr
    if (KernelInfoTable[gpu].find(kernelName) == KernelInfoTable[gpu].end()) {
      ErrorCheck(Finding the entry kernel info table,
                 HSA_STATUS_ERROR_INVALID_CODE_OBJECT);
    }
    // found, so assign and update
    info = KernelInfoTable[gpu][kernelName];

    /* Extract dispatch information from the symbol */
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
        &(info.kernel_object));
    ErrorCheck(Extracting the symbol from the executable, err);
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
        &(info.group_segment_size));
    ErrorCheck(Extracting the group segment size from the executable, err);
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
        &(info.private_segment_size));
    ErrorCheck(Extracting the private segment from the executable, err);

    DEBUG_PRINT(
        "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
        "kernarg\n",
        kernelName.c_str(), info.kernel_object, info.group_segment_size,
        info.private_segment_size, info.kernel_segment_size);

    // assign it back to the kernel info table
    KernelInfoTable[gpu][kernelName] = info;
    free(name);
  } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    ErrorCheck(Symbol info extraction, err);
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    ErrorCheck(Symbol info extraction, err);
    name[name_length] = 0;

    atl_symbol_info_t info;

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
    ErrorCheck(Symbol info address extraction, err);

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
    ErrorCheck(Symbol info size extraction, err);

    atmi_mem_place_t place = ATMI_MEM_PLACE(ATMI_DEVTYPE_GPU, gpu, 0);
    DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
                info.size);
    register_allocation(reinterpret_cast<void *>(info.addr), (size_t)info.size,
                        place);
    SymbolInfoTable[gpu][std::string(name)] = info;
    if (strcmp(name, "needs_hostcall_buffer") == 0)
      g_atmi_hostcall_required = true;
    free(name);
  } else {
    DEBUG_PRINT("Symbol is an indirect function\n");
  }
  return HSA_STATUS_SUCCESS;
}

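// Loads a code object image onto the given GPU: parses the custom metadata,
// deserializes the code object, lets the caller patch the device image in
// place via on_deserialized_data, loads it into a new executable, freezes the
// executable, and then iterates its symbols to populate the info tables.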
atmi_status_t Runtime::RegisterModuleFromMemory(
    void *module_bytes, size_t module_size, atmi_place_t place,
    atmi_status_t (*on_deserialized_data)(void *data, size_t size,
                                          void *cb_state),
    void *cb_state) {
  hsa_status_t err;
  int gpu = place.device_id;
  assert(gpu >= 0);

  DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu);
  ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
  hsa_agent_t agent = proc.agent();
  hsa_executable_t executable = {0};
  hsa_profile_t agent_profile;

  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
  ErrorCheck(Query the agent profile, err);
  // FIXME: Assume that every profile is FULL until we understand how to build
  // GCN with base profile
  agent_profile = HSA_PROFILE_FULL;
  /* Create the empty executable.  */
  err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
                              &executable);
  ErrorCheck(Create the executable, err);

  bool module_load_success = false;
  do // Existing control flow used continue, preserve that for this patch
  {
    {
      // Some metadata info is not available through the ROCr API, so use
      // custom code object metadata parsing to collect it

      err = get_code_object_custom_metadata(module_bytes, module_size, gpu);
      ErrorCheckAndContinue(Getting custom code object metadata, err);

      // Deserialize code object.
      hsa_code_object_t code_object = {0};
      err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
                                        &code_object);
      ErrorCheckAndContinue(Code Object Deserialization, err);
      assert(0 != code_object.handle);

      // Mutating the device image here avoids another allocation & memcpy
      void *code_object_alloc_data =
          reinterpret_cast<void *>(code_object.handle);
      atmi_status_t atmi_err =
          on_deserialized_data(code_object_alloc_data, module_size, cb_state);
      ATMIErrorCheck(Error in deserialized_data callback, atmi_err);

      /* Load the code object.  */
      err =
          hsa_executable_load_code_object(executable, agent, code_object, NULL);
      ErrorCheckAndContinue(Loading the code object, err);

      // cannot iterate over symbols until executable is frozen
    }
    module_load_success = true;
  } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
  if (module_load_success) {
    /* Freeze the executable; it can now be queried for symbols.  */
    err = hsa_executable_freeze(executable, "");
    ErrorCheck(Freeze the executable, err);

    err = hsa_executable_iterate_symbols(executable, populate_InfoTables,
                                         static_cast<void *>(&gpu));
    ErrorCheck(Iterating over symbols for executable, err);

    // save the executable and destroy during finalize
    g_executables.push_back(executable);
    return ATMI_STATUS_SUCCESS;
  } else {
    return ATMI_STATUS_ERROR;
  }
}

} // namespace core