1 //===--- amdgpu/src/rtl.cpp --------------------------------------- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // RTL for hsa machine
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include <algorithm>
14 #include <assert.h>
15 #include <cstdio>
16 #include <cstdlib>
17 #include <cstring>
18 #include <functional>
19 #include <libelf.h>
20 #include <list>
21 #include <memory>
22 #include <mutex>
23 #include <shared_mutex>
24 #include <unordered_map>
25 #include <vector>
26 
27 // Header from ATMI interface
28 #include "atmi_interop_hsa.h"
29 #include "atmi_runtime.h"
30 
31 #include "internal.h"
32 
33 #include "Debug.h"
34 #include "get_elf_mach_gfx_name.h"
35 #include "omptargetplugin.h"
36 #include "print_tracing.h"
37 
38 #include "llvm/Frontend/OpenMP/OMPGridValues.h"
39 
40 #ifndef TARGET_NAME
41 #error "Missing TARGET_NAME macro"
42 #endif
43 #define DEBUG_PREFIX "Target " GETNAME(TARGET_NAME) " RTL"
44 
45 // hostrpc interface, FIXME: consider moving to its own include these are
46 // statically linked into amdgpu/plugin if present from hostrpc_services.a,
47 // linked as --whole-archive to override the weak symbols that are used to
48 // implement a fallback for toolchains that do not yet have a hostrpc library.
49 extern "C" {
50 unsigned long hostrpc_assign_buffer(hsa_agent_t agent, hsa_queue_t *this_Q,
51                                     uint32_t device_id);
52 hsa_status_t hostrpc_init();
53 hsa_status_t hostrpc_terminate();
54 
55 __attribute__((weak)) hsa_status_t hostrpc_init() { return HSA_STATUS_SUCCESS; }
56 __attribute__((weak)) hsa_status_t hostrpc_terminate() {
57   return HSA_STATUS_SUCCESS;
58 }
59 __attribute__((weak)) unsigned long
60 hostrpc_assign_buffer(hsa_agent_t, hsa_queue_t *, uint32_t device_id) {
61   DP("Warning: Attempting to assign hostrpc to device %u, but hostrpc library "
62      "missing\n",
63      device_id);
64   return 0;
65 }
66 }
67 
68 // Heuristic parameters used for kernel launch
69 // Number of teams per CU to allow scheduling flexibility
70 static const unsigned DefaultTeamsPerCU = 4;
71 
72 int print_kernel_trace;
73 
74 #ifdef OMPTARGET_DEBUG
75 #define check(msg, status)                                                     \
76   if (status != HSA_STATUS_SUCCESS) {                                          \
77     DP(#msg " failed\n");                                                      \
78   } else {                                                                     \
79     DP(#msg " succeeded\n");                                                   \
80   }
81 #else
82 #define check(msg, status)                                                     \
83   {}
84 #endif
85 
86 #include "elf_common.h"
87 
88 namespace core {
89 hsa_status_t RegisterModuleFromMemory(
90     std::map<std::string, atl_kernel_info_t> &KernelInfo,
91     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable, void *, size_t,
92     hsa_agent_t agent,
93     hsa_status_t (*on_deserialized_data)(void *data, size_t size,
94                                          void *cb_state),
95     void *cb_state, std::vector<hsa_executable_t> &HSAExecutables);
96 }
97 
98 namespace hsa {
99 template <typename C> hsa_status_t iterate_agents(C cb) {
100   auto L = [](hsa_agent_t agent, void *data) -> hsa_status_t {
101     C *unwrapped = static_cast<C *>(data);
102     return (*unwrapped)(agent);
103   };
104   return hsa_iterate_agents(L, static_cast<void *>(&cb));
105 }
106 
107 template <typename C>
108 hsa_status_t amd_agent_iterate_memory_pools(hsa_agent_t Agent, C cb) {
109   auto L = [](hsa_amd_memory_pool_t MemoryPool, void *data) -> hsa_status_t {
110     C *unwrapped = static_cast<C *>(data);
111     return (*unwrapped)(MemoryPool);
112   };
113 
114   return hsa_amd_agent_iterate_memory_pools(Agent, L, static_cast<void *>(&cb));
115 }
116 
117 } // namespace hsa
118 
119 /// Keep entries table per device
120 struct FuncOrGblEntryTy {
121   __tgt_target_table Table;
122   std::vector<__tgt_offload_entry> Entries;
123 };
124 
125 enum ExecutionModeType {
126   SPMD,         // constructors, destructors,
127                 // combined constructs (`teams distribute parallel for [simd]`)
128   GENERIC,      // everything else
129   SPMD_GENERIC, // Generic kernel with SPMD execution
130   NONE
131 };
132 
133 struct KernelArgPool {
134 private:
135   static pthread_mutex_t mutex;
136 
137 public:
138   uint32_t kernarg_segment_size;
139   void *kernarg_region = nullptr;
140   std::queue<int> free_kernarg_segments;
141 
142   uint32_t kernarg_size_including_implicit() {
143     return kernarg_segment_size + sizeof(atmi_implicit_args_t);
144   }
145 
146   ~KernelArgPool() {
147     if (kernarg_region) {
148       auto r = hsa_amd_memory_pool_free(kernarg_region);
149       if (r != HSA_STATUS_SUCCESS) {
150         DP("hsa_amd_memory_pool_free failed: %s\n", get_error_string(r));
151       }
152     }
153   }
154 
155   // Can't really copy or move a mutex
156   KernelArgPool() = default;
157   KernelArgPool(const KernelArgPool &) = delete;
158   KernelArgPool(KernelArgPool &&) = delete;
159 
160   KernelArgPool(uint32_t kernarg_segment_size,
161                 hsa_amd_memory_pool_t &memory_pool)
162       : kernarg_segment_size(kernarg_segment_size) {
163 
164     // atmi uses one pool per kernel for all gpus, with a fixed upper size
165     // preserving that exact scheme here, including the queue<int>
166 
167     hsa_status_t err = hsa_amd_memory_pool_allocate(
168         memory_pool, kernarg_size_including_implicit() * MAX_NUM_KERNELS, 0,
169         &kernarg_region);
170 
171     if (err != HSA_STATUS_SUCCESS) {
172       DP("hsa_amd_memory_pool_allocate failed: %s\n", get_error_string(err));
173       kernarg_region = nullptr; // paranoid
174       return;
175     }
176 
177     err = core::allow_access_to_all_gpu_agents(kernarg_region);
178     if (err != HSA_STATUS_SUCCESS) {
179       DP("hsa allow_access_to_all_gpu_agents failed: %s\n",
180          get_error_string(err));
181       auto r = hsa_amd_memory_pool_free(kernarg_region);
182       if (r != HSA_STATUS_SUCCESS) {
183         // if free failed, can't do anything more to resolve it
184         DP("hsa memory poll free failed: %s\n", get_error_string(err));
185       }
186       kernarg_region = nullptr;
187       return;
188     }
189 
190     for (int i = 0; i < MAX_NUM_KERNELS; i++) {
191       free_kernarg_segments.push(i);
192     }
193   }
194 
195   void *allocate(uint64_t arg_num) {
196     assert((arg_num * sizeof(void *)) == kernarg_segment_size);
197     lock l(&mutex);
198     void *res = nullptr;
199     if (!free_kernarg_segments.empty()) {
200 
201       int free_idx = free_kernarg_segments.front();
202       res = static_cast<void *>(static_cast<char *>(kernarg_region) +
203                                 (free_idx * kernarg_size_including_implicit()));
204       assert(free_idx == pointer_to_index(res));
205       free_kernarg_segments.pop();
206     }
207     return res;
208   }
209 
210   void deallocate(void *ptr) {
211     lock l(&mutex);
212     int idx = pointer_to_index(ptr);
213     free_kernarg_segments.push(idx);
214   }
215 
216 private:
217   int pointer_to_index(void *ptr) {
218     ptrdiff_t bytes =
219         static_cast<char *>(ptr) - static_cast<char *>(kernarg_region);
220     assert(bytes >= 0);
221     assert(bytes % kernarg_size_including_implicit() == 0);
222     return bytes / kernarg_size_including_implicit();
223   }
224   struct lock {
225     lock(pthread_mutex_t *m) : m(m) { pthread_mutex_lock(m); }
226     ~lock() { pthread_mutex_unlock(m); }
227     pthread_mutex_t *m;
228   };
229 };
230 pthread_mutex_t KernelArgPool::mutex = PTHREAD_MUTEX_INITIALIZER;
231 
232 std::unordered_map<std::string /*kernel*/, std::unique_ptr<KernelArgPool>>
233     KernelArgPoolMap;
234 
235 /// Use a single entity to encode a kernel and a set of flags
236 struct KernelTy {
237   // execution mode of kernel
238   // 0 - SPMD mode (without master warp)
239   // 1 - Generic mode (with master warp)
240   // 2 - SPMD mode execution with Generic mode semantics.
241   int8_t ExecutionMode;
242   int16_t ConstWGSize;
243   int32_t device_id;
244   void *CallStackAddr = nullptr;
245   const char *Name;
246 
247   KernelTy(int8_t _ExecutionMode, int16_t _ConstWGSize, int32_t _device_id,
248            void *_CallStackAddr, const char *_Name,
249            uint32_t _kernarg_segment_size,
250            hsa_amd_memory_pool_t &KernArgMemoryPool)
251       : ExecutionMode(_ExecutionMode), ConstWGSize(_ConstWGSize),
252         device_id(_device_id), CallStackAddr(_CallStackAddr), Name(_Name) {
253     DP("Construct kernelinfo: ExecMode %d\n", ExecutionMode);
254 
255     std::string N(_Name);
256     if (KernelArgPoolMap.find(N) == KernelArgPoolMap.end()) {
257       KernelArgPoolMap.insert(
258           std::make_pair(N, std::unique_ptr<KernelArgPool>(new KernelArgPool(
259                                 _kernarg_segment_size, KernArgMemoryPool))));
260     }
261   }
262 };
263 
264 /// List that contains all the kernels.
265 /// FIXME: we may need this to be per device and per library.
266 std::list<KernelTy> KernelsList;
267 
268 template <typename Callback> static hsa_status_t FindAgents(Callback CB) {
269 
270   hsa_status_t err =
271       hsa::iterate_agents([&](hsa_agent_t agent) -> hsa_status_t {
272         hsa_device_type_t device_type;
273         // get_info fails iff HSA runtime not yet initialized
274         hsa_status_t err =
275             hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
276         if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS)
277           printf("rtl.cpp: err %d\n", err);
278         assert(err == HSA_STATUS_SUCCESS);
279 
280         CB(device_type, agent);
281         return HSA_STATUS_SUCCESS;
282       });
283 
284   // iterate_agents fails iff HSA runtime not yet initialized
285   if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS) {
286     printf("rtl.cpp: err %d\n", err);
287   }
288 
289   return err;
290 }
291 
292 static void callbackQueue(hsa_status_t status, hsa_queue_t *source,
293                           void *data) {
294   if (status != HSA_STATUS_SUCCESS) {
295     const char *status_string;
296     if (hsa_status_string(status, &status_string) != HSA_STATUS_SUCCESS) {
297       status_string = "unavailable";
298     }
299     fprintf(stderr, "[%s:%d] GPU error in queue %p %d (%s)\n", __FILE__,
300             __LINE__, source, status, status_string);
301     abort();
302   }
303 }
304 
305 namespace core {
306 namespace {
307 void packet_store_release(uint32_t *packet, uint16_t header, uint16_t rest) {
308   __atomic_store_n(packet, header | (rest << 16), __ATOMIC_RELEASE);
309 }
310 
311 uint16_t create_header() {
312   uint16_t header = HSA_PACKET_TYPE_KERNEL_DISPATCH << HSA_PACKET_HEADER_TYPE;
313   header |= HSA_FENCE_SCOPE_SYSTEM << HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE;
314   header |= HSA_FENCE_SCOPE_SYSTEM << HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE;
315   return header;
316 }
317 
318 hsa_status_t addKernArgPool(hsa_amd_memory_pool_t MemoryPool, void *Data) {
319   std::vector<hsa_amd_memory_pool_t> *Result =
320       static_cast<std::vector<hsa_amd_memory_pool_t> *>(Data);
321   bool AllocAllowed = false;
322   hsa_status_t err = hsa_amd_memory_pool_get_info(
323       MemoryPool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
324       &AllocAllowed);
325   if (err != HSA_STATUS_SUCCESS) {
326     fprintf(stderr, "Alloc allowed in memory pool check failed: %s\n",
327             get_error_string(err));
328     return err;
329   }
330 
331   if (!AllocAllowed) {
332     // nothing needs to be done here.
333     return HSA_STATUS_SUCCESS;
334   }
335 
336   uint32_t GlobalFlags = 0;
337   err = hsa_amd_memory_pool_get_info(
338       MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
339   if (err != HSA_STATUS_SUCCESS) {
340     fprintf(stderr, "Get memory pool info failed: %s\n", get_error_string(err));
341     return err;
342   }
343 
344   size_t size = 0;
345   err = hsa_amd_memory_pool_get_info(MemoryPool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
346                                      &size);
347   if (err != HSA_STATUS_SUCCESS) {
348     fprintf(stderr, "Get memory pool size failed: %s\n", get_error_string(err));
349     return err;
350   }
351 
352   if ((GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED) &&
353       (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT) &&
354       size > 0) {
355     Result->push_back(MemoryPool);
356   }
357 
358   return HSA_STATUS_SUCCESS;
359 }
360 
361 std::pair<hsa_status_t, bool>
362 isValidMemoryPool(hsa_amd_memory_pool_t MemoryPool) {
363   bool AllocAllowed = false;
364   hsa_status_t Err = hsa_amd_memory_pool_get_info(
365       MemoryPool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
366       &AllocAllowed);
367   if (Err != HSA_STATUS_SUCCESS) {
368     fprintf(stderr, "Alloc allowed in memory pool check failed: %s\n",
369             get_error_string(Err));
370     return {Err, false};
371   }
372 
373   return {HSA_STATUS_SUCCESS, AllocAllowed};
374 }
375 
376 template <typename AccumulatorFunc>
377 hsa_status_t collectMemoryPools(const std::vector<hsa_agent_t> &Agents,
378                                 AccumulatorFunc Func) {
379   for (int DeviceId = 0; DeviceId < Agents.size(); DeviceId++) {
380     hsa_status_t Err = hsa::amd_agent_iterate_memory_pools(
381         Agents[DeviceId], [&](hsa_amd_memory_pool_t MemoryPool) {
382           hsa_status_t Err;
383           bool Valid = false;
384           std::tie(Err, Valid) = isValidMemoryPool(MemoryPool);
385           if (Err != HSA_STATUS_SUCCESS) {
386             return Err;
387           }
388           if (Valid)
389             Func(MemoryPool, DeviceId);
390           return HSA_STATUS_SUCCESS;
391         });
392 
393     if (Err != HSA_STATUS_SUCCESS) {
394       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
395              "Iterate all memory pools", get_error_string(Err));
396       return Err;
397     }
398   }
399 
400   return HSA_STATUS_SUCCESS;
401 }
402 
403 std::pair<hsa_status_t, hsa_amd_memory_pool_t>
404 FindKernargPool(const std::vector<hsa_agent_t> &HSAAgents) {
405   std::vector<hsa_amd_memory_pool_t> KernArgPools;
406   for (const auto &Agent : HSAAgents) {
407     hsa_status_t err = HSA_STATUS_SUCCESS;
408     err = hsa_amd_agent_iterate_memory_pools(
409         Agent, addKernArgPool, static_cast<void *>(&KernArgPools));
410     if (err != HSA_STATUS_SUCCESS) {
411       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
412              "Iterate all memory pools", get_error_string(err));
413       return {err, hsa_amd_memory_pool_t{}};
414     }
415   }
416 
417   if (KernArgPools.empty()) {
418     fprintf(stderr, "Unable to find any valid kernarg pool\n");
419     return {HSA_STATUS_ERROR, hsa_amd_memory_pool_t{}};
420   }
421 
422   return {HSA_STATUS_SUCCESS, KernArgPools[0]};
423 }
424 
425 } // namespace
426 } // namespace core
427 
428 struct EnvironmentVariables {
429   int NumTeams;
430   int TeamLimit;
431   int TeamThreadLimit;
432   int MaxTeamsDefault;
433 };
434 
435 /// Class containing all the device information
436 class RTLDeviceInfoTy {
437   std::vector<std::list<FuncOrGblEntryTy>> FuncGblEntries;
438   bool HSAInitializeSucceeded = false;
439 
440 public:
441   // load binary populates symbol tables and mutates various global state
442   // run uses those symbol tables
443   std::shared_timed_mutex load_run_lock;
444 
445   int NumberOfDevices = 0;
446 
447   // GPU devices
448   std::vector<hsa_agent_t> HSAAgents;
449   std::vector<hsa_queue_t *> HSAQueues; // one per gpu
450 
451   // CPUs
452   std::vector<hsa_agent_t> CPUAgents;
453 
454   // Device properties
455   std::vector<int> ComputeUnits;
456   std::vector<int> GroupsPerDevice;
457   std::vector<int> ThreadsPerGroup;
458   std::vector<int> WarpSize;
459   std::vector<std::string> GPUName;
460 
461   // OpenMP properties
462   std::vector<int> NumTeams;
463   std::vector<int> NumThreads;
464 
465   // OpenMP Environment properties
466   EnvironmentVariables Env;
467 
468   // OpenMP Requires Flags
469   int64_t RequiresFlags;
470 
471   // Resource pools
472   SignalPoolT FreeSignalPool;
473 
474   bool hostcall_required = false;
475 
476   std::vector<hsa_executable_t> HSAExecutables;
477 
478   std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
479   std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;
480 
481   hsa_amd_memory_pool_t KernArgPool;
482 
483   // fine grained memory pool for host allocations
484   hsa_amd_memory_pool_t HostFineGrainedMemoryPool;
485 
486   // fine and coarse-grained memory pools per offloading device
487   std::vector<hsa_amd_memory_pool_t> DeviceFineGrainedMemoryPools;
488   std::vector<hsa_amd_memory_pool_t> DeviceCoarseGrainedMemoryPools;
489 
490   struct atmiFreePtrDeletor {
491     void operator()(void *p) {
492       core::Runtime::Memfree(p); // ignore failure to free
493     }
494   };
495 
496   // device_State shared across loaded binaries, error if inconsistent size
497   std::vector<std::pair<std::unique_ptr<void, atmiFreePtrDeletor>, uint64_t>>
498       deviceStateStore;
499 
500   static const unsigned HardTeamLimit =
501       (1 << 16) - 1; // 64K needed to fit in uint16
502   static const int DefaultNumTeams = 128;
503   static const int Max_Teams = llvm::omp::AMDGPUGridValues.GV_Max_Teams;
504   static const int Warp_Size = llvm::omp::AMDGPUGridValues.GV_Warp_Size;
505   static const int Max_WG_Size = llvm::omp::AMDGPUGridValues.GV_Max_WG_Size;
506   static const int Default_WG_Size =
507       llvm::omp::AMDGPUGridValues.GV_Default_WG_Size;
508 
509   using MemcpyFunc = hsa_status_t (*)(hsa_signal_t, void *, const void *,
510                                       size_t size, hsa_agent_t,
511                                       hsa_amd_memory_pool_t);
512   hsa_status_t freesignalpool_memcpy(void *dest, const void *src, size_t size,
513                                      MemcpyFunc Func, int32_t deviceId) {
514     hsa_agent_t agent = HSAAgents[deviceId];
515     hsa_signal_t s = FreeSignalPool.pop();
516     if (s.handle == 0) {
517       return HSA_STATUS_ERROR;
518     }
519     hsa_status_t r = Func(s, dest, src, size, agent, HostFineGrainedMemoryPool);
520     FreeSignalPool.push(s);
521     return r;
522   }
523 
524   hsa_status_t freesignalpool_memcpy_d2h(void *dest, const void *src,
525                                          size_t size, int32_t deviceId) {
526     return freesignalpool_memcpy(dest, src, size, atmi_memcpy_d2h, deviceId);
527   }
528 
529   hsa_status_t freesignalpool_memcpy_h2d(void *dest, const void *src,
530                                          size_t size, int32_t deviceId) {
531     return freesignalpool_memcpy(dest, src, size, atmi_memcpy_h2d, deviceId);
532   }
533 
534   // Record entry point associated with device
535   void addOffloadEntry(int32_t device_id, __tgt_offload_entry entry) {
536     assert(device_id < (int32_t)FuncGblEntries.size() &&
537            "Unexpected device id!");
538     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
539 
540     E.Entries.push_back(entry);
541   }
542 
543   // Return true if the entry is associated with device
544   bool findOffloadEntry(int32_t device_id, void *addr) {
545     assert(device_id < (int32_t)FuncGblEntries.size() &&
546            "Unexpected device id!");
547     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
548 
549     for (auto &it : E.Entries) {
550       if (it.addr == addr)
551         return true;
552     }
553 
554     return false;
555   }
556 
557   // Return the pointer to the target entries table
558   __tgt_target_table *getOffloadEntriesTable(int32_t device_id) {
559     assert(device_id < (int32_t)FuncGblEntries.size() &&
560            "Unexpected device id!");
561     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
562 
563     int32_t size = E.Entries.size();
564 
565     // Table is empty
566     if (!size)
567       return 0;
568 
569     __tgt_offload_entry *begin = &E.Entries[0];
570     __tgt_offload_entry *end = &E.Entries[size - 1];
571 
572     // Update table info according to the entries and return the pointer
573     E.Table.EntriesBegin = begin;
574     E.Table.EntriesEnd = ++end;
575 
576     return &E.Table;
577   }
578 
579   // Clear entries table for a device
580   void clearOffloadEntriesTable(int device_id) {
581     assert(device_id < (int32_t)FuncGblEntries.size() &&
582            "Unexpected device id!");
583     FuncGblEntries[device_id].emplace_back();
584     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
585     // KernelArgPoolMap.clear();
586     E.Entries.clear();
587     E.Table.EntriesBegin = E.Table.EntriesEnd = 0;
588   }
589 
590   hsa_status_t addDeviceMemoryPool(hsa_amd_memory_pool_t MemoryPool,
591                                    int DeviceId) {
592     assert(DeviceId < DeviceFineGrainedMemoryPools.size() && "Error here.");
593     uint32_t GlobalFlags = 0;
594     hsa_status_t Err = hsa_amd_memory_pool_get_info(
595         MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
596 
597     if (Err != HSA_STATUS_SUCCESS) {
598       return Err;
599     }
600 
601     if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED) {
602       DeviceFineGrainedMemoryPools[DeviceId] = MemoryPool;
603     } else if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED) {
604       DeviceCoarseGrainedMemoryPools[DeviceId] = MemoryPool;
605     }
606 
607     return HSA_STATUS_SUCCESS;
608   }
609 
610   hsa_status_t addHostMemoryPool(hsa_amd_memory_pool_t MemoryPool,
611                                  int DeviceId) {
612     uint32_t GlobalFlags = 0;
613     hsa_status_t Err = hsa_amd_memory_pool_get_info(
614         MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
615 
616     if (Err != HSA_STATUS_SUCCESS) {
617       return Err;
618     }
619 
620     uint32_t Size;
621     Err = hsa_amd_memory_pool_get_info(MemoryPool,
622                                        HSA_AMD_MEMORY_POOL_INFO_SIZE, &Size);
623     if (Err != HSA_STATUS_SUCCESS) {
624       return Err;
625     }
626 
627     if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED &&
628         Size > 0) {
629       HostFineGrainedMemoryPool = MemoryPool;
630     }
631 
632     return HSA_STATUS_SUCCESS;
633   }
634 
635   hsa_status_t setupMemoryPools() {
636     using namespace std::placeholders;
637     hsa_status_t Err;
638     Err = core::collectMemoryPools(
639         CPUAgents, std::bind(&RTLDeviceInfoTy::addHostMemoryPool, this, _1, _2));
640     if (Err != HSA_STATUS_SUCCESS) {
641       fprintf(stderr, "HSA error in collecting memory pools for CPU: %s\n",
642               get_error_string(Err));
643       return Err;
644     }
645     Err = core::collectMemoryPools(
646         HSAAgents, std::bind(&RTLDeviceInfoTy::addDeviceMemoryPool, this, _1, _2));
647     if (Err != HSA_STATUS_SUCCESS) {
648       fprintf(stderr,
649               "HSA error in collecting memory pools for offload devices: %s\n",
650               get_error_string(Err));
651       return Err;
652     }
653     return HSA_STATUS_SUCCESS;
654   }
655 
656   hsa_amd_memory_pool_t getDeviceMemoryPool(int DeviceId) {
657     assert(DeviceId >= 0 && DeviceId < DeviceCoarseGrainedMemoryPools.size() &&
658            "Invalid device Id");
659     return DeviceCoarseGrainedMemoryPools[DeviceId];
660   }
661 
662   hsa_amd_memory_pool_t getHostMemoryPool() {
663     return HostFineGrainedMemoryPool;
664   }
665 
666   static int readEnvElseMinusOne(const char *Env) {
667     const char *envStr = getenv(Env);
668     int res = -1;
669     if (envStr) {
670       res = std::stoi(envStr);
671       DP("Parsed %s=%d\n", Env, res);
672     }
673     return res;
674   }
675 
676   RTLDeviceInfoTy() {
677     // LIBOMPTARGET_KERNEL_TRACE provides a kernel launch trace to stderr
678     // anytime. You do not need a debug library build.
679     //  0 => no tracing
680     //  1 => tracing dispatch only
681     // >1 => verbosity increase
682     if (char *envStr = getenv("LIBOMPTARGET_KERNEL_TRACE"))
683       print_kernel_trace = atoi(envStr);
684     else
685       print_kernel_trace = 0;
686 
687     DP("Start initializing " GETNAME(TARGET_NAME) "\n");
688     hsa_status_t err = core::atl_init_gpu_context();
689     if (err == HSA_STATUS_SUCCESS) {
690       HSAInitializeSucceeded = true;
691     } else {
692       DP("Error when initializing " GETNAME(TARGET_NAME) "\n");
693       return;
694     }
695 
696     // Init hostcall soon after initializing ATMI
697     hostrpc_init();
698 
699     err = FindAgents([&](hsa_device_type_t DeviceType, hsa_agent_t Agent) {
700       if (DeviceType == HSA_DEVICE_TYPE_CPU) {
701         CPUAgents.push_back(Agent);
702       } else {
703         HSAAgents.push_back(Agent);
704       }
705     });
706     if (err != HSA_STATUS_SUCCESS)
707       return;
708 
709     NumberOfDevices = (int)HSAAgents.size();
710 
711     if (NumberOfDevices == 0) {
712       DP("There are no devices supporting HSA.\n");
713       return;
714     } else {
715       DP("There are %d devices supporting HSA.\n", NumberOfDevices);
716     }
717     std::tie(err, KernArgPool) = core::FindKernargPool(CPUAgents);
718     if (err != HSA_STATUS_SUCCESS) {
719       DP("Error when reading memory pools\n");
720       return;
721     }
722 
723     // Init the device info
724     HSAQueues.resize(NumberOfDevices);
725     FuncGblEntries.resize(NumberOfDevices);
726     ThreadsPerGroup.resize(NumberOfDevices);
727     ComputeUnits.resize(NumberOfDevices);
728     GPUName.resize(NumberOfDevices);
729     GroupsPerDevice.resize(NumberOfDevices);
730     WarpSize.resize(NumberOfDevices);
731     NumTeams.resize(NumberOfDevices);
732     NumThreads.resize(NumberOfDevices);
733     deviceStateStore.resize(NumberOfDevices);
734     KernelInfoTable.resize(NumberOfDevices);
735     SymbolInfoTable.resize(NumberOfDevices);
736     DeviceCoarseGrainedMemoryPools.resize(NumberOfDevices);
737     DeviceFineGrainedMemoryPools.resize(NumberOfDevices);
738 
739     err = setupMemoryPools();
740     if (err != HSA_STATUS_SUCCESS) {
741       DP("Error when setting up memory pools");
742       return;
743     }
744 
745     for (int i = 0; i < NumberOfDevices; i++) {
746       HSAQueues[i] = nullptr;
747     }
748 
749     for (int i = 0; i < NumberOfDevices; i++) {
750       uint32_t queue_size = 0;
751       {
752         hsa_status_t err = hsa_agent_get_info(
753             HSAAgents[i], HSA_AGENT_INFO_QUEUE_MAX_SIZE, &queue_size);
754         if (err != HSA_STATUS_SUCCESS) {
755           DP("HSA query QUEUE_MAX_SIZE failed for agent %d\n", i);
756           return;
757         }
758         if (queue_size > core::Runtime::getInstance().getMaxQueueSize()) {
759           queue_size = core::Runtime::getInstance().getMaxQueueSize();
760         }
761       }
762 
763       hsa_status_t rc = hsa_queue_create(
764           HSAAgents[i], queue_size, HSA_QUEUE_TYPE_MULTI, callbackQueue, NULL,
765           UINT32_MAX, UINT32_MAX, &HSAQueues[i]);
766       if (rc != HSA_STATUS_SUCCESS) {
767         DP("Failed to create HSA queue %d\n", i);
768         return;
769       }
770 
771       deviceStateStore[i] = {nullptr, 0};
772     }
773 
774     for (int i = 0; i < NumberOfDevices; i++) {
775       ThreadsPerGroup[i] = RTLDeviceInfoTy::Default_WG_Size;
776       GroupsPerDevice[i] = RTLDeviceInfoTy::DefaultNumTeams;
777       ComputeUnits[i] = 1;
778       DP("Device %d: Initial groupsPerDevice %d & threadsPerGroup %d\n", i,
779          GroupsPerDevice[i], ThreadsPerGroup[i]);
780     }
781 
782     // Get environment variables regarding teams
783     Env.TeamLimit = readEnvElseMinusOne("OMP_TEAM_LIMIT");
784     Env.NumTeams = readEnvElseMinusOne("OMP_NUM_TEAMS");
785     Env.MaxTeamsDefault = readEnvElseMinusOne("OMP_MAX_TEAMS_DEFAULT");
786     Env.TeamThreadLimit = readEnvElseMinusOne("OMP_TEAMS_THREAD_LIMIT");
787 
788     // Default state.
789     RequiresFlags = OMP_REQ_UNDEFINED;
790   }
791 
792   ~RTLDeviceInfoTy() {
793     DP("Finalizing the " GETNAME(TARGET_NAME) " DeviceInfo.\n");
794     if (!HSAInitializeSucceeded) {
795       // Then none of these can have been set up and they can't be torn down
796       return;
797     }
798     // Run destructors on types that use HSA before
799     // atmi_finalize removes access to it
800     deviceStateStore.clear();
801     KernelArgPoolMap.clear();
802     // Terminate hostrpc before finalizing ATMI
803     hostrpc_terminate();
804 
805     hsa_status_t Err;
806     for (uint32_t I = 0; I < HSAExecutables.size(); I++) {
807       Err = hsa_executable_destroy(HSAExecutables[I]);
808       if (Err != HSA_STATUS_SUCCESS) {
809         DP("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
810            "Destroying executable", get_error_string(Err));
811       }
812     }
813 
814     Err = hsa_shut_down();
815     if (Err != HSA_STATUS_SUCCESS) {
816       printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Shutting down HSA",
817              get_error_string(Err));
818     }
819   }
820 };
821 
822 pthread_mutex_t SignalPoolT::mutex = PTHREAD_MUTEX_INITIALIZER;
823 
824 // TODO: May need to drop the trailing to fields until deviceRTL is updated
825 struct omptarget_device_environmentTy {
826   int32_t debug_level; // gets value of envvar LIBOMPTARGET_DEVICE_RTL_DEBUG
827                        // only useful for Debug build of deviceRTLs
828   int32_t num_devices; // gets number of active offload devices
829   int32_t device_num;  // gets a value 0 to num_devices-1
830 };
831 
832 static RTLDeviceInfoTy DeviceInfo;
833 
834 namespace {
835 
836 int32_t dataRetrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr, int64_t Size,
837                      __tgt_async_info *AsyncInfo) {
838   assert(AsyncInfo && "AsyncInfo is nullptr");
839   assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
840   // Return success if we are not copying back to host from target.
841   if (!HstPtr)
842     return OFFLOAD_SUCCESS;
843   hsa_status_t err;
844   DP("Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
845      (long long unsigned)(Elf64_Addr)TgtPtr,
846      (long long unsigned)(Elf64_Addr)HstPtr);
847 
848   err = DeviceInfo.freesignalpool_memcpy_d2h(HstPtr, TgtPtr, (size_t)Size,
849                                              DeviceId);
850 
851   if (err != HSA_STATUS_SUCCESS) {
852     DP("Error when copying data from device to host. Pointers: "
853        "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
854        (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
855     return OFFLOAD_FAIL;
856   }
857   DP("DONE Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
858      (long long unsigned)(Elf64_Addr)TgtPtr,
859      (long long unsigned)(Elf64_Addr)HstPtr);
860   return OFFLOAD_SUCCESS;
861 }
862 
863 int32_t dataSubmit(int32_t DeviceId, void *TgtPtr, void *HstPtr, int64_t Size,
864                    __tgt_async_info *AsyncInfo) {
865   assert(AsyncInfo && "AsyncInfo is nullptr");
866   hsa_status_t err;
867   assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
868   // Return success if we are not doing host to target.
869   if (!HstPtr)
870     return OFFLOAD_SUCCESS;
871 
872   DP("Submit data %ld bytes, (hst:%016llx) -> (tgt:%016llx).\n", Size,
873      (long long unsigned)(Elf64_Addr)HstPtr,
874      (long long unsigned)(Elf64_Addr)TgtPtr);
875   err = DeviceInfo.freesignalpool_memcpy_h2d(TgtPtr, HstPtr, (size_t)Size,
876                                              DeviceId);
877   if (err != HSA_STATUS_SUCCESS) {
878     DP("Error when copying data from host to device. Pointers: "
879        "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
880        (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
881     return OFFLOAD_FAIL;
882   }
883   return OFFLOAD_SUCCESS;
884 }
885 
886 // Async.
887 // The implementation was written with cuda streams in mind. The semantics of
888 // that are to execute kernels on a queue in order of insertion. A synchronise
889 // call then makes writes visible between host and device. This means a series
890 // of N data_submit_async calls are expected to execute serially. HSA offers
891 // various options to run the data copies concurrently. This may require changes
892 // to libomptarget.
893 
894 // __tgt_async_info* contains a void * Queue. Queue = 0 is used to indicate that
895 // there are no outstanding kernels that need to be synchronized. Any async call
896 // may be passed a Queue==0, at which point the cuda implementation will set it
897 // to non-null (see getStream). The cuda streams are per-device. Upstream may
898 // change this interface to explicitly initialize the AsyncInfo_pointer, but
899 // until then hsa lazily initializes it as well.
900 
901 void initAsyncInfo(__tgt_async_info *AsyncInfo) {
902   // set non-null while using async calls, return to null to indicate completion
903   assert(AsyncInfo);
904   if (!AsyncInfo->Queue) {
905     AsyncInfo->Queue = reinterpret_cast<void *>(UINT64_MAX);
906   }
907 }
908 void finiAsyncInfo(__tgt_async_info *AsyncInfo) {
909   assert(AsyncInfo);
910   assert(AsyncInfo->Queue);
911   AsyncInfo->Queue = 0;
912 }
913 
914 bool elf_machine_id_is_amdgcn(__tgt_device_image *image) {
915   const uint16_t amdgcnMachineID = 224; // EM_AMDGPU may not be in system elf.h
916   int32_t r = elf_check_machine(image, amdgcnMachineID);
917   if (!r) {
918     DP("Supported machine ID not found\n");
919   }
920   return r;
921 }
922 
923 uint32_t elf_e_flags(__tgt_device_image *image) {
924   char *img_begin = (char *)image->ImageStart;
925   size_t img_size = (char *)image->ImageEnd - img_begin;
926 
927   Elf *e = elf_memory(img_begin, img_size);
928   if (!e) {
929     DP("Unable to get ELF handle: %s!\n", elf_errmsg(-1));
930     return 0;
931   }
932 
933   Elf64_Ehdr *eh64 = elf64_getehdr(e);
934 
935   if (!eh64) {
936     DP("Unable to get machine ID from ELF file!\n");
937     elf_end(e);
938     return 0;
939   }
940 
941   uint32_t Flags = eh64->e_flags;
942 
943   elf_end(e);
944   DP("ELF Flags: 0x%x\n", Flags);
945   return Flags;
946 }
947 } // namespace
948 
949 int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *image) {
950   return elf_machine_id_is_amdgcn(image);
951 }
952 
953 int __tgt_rtl_number_of_devices() { return DeviceInfo.NumberOfDevices; }
954 
955 int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
956   DP("Init requires flags to %ld\n", RequiresFlags);
957   DeviceInfo.RequiresFlags = RequiresFlags;
958   return RequiresFlags;
959 }
960 
961 namespace {
962 template <typename T> bool enforce_upper_bound(T *value, T upper) {
963   bool changed = *value > upper;
964   if (changed) {
965     *value = upper;
966   }
967   return changed;
968 }
969 } // namespace
970 
971 int32_t __tgt_rtl_init_device(int device_id) {
972   hsa_status_t err;
973 
974   // this is per device id init
975   DP("Initialize the device id: %d\n", device_id);
976 
977   hsa_agent_t agent = DeviceInfo.HSAAgents[device_id];
978 
979   // Get number of Compute Unit
980   uint32_t compute_units = 0;
981   err = hsa_agent_get_info(
982       agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT,
983       &compute_units);
984   if (err != HSA_STATUS_SUCCESS) {
985     DeviceInfo.ComputeUnits[device_id] = 1;
986     DP("Error getting compute units : settiing to 1\n");
987   } else {
988     DeviceInfo.ComputeUnits[device_id] = compute_units;
989     DP("Using %d compute unis per grid\n", DeviceInfo.ComputeUnits[device_id]);
990   }
991 
992   char GetInfoName[64]; // 64 max size returned by get info
993   err = hsa_agent_get_info(agent, (hsa_agent_info_t)HSA_AGENT_INFO_NAME,
994                            (void *)GetInfoName);
995   if (err)
996     DeviceInfo.GPUName[device_id] = "--unknown gpu--";
997   else {
998     DeviceInfo.GPUName[device_id] = GetInfoName;
999   }
1000 
1001   if (print_kernel_trace & STARTUP_DETAILS)
1002     fprintf(stderr, "Device#%-2d CU's: %2d %s\n", device_id,
1003             DeviceInfo.ComputeUnits[device_id],
1004             DeviceInfo.GPUName[device_id].c_str());
1005 
1006   // Query attributes to determine number of threads/block and blocks/grid.
1007   uint16_t workgroup_max_dim[3];
1008   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM,
1009                            &workgroup_max_dim);
1010   if (err != HSA_STATUS_SUCCESS) {
1011     DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::DefaultNumTeams;
1012     DP("Error getting grid dims: num groups : %d\n",
1013        RTLDeviceInfoTy::DefaultNumTeams);
1014   } else if (workgroup_max_dim[0] <= RTLDeviceInfoTy::HardTeamLimit) {
1015     DeviceInfo.GroupsPerDevice[device_id] = workgroup_max_dim[0];
1016     DP("Using %d ROCm blocks per grid\n",
1017        DeviceInfo.GroupsPerDevice[device_id]);
1018   } else {
1019     DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::HardTeamLimit;
1020     DP("Max ROCm blocks per grid %d exceeds the hard team limit %d, capping "
1021        "at the hard limit\n",
1022        workgroup_max_dim[0], RTLDeviceInfoTy::HardTeamLimit);
1023   }
1024 
1025   // Get thread limit
1026   hsa_dim3_t grid_max_dim;
1027   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim);
1028   if (err == HSA_STATUS_SUCCESS) {
1029     DeviceInfo.ThreadsPerGroup[device_id] =
1030         reinterpret_cast<uint32_t *>(&grid_max_dim)[0] /
1031         DeviceInfo.GroupsPerDevice[device_id];
1032 
1033     if (DeviceInfo.ThreadsPerGroup[device_id] == 0) {
1034       DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
1035       DP("Default thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1036     } else if (enforce_upper_bound(&DeviceInfo.ThreadsPerGroup[device_id],
1037                                    RTLDeviceInfoTy::Max_WG_Size)) {
1038       DP("Capped thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1039     } else {
1040       DP("Using ROCm Queried thread limit: %d\n",
1041          DeviceInfo.ThreadsPerGroup[device_id]);
1042     }
1043   } else {
1044     DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
1045     DP("Error getting max block dimension, use default:%d \n",
1046        RTLDeviceInfoTy::Max_WG_Size);
1047   }
1048 
1049   // Get wavefront size
1050   uint32_t wavefront_size = 0;
1051   err =
1052       hsa_agent_get_info(agent, HSA_AGENT_INFO_WAVEFRONT_SIZE, &wavefront_size);
1053   if (err == HSA_STATUS_SUCCESS) {
1054     DP("Queried wavefront size: %d\n", wavefront_size);
1055     DeviceInfo.WarpSize[device_id] = wavefront_size;
1056   } else {
1057     DP("Default wavefront size: %d\n",
1058        llvm::omp::AMDGPUGridValues.GV_Warp_Size);
1059     DeviceInfo.WarpSize[device_id] = llvm::omp::AMDGPUGridValues.GV_Warp_Size;
1060   }
1061 
1062   // Adjust teams to the env variables
1063 
1064   if (DeviceInfo.Env.TeamLimit > 0 &&
1065       (enforce_upper_bound(&DeviceInfo.GroupsPerDevice[device_id],
1066                            DeviceInfo.Env.TeamLimit))) {
1067     DP("Capping max groups per device to OMP_TEAM_LIMIT=%d\n",
1068        DeviceInfo.Env.TeamLimit);
1069   }
1070 
1071   // Set default number of teams
1072   if (DeviceInfo.Env.NumTeams > 0) {
1073     DeviceInfo.NumTeams[device_id] = DeviceInfo.Env.NumTeams;
1074     DP("Default number of teams set according to environment %d\n",
1075        DeviceInfo.Env.NumTeams);
1076   } else {
1077     char *TeamsPerCUEnvStr = getenv("OMP_TARGET_TEAMS_PER_PROC");
1078     int TeamsPerCU = DefaultTeamsPerCU;
1079     if (TeamsPerCUEnvStr) {
1080       TeamsPerCU = std::stoi(TeamsPerCUEnvStr);
1081     }
1082 
1083     DeviceInfo.NumTeams[device_id] =
1084         TeamsPerCU * DeviceInfo.ComputeUnits[device_id];
1085     DP("Default number of teams = %d * number of compute units %d\n",
1086        TeamsPerCU, DeviceInfo.ComputeUnits[device_id]);
1087   }
1088 
1089   if (enforce_upper_bound(&DeviceInfo.NumTeams[device_id],
1090                           DeviceInfo.GroupsPerDevice[device_id])) {
1091     DP("Default number of teams exceeds device limit, capping at %d\n",
1092        DeviceInfo.GroupsPerDevice[device_id]);
1093   }
1094 
1095   // Adjust threads to the env variables
1096   if (DeviceInfo.Env.TeamThreadLimit > 0 &&
1097       (enforce_upper_bound(&DeviceInfo.NumThreads[device_id],
1098                            DeviceInfo.Env.TeamThreadLimit))) {
1099     DP("Capping max number of threads to OMP_TEAMS_THREAD_LIMIT=%d\n",
1100        DeviceInfo.Env.TeamThreadLimit);
1101   }
1102 
1103   // Set default number of threads
1104   DeviceInfo.NumThreads[device_id] = RTLDeviceInfoTy::Default_WG_Size;
1105   DP("Default number of threads set according to library's default %d\n",
1106      RTLDeviceInfoTy::Default_WG_Size);
1107   if (enforce_upper_bound(&DeviceInfo.NumThreads[device_id],
1108                           DeviceInfo.ThreadsPerGroup[device_id])) {
1109     DP("Default number of threads exceeds device limit, capping at %d\n",
1110        DeviceInfo.ThreadsPerGroup[device_id]);
1111   }
1112 
1113   DP("Device %d: default limit for groupsPerDevice %d & threadsPerGroup %d\n",
1114      device_id, DeviceInfo.GroupsPerDevice[device_id],
1115      DeviceInfo.ThreadsPerGroup[device_id]);
1116 
1117   DP("Device %d: wavefront size %d, total threads %d x %d = %d\n", device_id,
1118      DeviceInfo.WarpSize[device_id], DeviceInfo.ThreadsPerGroup[device_id],
1119      DeviceInfo.GroupsPerDevice[device_id],
1120      DeviceInfo.GroupsPerDevice[device_id] *
1121          DeviceInfo.ThreadsPerGroup[device_id]);
1122 
1123   return OFFLOAD_SUCCESS;
1124 }
1125 
1126 namespace {
1127 Elf64_Shdr *find_only_SHT_HASH(Elf *elf) {
1128   size_t N;
1129   int rc = elf_getshdrnum(elf, &N);
1130   if (rc != 0) {
1131     return nullptr;
1132   }
1133 
1134   Elf64_Shdr *result = nullptr;
1135   for (size_t i = 0; i < N; i++) {
1136     Elf_Scn *scn = elf_getscn(elf, i);
1137     if (scn) {
1138       Elf64_Shdr *shdr = elf64_getshdr(scn);
1139       if (shdr) {
1140         if (shdr->sh_type == SHT_HASH) {
1141           if (result == nullptr) {
1142             result = shdr;
1143           } else {
1144             // multiple SHT_HASH sections not handled
1145             return nullptr;
1146           }
1147         }
1148       }
1149     }
1150   }
1151   return result;
1152 }
1153 
1154 const Elf64_Sym *elf_lookup(Elf *elf, char *base, Elf64_Shdr *section_hash,
1155                             const char *symname) {
1156 
1157   assert(section_hash);
1158   size_t section_symtab_index = section_hash->sh_link;
1159   Elf64_Shdr *section_symtab =
1160       elf64_getshdr(elf_getscn(elf, section_symtab_index));
1161   size_t section_strtab_index = section_symtab->sh_link;
1162 
1163   const Elf64_Sym *symtab =
1164       reinterpret_cast<const Elf64_Sym *>(base + section_symtab->sh_offset);
1165 
1166   const uint32_t *hashtab =
1167       reinterpret_cast<const uint32_t *>(base + section_hash->sh_offset);
1168 
1169   // Layout:
1170   // nbucket
1171   // nchain
1172   // bucket[nbucket]
1173   // chain[nchain]
1174   uint32_t nbucket = hashtab[0];
1175   const uint32_t *bucket = &hashtab[2];
1176   const uint32_t *chain = &hashtab[nbucket + 2];
1177 
1178   const size_t max = strlen(symname) + 1;
1179   const uint32_t hash = elf_hash(symname);
1180   for (uint32_t i = bucket[hash % nbucket]; i != 0; i = chain[i]) {
1181     char *n = elf_strptr(elf, section_strtab_index, symtab[i].st_name);
1182     if (strncmp(symname, n, max) == 0) {
1183       return &symtab[i];
1184     }
1185   }
1186 
1187   return nullptr;
1188 }
1189 
1190 struct symbol_info {
1191   void *addr = nullptr;
1192   uint32_t size = UINT32_MAX;
1193   uint32_t sh_type = SHT_NULL;
1194 };
1195 
1196 int get_symbol_info_without_loading(Elf *elf, char *base, const char *symname,
1197                                     symbol_info *res) {
1198   if (elf_kind(elf) != ELF_K_ELF) {
1199     return 1;
1200   }
1201 
1202   Elf64_Shdr *section_hash = find_only_SHT_HASH(elf);
1203   if (!section_hash) {
1204     return 1;
1205   }
1206 
1207   const Elf64_Sym *sym = elf_lookup(elf, base, section_hash, symname);
1208   if (!sym) {
1209     return 1;
1210   }
1211 
1212   if (sym->st_size > UINT32_MAX) {
1213     return 1;
1214   }
1215 
1216   if (sym->st_shndx == SHN_UNDEF) {
1217     return 1;
1218   }
1219 
1220   Elf_Scn *section = elf_getscn(elf, sym->st_shndx);
1221   if (!section) {
1222     return 1;
1223   }
1224 
1225   Elf64_Shdr *header = elf64_getshdr(section);
1226   if (!header) {
1227     return 1;
1228   }
1229 
1230   res->addr = sym->st_value + base;
1231   res->size = static_cast<uint32_t>(sym->st_size);
1232   res->sh_type = header->sh_type;
1233   return 0;
1234 }
1235 
1236 int get_symbol_info_without_loading(char *base, size_t img_size,
1237                                     const char *symname, symbol_info *res) {
1238   Elf *elf = elf_memory(base, img_size);
1239   if (elf) {
1240     int rc = get_symbol_info_without_loading(elf, base, symname, res);
1241     elf_end(elf);
1242     return rc;
1243   }
1244   return 1;
1245 }
1246 
1247 hsa_status_t interop_get_symbol_info(char *base, size_t img_size,
1248                                      const char *symname, void **var_addr,
1249                                      uint32_t *var_size) {
1250   symbol_info si;
1251   int rc = get_symbol_info_without_loading(base, img_size, symname, &si);
1252   if (rc == 0) {
1253     *var_addr = si.addr;
1254     *var_size = si.size;
1255     return HSA_STATUS_SUCCESS;
1256   } else {
1257     return HSA_STATUS_ERROR;
1258   }
1259 }
1260 
1261 template <typename C>
1262 hsa_status_t module_register_from_memory_to_place(
1263     std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
1264     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
1265     void *module_bytes, size_t module_size, int DeviceId, C cb,
1266     std::vector<hsa_executable_t> &HSAExecutables) {
1267   auto L = [](void *data, size_t size, void *cb_state) -> hsa_status_t {
1268     C *unwrapped = static_cast<C *>(cb_state);
1269     return (*unwrapped)(data, size);
1270   };
1271   return core::RegisterModuleFromMemory(
1272       KernelInfoTable, SymbolInfoTable, module_bytes, module_size,
1273       DeviceInfo.HSAAgents[DeviceId], L, static_cast<void *>(&cb),
1274       HSAExecutables);
1275 }
1276 } // namespace
1277 
1278 static uint64_t get_device_State_bytes(char *ImageStart, size_t img_size) {
1279   uint64_t device_State_bytes = 0;
1280   {
1281     // If this is the deviceRTL, get the state variable size
1282     symbol_info size_si;
1283     int rc = get_symbol_info_without_loading(
1284         ImageStart, img_size, "omptarget_nvptx_device_State_size", &size_si);
1285 
1286     if (rc == 0) {
1287       if (size_si.size != sizeof(uint64_t)) {
1288         DP("Found device_State_size variable with wrong size\n");
1289         return 0;
1290       }
1291 
1292       // Read number of bytes directly from the elf
1293       memcpy(&device_State_bytes, size_si.addr, sizeof(uint64_t));
1294     }
1295   }
1296   return device_State_bytes;
1297 }
1298 
1299 static __tgt_target_table *
1300 __tgt_rtl_load_binary_locked(int32_t device_id, __tgt_device_image *image);
1301 
1302 static __tgt_target_table *
1303 __tgt_rtl_load_binary_locked(int32_t device_id, __tgt_device_image *image);
1304 
1305 __tgt_target_table *__tgt_rtl_load_binary(int32_t device_id,
1306                                           __tgt_device_image *image) {
1307   DeviceInfo.load_run_lock.lock();
1308   __tgt_target_table *res = __tgt_rtl_load_binary_locked(device_id, image);
1309   DeviceInfo.load_run_lock.unlock();
1310   return res;
1311 }
1312 
1313 struct device_environment {
1314   // initialise an omptarget_device_environmentTy in the deviceRTL
1315   // patches around differences in the deviceRTL between trunk, aomp,
1316   // rocmcc. Over time these differences will tend to zero and this class
1317   // simplified.
1318   // Symbol may be in .data or .bss, and may be missing fields:
1319   //  - aomp has debug_level, num_devices, device_num
1320   //  - trunk has debug_level
1321   //  - under review in trunk is debug_level, device_num
1322   //  - rocmcc matches aomp, patch to swap num_devices and device_num
1323 
1324   // The symbol may also have been deadstripped because the device side
1325   // accessors were unused.
1326 
1327   // If the symbol is in .data (aomp, rocm) it can be written directly.
1328   // If it is in .bss, we must wait for it to be allocated space on the
1329   // gpu (trunk) and initialize after loading.
1330   const char *sym() { return "omptarget_device_environment"; }
1331 
1332   omptarget_device_environmentTy host_device_env;
1333   symbol_info si;
1334   bool valid = false;
1335 
1336   __tgt_device_image *image;
1337   const size_t img_size;
1338 
1339   device_environment(int device_id, int number_devices,
1340                      __tgt_device_image *image, const size_t img_size)
1341       : image(image), img_size(img_size) {
1342 
1343     host_device_env.num_devices = number_devices;
1344     host_device_env.device_num = device_id;
1345     host_device_env.debug_level = 0;
1346 #ifdef OMPTARGET_DEBUG
1347     if (char *envStr = getenv("LIBOMPTARGET_DEVICE_RTL_DEBUG")) {
1348       host_device_env.debug_level = std::stoi(envStr);
1349     }
1350 #endif
1351 
1352     int rc = get_symbol_info_without_loading((char *)image->ImageStart,
1353                                              img_size, sym(), &si);
1354     if (rc != 0) {
1355       DP("Finding global device environment '%s' - symbol missing.\n", sym());
1356       return;
1357     }
1358 
1359     if (si.size > sizeof(host_device_env)) {
1360       DP("Symbol '%s' has size %u, expected at most %zu.\n", sym(), si.size,
1361          sizeof(host_device_env));
1362       return;
1363     }
1364 
1365     valid = true;
1366   }
1367 
1368   bool in_image() { return si.sh_type != SHT_NOBITS; }
1369 
1370   hsa_status_t before_loading(void *data, size_t size) {
1371     if (valid) {
1372       if (in_image()) {
1373         DP("Setting global device environment before load (%u bytes)\n",
1374            si.size);
1375         uint64_t offset = (char *)si.addr - (char *)image->ImageStart;
1376         void *pos = (char *)data + offset;
1377         memcpy(pos, &host_device_env, si.size);
1378       }
1379     }
1380     return HSA_STATUS_SUCCESS;
1381   }
1382 
1383   hsa_status_t after_loading() {
1384     if (valid) {
1385       if (!in_image()) {
1386         DP("Setting global device environment after load (%u bytes)\n",
1387            si.size);
1388         int device_id = host_device_env.device_num;
1389         auto &SymbolInfo = DeviceInfo.SymbolInfoTable[device_id];
1390         void *state_ptr;
1391         uint32_t state_ptr_size;
1392         hsa_status_t err = atmi_interop_hsa_get_symbol_info(
1393             SymbolInfo, device_id, sym(), &state_ptr, &state_ptr_size);
1394         if (err != HSA_STATUS_SUCCESS) {
1395           DP("failed to find %s in loaded image\n", sym());
1396           return err;
1397         }
1398 
1399         if (state_ptr_size != si.size) {
1400           DP("Symbol had size %u before loading, %u after\n", state_ptr_size,
1401              si.size);
1402           return HSA_STATUS_ERROR;
1403         }
1404 
1405         return DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &host_device_env,
1406                                                     state_ptr_size, device_id);
1407       }
1408     }
1409     return HSA_STATUS_SUCCESS;
1410   }
1411 };
1412 
1413 static hsa_status_t atmi_calloc(void **ret_ptr, size_t size, int DeviceId) {
1414   uint64_t rounded = 4 * ((size + 3) / 4);
1415   void *ptr;
1416   hsa_amd_memory_pool_t MemoryPool = DeviceInfo.getDeviceMemoryPool(DeviceId);
1417   hsa_status_t err = hsa_amd_memory_pool_allocate(MemoryPool, rounded, 0, &ptr);
1418   if (err != HSA_STATUS_SUCCESS) {
1419     return err;
1420   }
1421 
1422   hsa_status_t rc = hsa_amd_memory_fill(ptr, 0, rounded / 4);
1423   if (rc != HSA_STATUS_SUCCESS) {
1424     fprintf(stderr, "zero fill device_state failed with %u\n", rc);
1425     core::Runtime::Memfree(ptr);
1426     return HSA_STATUS_ERROR;
1427   }
1428 
1429   *ret_ptr = ptr;
1430   return HSA_STATUS_SUCCESS;
1431 }
1432 
1433 static bool image_contains_symbol(void *data, size_t size, const char *sym) {
1434   symbol_info si;
1435   int rc = get_symbol_info_without_loading((char *)data, size, sym, &si);
1436   return (rc == 0) && (si.addr != nullptr);
1437 }
1438 
1439 __tgt_target_table *__tgt_rtl_load_binary_locked(int32_t device_id,
1440                                                  __tgt_device_image *image) {
1441   // This function loads the device image onto gpu[device_id] and does other
1442   // per-image initialization work. Specifically:
1443   //
1444   // - Initialize an omptarget_device_environmentTy instance embedded in the
1445   //   image at the symbol "omptarget_device_environment"
1446   //   Fields debug_level, device_num, num_devices. Used by the deviceRTL.
1447   //
1448   // - Allocate a large array per-gpu (could be moved to init_device)
1449   //   - Read a uint64_t at symbol omptarget_nvptx_device_State_size
1450   //   - Allocate at least that many bytes of gpu memory
1451   //   - Zero initialize it
1452   //   - Write the pointer to the symbol omptarget_nvptx_device_State
1453   //
1454   // - Pulls some per-kernel information together from various sources and
1455   //   records it in the KernelsList for quicker access later
1456   //
1457   // The initialization can be done before or after loading the image onto the
1458   // gpu. This function presently does a mixture. Using the hsa api to get/set
1459   // the information is simpler to implement, in exchange for more complicated
1460   // runtime behaviour. E.g. launching a kernel or using dma to get eight bytes
1461   // back from the gpu vs a hashtable lookup on the host.
1462 
1463   const size_t img_size = (char *)image->ImageEnd - (char *)image->ImageStart;
1464 
1465   DeviceInfo.clearOffloadEntriesTable(device_id);
1466 
1467   // We do not need to set the ELF version because the caller of this function
1468   // had to do that to decide the right runtime to use
1469 
1470   if (!elf_machine_id_is_amdgcn(image)) {
1471     return NULL;
1472   }
1473 
1474   {
1475     auto env = device_environment(device_id, DeviceInfo.NumberOfDevices, image,
1476                                   img_size);
1477 
1478     auto &KernelInfo = DeviceInfo.KernelInfoTable[device_id];
1479     auto &SymbolInfo = DeviceInfo.SymbolInfoTable[device_id];
1480     hsa_status_t err = module_register_from_memory_to_place(
1481         KernelInfo, SymbolInfo, (void *)image->ImageStart, img_size, device_id,
1482         [&](void *data, size_t size) {
1483           if (image_contains_symbol(data, size, "needs_hostcall_buffer")) {
1484             __atomic_store_n(&DeviceInfo.hostcall_required, true,
1485                              __ATOMIC_RELEASE);
1486           }
1487           return env.before_loading(data, size);
1488         },
1489         DeviceInfo.HSAExecutables);
1490 
1491     check("Module registering", err);
1492     if (err != HSA_STATUS_SUCCESS) {
1493       fprintf(stderr,
1494               "Possible gpu arch mismatch: device:%s, image:%s please check"
1495               " compiler flag: -march=<gpu>\n",
1496               DeviceInfo.GPUName[device_id].c_str(),
1497               get_elf_mach_gfx_name(elf_e_flags(image)));
1498       return NULL;
1499     }
1500 
1501     err = env.after_loading();
1502     if (err != HSA_STATUS_SUCCESS) {
1503       return NULL;
1504     }
1505   }
1506 
1507   DP("ATMI module successfully loaded!\n");
1508 
1509   {
1510     // the device_State array is either large value in bss or a void* that
1511     // needs to be assigned to a pointer to an array of size device_state_bytes
1512     // If absent, it has been deadstripped and needs no setup.
1513 
1514     void *state_ptr;
1515     uint32_t state_ptr_size;
1516     auto &SymbolInfoMap = DeviceInfo.SymbolInfoTable[device_id];
1517     hsa_status_t err = atmi_interop_hsa_get_symbol_info(
1518         SymbolInfoMap, device_id, "omptarget_nvptx_device_State", &state_ptr,
1519         &state_ptr_size);
1520 
1521     if (err != HSA_STATUS_SUCCESS) {
1522       DP("No device_state symbol found, skipping initialization\n");
1523     } else {
1524       if (state_ptr_size < sizeof(void *)) {
1525         DP("unexpected size of state_ptr %u != %zu\n", state_ptr_size,
1526            sizeof(void *));
1527         return NULL;
1528       }
1529 
1530       // if it's larger than a void*, assume it's a bss array and no further
1531       // initialization is required. Only try to set up a pointer for
1532       // sizeof(void*)
1533       if (state_ptr_size == sizeof(void *)) {
1534         uint64_t device_State_bytes =
1535             get_device_State_bytes((char *)image->ImageStart, img_size);
1536         if (device_State_bytes == 0) {
1537           DP("Can't initialize device_State, missing size information\n");
1538           return NULL;
1539         }
1540 
1541         auto &dss = DeviceInfo.deviceStateStore[device_id];
1542         if (dss.first.get() == nullptr) {
1543           assert(dss.second == 0);
1544           void *ptr = NULL;
1545           hsa_status_t err = atmi_calloc(&ptr, device_State_bytes, device_id);
1546           if (err != HSA_STATUS_SUCCESS) {
1547             DP("Failed to allocate device_state array\n");
1548             return NULL;
1549           }
1550           dss = {
1551               std::unique_ptr<void, RTLDeviceInfoTy::atmiFreePtrDeletor>{ptr},
1552               device_State_bytes,
1553           };
1554         }
1555 
1556         void *ptr = dss.first.get();
1557         if (device_State_bytes != dss.second) {
1558           DP("Inconsistent sizes of device_State unsupported\n");
1559           return NULL;
1560         }
1561 
1562         // write ptr to device memory so it can be used by later kernels
1563         err = DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &ptr,
1564                                                    sizeof(void *), device_id);
1565         if (err != HSA_STATUS_SUCCESS) {
1566           DP("memcpy install of state_ptr failed\n");
1567           return NULL;
1568         }
1569       }
1570     }
1571   }
1572 
1573   // Here, we take advantage of the data that is appended after img_end to get
1574   // the symbols' name we need to load. This data consist of the host entries
1575   // begin and end as well as the target name (see the offloading linker script
1576   // creation in clang compiler).
1577 
1578   // Find the symbols in the module by name. The name can be obtain by
1579   // concatenating the host entry name with the target name
1580 
1581   __tgt_offload_entry *HostBegin = image->EntriesBegin;
1582   __tgt_offload_entry *HostEnd = image->EntriesEnd;
1583 
1584   for (__tgt_offload_entry *e = HostBegin; e != HostEnd; ++e) {
1585 
1586     if (!e->addr) {
1587       // The host should have always something in the address to
1588       // uniquely identify the target region.
1589       fprintf(stderr, "Analyzing host entry '<null>' (size = %lld)...\n",
1590               (unsigned long long)e->size);
1591       return NULL;
1592     }
1593 
1594     if (e->size) {
1595       __tgt_offload_entry entry = *e;
1596 
1597       void *varptr;
1598       uint32_t varsize;
1599 
1600       auto &SymbolInfoMap = DeviceInfo.SymbolInfoTable[device_id];
1601       hsa_status_t err = atmi_interop_hsa_get_symbol_info(
1602           SymbolInfoMap, device_id, e->name, &varptr, &varsize);
1603 
1604       if (err != HSA_STATUS_SUCCESS) {
1605         // Inform the user what symbol prevented offloading
1606         DP("Loading global '%s' (Failed)\n", e->name);
1607         return NULL;
1608       }
1609 
1610       if (varsize != e->size) {
1611         DP("Loading global '%s' - size mismatch (%u != %lu)\n", e->name,
1612            varsize, e->size);
1613         return NULL;
1614       }
1615 
1616       DP("Entry point " DPxMOD " maps to global %s (" DPxMOD ")\n",
1617          DPxPTR(e - HostBegin), e->name, DPxPTR(varptr));
1618       entry.addr = (void *)varptr;
1619 
1620       DeviceInfo.addOffloadEntry(device_id, entry);
1621 
1622       if (DeviceInfo.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
1623           e->flags & OMP_DECLARE_TARGET_LINK) {
1624         // If unified memory is present any target link variables
1625         // can access host addresses directly. There is no longer a
1626         // need for device copies.
1627         err = DeviceInfo.freesignalpool_memcpy_h2d(varptr, e->addr,
1628                                                    sizeof(void *), device_id);
1629         if (err != HSA_STATUS_SUCCESS)
1630           DP("Error when copying USM\n");
1631         DP("Copy linked variable host address (" DPxMOD ")"
1632            "to device address (" DPxMOD ")\n",
1633            DPxPTR(*((void **)e->addr)), DPxPTR(varptr));
1634       }
1635 
1636       continue;
1637     }
1638 
1639     DP("to find the kernel name: %s size: %lu\n", e->name, strlen(e->name));
1640 
1641     uint32_t kernarg_segment_size;
1642     auto &KernelInfoMap = DeviceInfo.KernelInfoTable[device_id];
1643     hsa_status_t err = atmi_interop_hsa_get_kernel_info(
1644         KernelInfoMap, device_id, e->name,
1645         HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
1646         &kernarg_segment_size);
1647 
1648     // each arg is a void * in this openmp implementation
1649     uint32_t arg_num = kernarg_segment_size / sizeof(void *);
1650     std::vector<size_t> arg_sizes(arg_num);
1651     for (std::vector<size_t>::iterator it = arg_sizes.begin();
1652          it != arg_sizes.end(); it++) {
1653       *it = sizeof(void *);
1654     }
1655 
1656     // default value GENERIC (in case symbol is missing from cubin file)
1657     int8_t ExecModeVal = ExecutionModeType::GENERIC;
1658 
1659     // get flat group size if present, else Default_WG_Size
1660     int16_t WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
1661 
1662     // get Kernel Descriptor if present.
1663     // Keep struct in sync wih getTgtAttributeStructQTy in CGOpenMPRuntime.cpp
1664     struct KernDescValType {
1665       uint16_t Version;
1666       uint16_t TSize;
1667       uint16_t WG_Size;
1668       uint8_t Mode;
1669     };
1670     struct KernDescValType KernDescVal;
1671     std::string KernDescNameStr(e->name);
1672     KernDescNameStr += "_kern_desc";
1673     const char *KernDescName = KernDescNameStr.c_str();
1674 
1675     void *KernDescPtr;
1676     uint32_t KernDescSize;
1677     void *CallStackAddr = nullptr;
1678     err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1679                                   KernDescName, &KernDescPtr, &KernDescSize);
1680 
1681     if (err == HSA_STATUS_SUCCESS) {
1682       if ((size_t)KernDescSize != sizeof(KernDescVal))
1683         DP("Loading global computation properties '%s' - size mismatch (%u != "
1684            "%lu)\n",
1685            KernDescName, KernDescSize, sizeof(KernDescVal));
1686 
1687       memcpy(&KernDescVal, KernDescPtr, (size_t)KernDescSize);
1688 
1689       // Check structure size against recorded size.
1690       if ((size_t)KernDescSize != KernDescVal.TSize)
1691         DP("KernDescVal size %lu does not match advertized size %d for '%s'\n",
1692            sizeof(KernDescVal), KernDescVal.TSize, KernDescName);
1693 
1694       DP("After loading global for %s KernDesc \n", KernDescName);
1695       DP("KernDesc: Version: %d\n", KernDescVal.Version);
1696       DP("KernDesc: TSize: %d\n", KernDescVal.TSize);
1697       DP("KernDesc: WG_Size: %d\n", KernDescVal.WG_Size);
1698       DP("KernDesc: Mode: %d\n", KernDescVal.Mode);
1699 
1700       // Get ExecMode
1701       ExecModeVal = KernDescVal.Mode;
1702       DP("ExecModeVal %d\n", ExecModeVal);
1703       if (KernDescVal.WG_Size == 0) {
1704         KernDescVal.WG_Size = RTLDeviceInfoTy::Default_WG_Size;
1705         DP("Setting KernDescVal.WG_Size to default %d\n", KernDescVal.WG_Size);
1706       }
1707       WGSizeVal = KernDescVal.WG_Size;
1708       DP("WGSizeVal %d\n", WGSizeVal);
1709       check("Loading KernDesc computation property", err);
1710     } else {
1711       DP("Warning: Loading KernDesc '%s' - symbol not found, ", KernDescName);
1712 
1713       // Generic
1714       std::string ExecModeNameStr(e->name);
1715       ExecModeNameStr += "_exec_mode";
1716       const char *ExecModeName = ExecModeNameStr.c_str();
1717 
1718       void *ExecModePtr;
1719       uint32_t varsize;
1720       err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1721                                     ExecModeName, &ExecModePtr, &varsize);
1722 
1723       if (err == HSA_STATUS_SUCCESS) {
1724         if ((size_t)varsize != sizeof(int8_t)) {
1725           DP("Loading global computation properties '%s' - size mismatch(%u != "
1726              "%lu)\n",
1727              ExecModeName, varsize, sizeof(int8_t));
1728           return NULL;
1729         }
1730 
1731         memcpy(&ExecModeVal, ExecModePtr, (size_t)varsize);
1732 
1733         DP("After loading global for %s ExecMode = %d\n", ExecModeName,
1734            ExecModeVal);
1735 
1736         if (ExecModeVal < 0 || ExecModeVal > 2) {
1737           DP("Error wrong exec_mode value specified in HSA code object file: "
1738              "%d\n",
1739              ExecModeVal);
1740           return NULL;
1741         }
1742       } else {
1743         DP("Loading global exec_mode '%s' - symbol missing, using default "
1744            "value "
1745            "GENERIC (1)\n",
1746            ExecModeName);
1747       }
1748       check("Loading computation property", err);
1749 
1750       // Flat group size
1751       std::string WGSizeNameStr(e->name);
1752       WGSizeNameStr += "_wg_size";
1753       const char *WGSizeName = WGSizeNameStr.c_str();
1754 
1755       void *WGSizePtr;
1756       uint32_t WGSize;
1757       err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1758                                     WGSizeName, &WGSizePtr, &WGSize);
1759 
1760       if (err == HSA_STATUS_SUCCESS) {
1761         if ((size_t)WGSize != sizeof(int16_t)) {
1762           DP("Loading global computation properties '%s' - size mismatch (%u "
1763              "!= "
1764              "%lu)\n",
1765              WGSizeName, WGSize, sizeof(int16_t));
1766           return NULL;
1767         }
1768 
1769         memcpy(&WGSizeVal, WGSizePtr, (size_t)WGSize);
1770 
1771         DP("After loading global for %s WGSize = %d\n", WGSizeName, WGSizeVal);
1772 
1773         if (WGSizeVal < RTLDeviceInfoTy::Default_WG_Size ||
1774             WGSizeVal > RTLDeviceInfoTy::Max_WG_Size) {
1775           DP("Error wrong WGSize value specified in HSA code object file: "
1776              "%d\n",
1777              WGSizeVal);
1778           WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
1779         }
1780       } else {
1781         DP("Warning: Loading WGSize '%s' - symbol not found, "
1782            "using default value %d\n",
1783            WGSizeName, WGSizeVal);
1784       }
1785 
1786       check("Loading WGSize computation property", err);
1787     }
1788 
1789     KernelsList.push_back(KernelTy(ExecModeVal, WGSizeVal, device_id,
1790                                    CallStackAddr, e->name, kernarg_segment_size,
1791                                    DeviceInfo.KernArgPool));
1792     __tgt_offload_entry entry = *e;
1793     entry.addr = (void *)&KernelsList.back();
1794     DeviceInfo.addOffloadEntry(device_id, entry);
1795     DP("Entry point %ld maps to %s\n", e - HostBegin, e->name);
1796   }
1797 
1798   return DeviceInfo.getOffloadEntriesTable(device_id);
1799 }
1800 
1801 void *__tgt_rtl_data_alloc(int device_id, int64_t size, void *, int32_t kind) {
1802   void *ptr = NULL;
1803   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1804 
1805   if (kind != TARGET_ALLOC_DEFAULT) {
1806     REPORT("Invalid target data allocation kind or requested allocator not "
1807            "implemented yet\n");
1808     return NULL;
1809   }
1810 
1811   hsa_amd_memory_pool_t MemoryPool = DeviceInfo.getDeviceMemoryPool(device_id);
1812   hsa_status_t err = hsa_amd_memory_pool_allocate(MemoryPool, size, 0, &ptr);
1813   DP("Tgt alloc data %ld bytes, (tgt:%016llx).\n", size,
1814      (long long unsigned)(Elf64_Addr)ptr);
1815   ptr = (err == HSA_STATUS_SUCCESS) ? ptr : NULL;
1816   return ptr;
1817 }
1818 
1819 int32_t __tgt_rtl_data_submit(int device_id, void *tgt_ptr, void *hst_ptr,
1820                               int64_t size) {
1821   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1822   __tgt_async_info AsyncInfo;
1823   int32_t rc = dataSubmit(device_id, tgt_ptr, hst_ptr, size, &AsyncInfo);
1824   if (rc != OFFLOAD_SUCCESS)
1825     return OFFLOAD_FAIL;
1826 
1827   return __tgt_rtl_synchronize(device_id, &AsyncInfo);
1828 }
1829 
1830 int32_t __tgt_rtl_data_submit_async(int device_id, void *tgt_ptr, void *hst_ptr,
1831                                     int64_t size, __tgt_async_info *AsyncInfo) {
1832   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1833   if (AsyncInfo) {
1834     initAsyncInfo(AsyncInfo);
1835     return dataSubmit(device_id, tgt_ptr, hst_ptr, size, AsyncInfo);
1836   } else {
1837     return __tgt_rtl_data_submit(device_id, tgt_ptr, hst_ptr, size);
1838   }
1839 }
1840 
1841 int32_t __tgt_rtl_data_retrieve(int device_id, void *hst_ptr, void *tgt_ptr,
1842                                 int64_t size) {
1843   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1844   __tgt_async_info AsyncInfo;
1845   int32_t rc = dataRetrieve(device_id, hst_ptr, tgt_ptr, size, &AsyncInfo);
1846   if (rc != OFFLOAD_SUCCESS)
1847     return OFFLOAD_FAIL;
1848 
1849   return __tgt_rtl_synchronize(device_id, &AsyncInfo);
1850 }
1851 
1852 int32_t __tgt_rtl_data_retrieve_async(int device_id, void *hst_ptr,
1853                                       void *tgt_ptr, int64_t size,
1854                                       __tgt_async_info *AsyncInfo) {
1855   assert(AsyncInfo && "AsyncInfo is nullptr");
1856   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1857   initAsyncInfo(AsyncInfo);
1858   return dataRetrieve(device_id, hst_ptr, tgt_ptr, size, AsyncInfo);
1859 }
1860 
1861 int32_t __tgt_rtl_data_delete(int device_id, void *tgt_ptr) {
1862   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1863   hsa_status_t err;
1864   DP("Tgt free data (tgt:%016llx).\n", (long long unsigned)(Elf64_Addr)tgt_ptr);
1865   err = core::Runtime::Memfree(tgt_ptr);
1866   if (err != HSA_STATUS_SUCCESS) {
1867     DP("Error when freeing CUDA memory\n");
1868     return OFFLOAD_FAIL;
1869   }
1870   return OFFLOAD_SUCCESS;
1871 }
1872 
1873 // Determine launch values for kernel.
1874 struct launchVals {
1875   int WorkgroupSize;
1876   int GridSize;
1877 };
1878 launchVals getLaunchVals(EnvironmentVariables Env, int ConstWGSize,
1879                          int ExecutionMode, int num_teams, int thread_limit,
1880                          uint64_t loop_tripcount, int DeviceNumTeams) {
1881 
1882   int threadsPerGroup = RTLDeviceInfoTy::Default_WG_Size;
1883   int num_groups = 0;
1884 
1885   int Max_Teams =
1886       Env.MaxTeamsDefault > 0 ? Env.MaxTeamsDefault : DeviceNumTeams;
1887   if (Max_Teams > RTLDeviceInfoTy::HardTeamLimit)
1888     Max_Teams = RTLDeviceInfoTy::HardTeamLimit;
1889 
1890   if (print_kernel_trace & STARTUP_DETAILS) {
1891     fprintf(stderr, "RTLDeviceInfoTy::Max_Teams: %d\n",
1892             RTLDeviceInfoTy::Max_Teams);
1893     fprintf(stderr, "Max_Teams: %d\n", Max_Teams);
1894     fprintf(stderr, "RTLDeviceInfoTy::Warp_Size: %d\n",
1895             RTLDeviceInfoTy::Warp_Size);
1896     fprintf(stderr, "RTLDeviceInfoTy::Max_WG_Size: %d\n",
1897             RTLDeviceInfoTy::Max_WG_Size);
1898     fprintf(stderr, "RTLDeviceInfoTy::Default_WG_Size: %d\n",
1899             RTLDeviceInfoTy::Default_WG_Size);
1900     fprintf(stderr, "thread_limit: %d\n", thread_limit);
1901     fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
1902     fprintf(stderr, "ConstWGSize: %d\n", ConstWGSize);
1903   }
1904   // check for thread_limit() clause
1905   if (thread_limit > 0) {
1906     threadsPerGroup = thread_limit;
1907     DP("Setting threads per block to requested %d\n", thread_limit);
1908     if (ExecutionMode == GENERIC) { // Add master warp for GENERIC
1909       threadsPerGroup += RTLDeviceInfoTy::Warp_Size;
1910       DP("Adding master wavefront: +%d threads\n", RTLDeviceInfoTy::Warp_Size);
1911     }
1912     if (threadsPerGroup > RTLDeviceInfoTy::Max_WG_Size) { // limit to max
1913       threadsPerGroup = RTLDeviceInfoTy::Max_WG_Size;
1914       DP("Setting threads per block to maximum %d\n", threadsPerGroup);
1915     }
1916   }
1917   // check flat_max_work_group_size attr here
1918   if (threadsPerGroup > ConstWGSize) {
1919     threadsPerGroup = ConstWGSize;
1920     DP("Reduced threadsPerGroup to flat-attr-group-size limit %d\n",
1921        threadsPerGroup);
1922   }
1923   if (print_kernel_trace & STARTUP_DETAILS)
1924     fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
1925   DP("Preparing %d threads\n", threadsPerGroup);
1926 
1927   // Set default num_groups (teams)
1928   if (Env.TeamLimit > 0)
1929     num_groups = (Max_Teams < Env.TeamLimit) ? Max_Teams : Env.TeamLimit;
1930   else
1931     num_groups = Max_Teams;
1932   DP("Set default num of groups %d\n", num_groups);
1933 
1934   if (print_kernel_trace & STARTUP_DETAILS) {
1935     fprintf(stderr, "num_groups: %d\n", num_groups);
1936     fprintf(stderr, "num_teams: %d\n", num_teams);
1937   }
1938 
1939   // Reduce num_groups if threadsPerGroup exceeds RTLDeviceInfoTy::Max_WG_Size
1940   // This reduction is typical for default case (no thread_limit clause).
1941   // or when user goes crazy with num_teams clause.
1942   // FIXME: We cant distinguish between a constant or variable thread limit.
1943   // So we only handle constant thread_limits.
1944   if (threadsPerGroup >
1945       RTLDeviceInfoTy::Default_WG_Size) //  256 < threadsPerGroup <= 1024
1946     // Should we round threadsPerGroup up to nearest RTLDeviceInfoTy::Warp_Size
1947     // here?
1948     num_groups = (Max_Teams * RTLDeviceInfoTy::Max_WG_Size) / threadsPerGroup;
1949 
1950   // check for num_teams() clause
1951   if (num_teams > 0) {
1952     num_groups = (num_teams < num_groups) ? num_teams : num_groups;
1953   }
1954   if (print_kernel_trace & STARTUP_DETAILS) {
1955     fprintf(stderr, "num_groups: %d\n", num_groups);
1956     fprintf(stderr, "Env.NumTeams %d\n", Env.NumTeams);
1957     fprintf(stderr, "Env.TeamLimit %d\n", Env.TeamLimit);
1958   }
1959 
1960   if (Env.NumTeams > 0) {
1961     num_groups = (Env.NumTeams < num_groups) ? Env.NumTeams : num_groups;
1962     DP("Modifying teams based on Env.NumTeams %d\n", Env.NumTeams);
1963   } else if (Env.TeamLimit > 0) {
1964     num_groups = (Env.TeamLimit < num_groups) ? Env.TeamLimit : num_groups;
1965     DP("Modifying teams based on Env.TeamLimit%d\n", Env.TeamLimit);
1966   } else {
1967     if (num_teams <= 0) {
1968       if (loop_tripcount > 0) {
1969         if (ExecutionMode == SPMD) {
1970           // round up to the nearest integer
1971           num_groups = ((loop_tripcount - 1) / threadsPerGroup) + 1;
1972         } else if (ExecutionMode == GENERIC) {
1973           num_groups = loop_tripcount;
1974         } else /* ExecutionMode == SPMD_GENERIC */ {
1975           // This is a generic kernel that was transformed to use SPMD-mode
1976           // execution but uses Generic-mode semantics for scheduling.
1977           num_groups = loop_tripcount;
1978         }
1979         DP("Using %d teams due to loop trip count %" PRIu64 " and number of "
1980            "threads per block %d\n",
1981            num_groups, loop_tripcount, threadsPerGroup);
1982       }
1983     } else {
1984       num_groups = num_teams;
1985     }
1986     if (num_groups > Max_Teams) {
1987       num_groups = Max_Teams;
1988       if (print_kernel_trace & STARTUP_DETAILS)
1989         fprintf(stderr, "Limiting num_groups %d to Max_Teams %d \n", num_groups,
1990                 Max_Teams);
1991     }
1992     if (num_groups > num_teams && num_teams > 0) {
1993       num_groups = num_teams;
1994       if (print_kernel_trace & STARTUP_DETAILS)
1995         fprintf(stderr, "Limiting num_groups %d to clause num_teams %d \n",
1996                 num_groups, num_teams);
1997     }
1998   }
1999 
2000   // num_teams clause always honored, no matter what, unless DEFAULT is active.
2001   if (num_teams > 0) {
2002     num_groups = num_teams;
2003     // Cap num_groups to EnvMaxTeamsDefault if set.
2004     if (Env.MaxTeamsDefault > 0 && num_groups > Env.MaxTeamsDefault)
2005       num_groups = Env.MaxTeamsDefault;
2006   }
2007   if (print_kernel_trace & STARTUP_DETAILS) {
2008     fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
2009     fprintf(stderr, "num_groups: %d\n", num_groups);
2010     fprintf(stderr, "loop_tripcount: %ld\n", loop_tripcount);
2011   }
2012   DP("Final %d num_groups and %d threadsPerGroup\n", num_groups,
2013      threadsPerGroup);
2014 
2015   launchVals res;
2016   res.WorkgroupSize = threadsPerGroup;
2017   res.GridSize = threadsPerGroup * num_groups;
2018   return res;
2019 }
2020 
2021 static uint64_t acquire_available_packet_id(hsa_queue_t *queue) {
2022   uint64_t packet_id = hsa_queue_add_write_index_relaxed(queue, 1);
2023   bool full = true;
2024   while (full) {
2025     full =
2026         packet_id >= (queue->size + hsa_queue_load_read_index_scacquire(queue));
2027   }
2028   return packet_id;
2029 }
2030 
2031 static int32_t __tgt_rtl_run_target_team_region_locked(
2032     int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
2033     ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
2034     int32_t thread_limit, uint64_t loop_tripcount);
2035 
2036 int32_t __tgt_rtl_run_target_team_region(int32_t device_id, void *tgt_entry_ptr,
2037                                          void **tgt_args,
2038                                          ptrdiff_t *tgt_offsets,
2039                                          int32_t arg_num, int32_t num_teams,
2040                                          int32_t thread_limit,
2041                                          uint64_t loop_tripcount) {
2042 
2043   DeviceInfo.load_run_lock.lock_shared();
2044   int32_t res = __tgt_rtl_run_target_team_region_locked(
2045       device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, num_teams,
2046       thread_limit, loop_tripcount);
2047 
2048   DeviceInfo.load_run_lock.unlock_shared();
2049   return res;
2050 }
2051 
2052 int32_t __tgt_rtl_run_target_team_region_locked(
2053     int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
2054     ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
2055     int32_t thread_limit, uint64_t loop_tripcount) {
2056   // Set the context we are using
2057   // update thread limit content in gpu memory if un-initialized or specified
2058   // from host
2059 
2060   DP("Run target team region thread_limit %d\n", thread_limit);
2061 
2062   // All args are references.
2063   std::vector<void *> args(arg_num);
2064   std::vector<void *> ptrs(arg_num);
2065 
2066   DP("Arg_num: %d\n", arg_num);
2067   for (int32_t i = 0; i < arg_num; ++i) {
2068     ptrs[i] = (void *)((intptr_t)tgt_args[i] + tgt_offsets[i]);
2069     args[i] = &ptrs[i];
2070     DP("Offseted base: arg[%d]:" DPxMOD "\n", i, DPxPTR(ptrs[i]));
2071   }
2072 
2073   KernelTy *KernelInfo = (KernelTy *)tgt_entry_ptr;
2074 
2075   std::string kernel_name = std::string(KernelInfo->Name);
2076   auto &KernelInfoTable = DeviceInfo.KernelInfoTable;
2077   if (KernelInfoTable[device_id].find(kernel_name) ==
2078       KernelInfoTable[device_id].end()) {
2079     DP("Kernel %s not found\n", kernel_name.c_str());
2080     return OFFLOAD_FAIL;
2081   }
2082 
2083   const atl_kernel_info_t KernelInfoEntry =
2084       KernelInfoTable[device_id][kernel_name];
2085   const uint32_t group_segment_size = KernelInfoEntry.group_segment_size;
2086   const uint32_t sgpr_count = KernelInfoEntry.sgpr_count;
2087   const uint32_t vgpr_count = KernelInfoEntry.vgpr_count;
2088   const uint32_t sgpr_spill_count = KernelInfoEntry.sgpr_spill_count;
2089   const uint32_t vgpr_spill_count = KernelInfoEntry.vgpr_spill_count;
2090 
2091   assert(arg_num == (int)KernelInfoEntry.num_args);
2092 
2093   /*
2094    * Set limit based on ThreadsPerGroup and GroupsPerDevice
2095    */
2096   launchVals LV = getLaunchVals(DeviceInfo.Env, KernelInfo->ConstWGSize,
2097                                 KernelInfo->ExecutionMode,
2098                                 num_teams,      // From run_region arg
2099                                 thread_limit,   // From run_region arg
2100                                 loop_tripcount, // From run_region arg
2101                                 DeviceInfo.NumTeams[KernelInfo->device_id]);
2102   const int GridSize = LV.GridSize;
2103   const int WorkgroupSize = LV.WorkgroupSize;
2104 
2105   if (print_kernel_trace >= LAUNCH) {
2106     int num_groups = GridSize / WorkgroupSize;
2107     // enum modes are SPMD, GENERIC, NONE 0,1,2
2108     // if doing rtl timing, print to stderr, unless stdout requested.
2109     bool traceToStdout = print_kernel_trace & (RTL_TO_STDOUT | RTL_TIMING);
2110     fprintf(traceToStdout ? stdout : stderr,
2111             "DEVID:%2d SGN:%1d ConstWGSize:%-4d args:%2d teamsXthrds:(%4dX%4d) "
2112             "reqd:(%4dX%4d) lds_usage:%uB sgpr_count:%u vgpr_count:%u "
2113             "sgpr_spill_count:%u vgpr_spill_count:%u tripcount:%lu n:%s\n",
2114             device_id, KernelInfo->ExecutionMode, KernelInfo->ConstWGSize,
2115             arg_num, num_groups, WorkgroupSize, num_teams, thread_limit,
2116             group_segment_size, sgpr_count, vgpr_count, sgpr_spill_count,
2117             vgpr_spill_count, loop_tripcount, KernelInfo->Name);
2118   }
2119 
2120   // Run on the device.
2121   {
2122     hsa_queue_t *queue = DeviceInfo.HSAQueues[device_id];
2123     if (!queue) {
2124       return OFFLOAD_FAIL;
2125     }
2126     uint64_t packet_id = acquire_available_packet_id(queue);
2127 
2128     const uint32_t mask = queue->size - 1; // size is a power of 2
2129     hsa_kernel_dispatch_packet_t *packet =
2130         (hsa_kernel_dispatch_packet_t *)queue->base_address +
2131         (packet_id & mask);
2132 
2133     // packet->header is written last
2134     packet->setup = UINT16_C(1) << HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS;
2135     packet->workgroup_size_x = WorkgroupSize;
2136     packet->workgroup_size_y = 1;
2137     packet->workgroup_size_z = 1;
2138     packet->reserved0 = 0;
2139     packet->grid_size_x = GridSize;
2140     packet->grid_size_y = 1;
2141     packet->grid_size_z = 1;
2142     packet->private_segment_size = KernelInfoEntry.private_segment_size;
2143     packet->group_segment_size = KernelInfoEntry.group_segment_size;
2144     packet->kernel_object = KernelInfoEntry.kernel_object;
2145     packet->kernarg_address = 0;     // use the block allocator
2146     packet->reserved2 = 0;           // atmi writes id_ here
2147     packet->completion_signal = {0}; // may want a pool of signals
2148 
2149     KernelArgPool *ArgPool = nullptr;
2150     {
2151       auto it = KernelArgPoolMap.find(std::string(KernelInfo->Name));
2152       if (it != KernelArgPoolMap.end()) {
2153         ArgPool = (it->second).get();
2154       }
2155     }
2156     if (!ArgPool) {
2157       DP("Warning: No ArgPool for %s on device %d\n", KernelInfo->Name,
2158          device_id);
2159     }
2160     {
2161       void *kernarg = nullptr;
2162       if (ArgPool) {
2163         assert(ArgPool->kernarg_segment_size == (arg_num * sizeof(void *)));
2164         kernarg = ArgPool->allocate(arg_num);
2165       }
2166       if (!kernarg) {
2167         DP("Allocate kernarg failed\n");
2168         return OFFLOAD_FAIL;
2169       }
2170 
2171       // Copy explicit arguments
2172       for (int i = 0; i < arg_num; i++) {
2173         memcpy((char *)kernarg + sizeof(void *) * i, args[i], sizeof(void *));
2174       }
2175 
2176       // Initialize implicit arguments. ATMI seems to leave most fields
2177       // uninitialized
2178       atmi_implicit_args_t *impl_args =
2179           reinterpret_cast<atmi_implicit_args_t *>(
2180               static_cast<char *>(kernarg) + ArgPool->kernarg_segment_size);
2181       memset(impl_args, 0,
2182              sizeof(atmi_implicit_args_t)); // may not be necessary
2183       impl_args->offset_x = 0;
2184       impl_args->offset_y = 0;
2185       impl_args->offset_z = 0;
2186 
2187       // assign a hostcall buffer for the selected Q
2188       if (__atomic_load_n(&DeviceInfo.hostcall_required, __ATOMIC_ACQUIRE)) {
2189         // hostrpc_assign_buffer is not thread safe, and this function is
2190         // under a multiple reader lock, not a writer lock.
2191         static pthread_mutex_t hostcall_init_lock = PTHREAD_MUTEX_INITIALIZER;
2192         pthread_mutex_lock(&hostcall_init_lock);
2193         impl_args->hostcall_ptr = hostrpc_assign_buffer(
2194             DeviceInfo.HSAAgents[device_id], queue, device_id);
2195         pthread_mutex_unlock(&hostcall_init_lock);
2196         if (!impl_args->hostcall_ptr) {
2197           DP("hostrpc_assign_buffer failed, gpu would dereference null and "
2198              "error\n");
2199           return OFFLOAD_FAIL;
2200         }
2201       }
2202 
2203       packet->kernarg_address = kernarg;
2204     }
2205 
2206     {
2207       hsa_signal_t s = DeviceInfo.FreeSignalPool.pop();
2208       if (s.handle == 0) {
2209         DP("Failed to get signal instance\n");
2210         return OFFLOAD_FAIL;
2211       }
2212       packet->completion_signal = s;
2213       hsa_signal_store_relaxed(packet->completion_signal, 1);
2214     }
2215 
2216     core::packet_store_release(reinterpret_cast<uint32_t *>(packet),
2217                                core::create_header(), packet->setup);
2218 
2219     hsa_signal_store_relaxed(queue->doorbell_signal, packet_id);
2220 
2221     while (hsa_signal_wait_scacquire(packet->completion_signal,
2222                                      HSA_SIGNAL_CONDITION_EQ, 0, UINT64_MAX,
2223                                      HSA_WAIT_STATE_BLOCKED) != 0)
2224       ;
2225 
2226     assert(ArgPool);
2227     ArgPool->deallocate(packet->kernarg_address);
2228     DeviceInfo.FreeSignalPool.push(packet->completion_signal);
2229   }
2230 
2231   DP("Kernel completed\n");
2232   return OFFLOAD_SUCCESS;
2233 }
2234 
2235 int32_t __tgt_rtl_run_target_region(int32_t device_id, void *tgt_entry_ptr,
2236                                     void **tgt_args, ptrdiff_t *tgt_offsets,
2237                                     int32_t arg_num) {
2238   // use one team and one thread
2239   // fix thread num
2240   int32_t team_num = 1;
2241   int32_t thread_limit = 0; // use default
2242   return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
2243                                           tgt_offsets, arg_num, team_num,
2244                                           thread_limit, 0);
2245 }
2246 
2247 int32_t __tgt_rtl_run_target_region_async(int32_t device_id,
2248                                           void *tgt_entry_ptr, void **tgt_args,
2249                                           ptrdiff_t *tgt_offsets,
2250                                           int32_t arg_num,
2251                                           __tgt_async_info *AsyncInfo) {
2252   assert(AsyncInfo && "AsyncInfo is nullptr");
2253   initAsyncInfo(AsyncInfo);
2254 
2255   // use one team and one thread
2256   // fix thread num
2257   int32_t team_num = 1;
2258   int32_t thread_limit = 0; // use default
2259   return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
2260                                           tgt_offsets, arg_num, team_num,
2261                                           thread_limit, 0);
2262 }
2263 
2264 int32_t __tgt_rtl_synchronize(int32_t device_id, __tgt_async_info *AsyncInfo) {
2265   assert(AsyncInfo && "AsyncInfo is nullptr");
2266 
2267   // Cuda asserts that AsyncInfo->Queue is non-null, but this invariant
2268   // is not ensured by devices.cpp for amdgcn
2269   // assert(AsyncInfo->Queue && "AsyncInfo->Queue is nullptr");
2270   if (AsyncInfo->Queue) {
2271     finiAsyncInfo(AsyncInfo);
2272   }
2273   return OFFLOAD_SUCCESS;
2274 }
2275 
2276 namespace core {
2277 hsa_status_t allow_access_to_all_gpu_agents(void *ptr) {
2278   return hsa_amd_agents_allow_access(DeviceInfo.HSAAgents.size(),
2279                                      &DeviceInfo.HSAAgents[0], NULL, ptr);
2280 }
2281 
2282 } // namespace core
2283