1 //===--- amdgpu/src/rtl.cpp -------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // RTL for AMD HSA machines
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include <algorithm>
14 #include <assert.h>
15 #include <cstdio>
16 #include <cstdlib>
17 #include <cstring>
18 #include <functional>
19 #include <libelf.h>
20 #include <list>
21 #include <memory>
22 #include <mutex>
23 #include <shared_mutex>
24 #include <unordered_map>
25 #include <vector>
26 
27 #include "interop_hsa.h"
28 #include "impl_runtime.h"
29 
30 #include "internal.h"
31 #include "rt.h"
32 
33 #include "DeviceEnvironment.h"
34 #include "get_elf_mach_gfx_name.h"
35 #include "omptargetplugin.h"
36 #include "print_tracing.h"
37 
38 #include "llvm/Frontend/OpenMP/OMPConstants.h"
39 #include "llvm/Frontend/OpenMP/OMPGridValues.h"
40 
41 // hostrpc interface, FIXME: consider moving to its own include. These are
42 // statically linked into the amdgpu plugin from hostrpc_services.a if present,
43 // linked as --whole-archive to override the weak symbols that are used to
44 // implement a fallback for toolchains that do not yet have a hostrpc library.
45 extern "C" {
46 unsigned long hostrpc_assign_buffer(hsa_agent_t agent, hsa_queue_t *this_Q,
47                                     uint32_t device_id);
48 hsa_status_t hostrpc_init();
49 hsa_status_t hostrpc_terminate();
50 
51 __attribute__((weak)) hsa_status_t hostrpc_init() { return HSA_STATUS_SUCCESS; }
52 __attribute__((weak)) hsa_status_t hostrpc_terminate() {
53   return HSA_STATUS_SUCCESS;
54 }
55 __attribute__((weak)) unsigned long
56 hostrpc_assign_buffer(hsa_agent_t, hsa_queue_t *, uint32_t device_id) {
57   DP("Warning: Attempting to assign hostrpc to device %u, but hostrpc library "
58      "missing\n",
59      device_id);
60   return 0;
61 }
62 }
63 
64 // Heuristic parameters used for kernel launch
65 // Number of teams per CU to allow scheduling flexibility
66 static const unsigned DefaultTeamsPerCU = 4;
67 
68 int print_kernel_trace;
69 
70 #ifdef OMPTARGET_DEBUG
71 #define check(msg, status)                                                     \
72   if (status != HSA_STATUS_SUCCESS) {                                          \
73     DP(#msg " failed\n");                                                      \
74   } else {                                                                     \
75     DP(#msg " succeeded\n");                                                   \
76   }
77 #else
78 #define check(msg, status)                                                     \
79   {}
80 #endif
81 
82 #include "elf_common.h"
83 
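// Thin C++ wrappers that adapt callable objects (e.g. lambdas with captures)
// to the C callback-plus-void* interface expected by the HSA iteration APIs.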
84 namespace hsa {
85 template <typename C> hsa_status_t iterate_agents(C cb) {
86   auto L = [](hsa_agent_t agent, void *data) -> hsa_status_t {
87     C *unwrapped = static_cast<C *>(data);
88     return (*unwrapped)(agent);
89   };
90   return hsa_iterate_agents(L, static_cast<void *>(&cb));
91 }
92 
93 template <typename C>
94 hsa_status_t amd_agent_iterate_memory_pools(hsa_agent_t Agent, C cb) {
95   auto L = [](hsa_amd_memory_pool_t MemoryPool, void *data) -> hsa_status_t {
96     C *unwrapped = static_cast<C *>(data);
97     return (*unwrapped)(MemoryPool);
98   };
99 
100   return hsa_amd_agent_iterate_memory_pools(Agent, L, static_cast<void *>(&cb));
101 }
102 
103 } // namespace hsa
104 
105 /// Keep entries table per device
106 struct FuncOrGblEntryTy {
107   __tgt_target_table Table;
108   std::vector<__tgt_offload_entry> Entries;
109 };
110 
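/// Pre-allocated pool of kernarg segments for a single kernel. One HSA
/// allocation is carved into MAX_NUM_KERNELS segments which are handed out and
/// returned through a mutex-guarded free list of indices.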
111 struct KernelArgPool {
112 private:
113   static pthread_mutex_t mutex;
114 
115 public:
116   uint32_t kernarg_segment_size;
117   void *kernarg_region = nullptr;
118   std::queue<int> free_kernarg_segments;
119 
120   uint32_t kernarg_size_including_implicit() {
121     return kernarg_segment_size + sizeof(impl_implicit_args_t);
122   }
123 
124   ~KernelArgPool() {
125     if (kernarg_region) {
126       auto r = hsa_amd_memory_pool_free(kernarg_region);
127       if (r != HSA_STATUS_SUCCESS) {
128         DP("hsa_amd_memory_pool_free failed: %s\n", get_error_string(r));
129       }
130     }
131   }
132 
133   // Can't really copy or move a mutex
134   KernelArgPool() = default;
135   KernelArgPool(const KernelArgPool &) = delete;
136   KernelArgPool(KernelArgPool &&) = delete;
137 
138   KernelArgPool(uint32_t kernarg_segment_size,
139                 hsa_amd_memory_pool_t &memory_pool)
140       : kernarg_segment_size(kernarg_segment_size) {
141 
142     // impl uses one pool per kernel for all gpus, with a fixed upper size.
143     // That exact scheme is preserved here, including the queue<int>.
144 
145     hsa_status_t err = hsa_amd_memory_pool_allocate(
146         memory_pool, kernarg_size_including_implicit() * MAX_NUM_KERNELS, 0,
147         &kernarg_region);
148 
149     if (err != HSA_STATUS_SUCCESS) {
150       DP("hsa_amd_memory_pool_allocate failed: %s\n", get_error_string(err));
151       kernarg_region = nullptr; // paranoid
152       return;
153     }
154 
155     err = core::allow_access_to_all_gpu_agents(kernarg_region);
156     if (err != HSA_STATUS_SUCCESS) {
157       DP("hsa allow_access_to_all_gpu_agents failed: %s\n",
158          get_error_string(err));
159       auto r = hsa_amd_memory_pool_free(kernarg_region);
160       if (r != HSA_STATUS_SUCCESS) {
161         // if free failed, can't do anything more to resolve it
162         DP("hsa memory pool free failed: %s\n", get_error_string(r));
163       }
164       kernarg_region = nullptr;
165       return;
166     }
167 
168     for (int i = 0; i < MAX_NUM_KERNELS; i++) {
169       free_kernarg_segments.push(i);
170     }
171   }
172 
173   void *allocate(uint64_t arg_num) {
174     assert((arg_num * sizeof(void *)) == kernarg_segment_size);
175     lock l(&mutex);
176     void *res = nullptr;
177     if (!free_kernarg_segments.empty()) {
178 
179       int free_idx = free_kernarg_segments.front();
180       res = static_cast<void *>(static_cast<char *>(kernarg_region) +
181                                 (free_idx * kernarg_size_including_implicit()));
182       assert(free_idx == pointer_to_index(res));
183       free_kernarg_segments.pop();
184     }
185     return res;
186   }
187 
188   void deallocate(void *ptr) {
189     lock l(&mutex);
190     int idx = pointer_to_index(ptr);
191     free_kernarg_segments.push(idx);
192   }
193 
194 private:
195   int pointer_to_index(void *ptr) {
196     ptrdiff_t bytes =
197         static_cast<char *>(ptr) - static_cast<char *>(kernarg_region);
198     assert(bytes >= 0);
199     assert(bytes % kernarg_size_including_implicit() == 0);
200     return bytes / kernarg_size_including_implicit();
201   }
202   struct lock {
203     lock(pthread_mutex_t *m) : m(m) { pthread_mutex_lock(m); }
204     ~lock() { pthread_mutex_unlock(m); }
205     pthread_mutex_t *m;
206   };
207 };
208 pthread_mutex_t KernelArgPool::mutex = PTHREAD_MUTEX_INITIALIZER;
209 
210 std::unordered_map<std::string /*kernel*/, std::unique_ptr<KernelArgPool>>
211     KernelArgPoolMap;
212 
213 /// Use a single entity to encode a kernel and a set of flags
214 struct KernelTy {
215   llvm::omp::OMPTgtExecModeFlags ExecutionMode;
216   int16_t ConstWGSize;
217   int32_t device_id;
218   void *CallStackAddr = nullptr;
219   const char *Name;
220 
221   KernelTy(llvm::omp::OMPTgtExecModeFlags _ExecutionMode, int16_t _ConstWGSize,
222            int32_t _device_id, void *_CallStackAddr, const char *_Name,
223            uint32_t _kernarg_segment_size,
224            hsa_amd_memory_pool_t &KernArgMemoryPool)
225       : ExecutionMode(_ExecutionMode), ConstWGSize(_ConstWGSize),
226         device_id(_device_id), CallStackAddr(_CallStackAddr), Name(_Name) {
227     DP("Construct kernelinfo: ExecMode %d\n", ExecutionMode);
228 
229     std::string N(_Name);
230     if (KernelArgPoolMap.find(N) == KernelArgPoolMap.end()) {
231       KernelArgPoolMap.insert(
232           std::make_pair(N, std::unique_ptr<KernelArgPool>(new KernelArgPool(
233                                 _kernarg_segment_size, KernArgMemoryPool))));
234     }
235   }
236 };
237 
238 /// List that contains all the kernels.
239 /// FIXME: we may need this to be per device and per library.
240 std::list<KernelTy> KernelsList;
241 
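/// Iterate over all HSA agents, invoking CB with the device type and agent
/// handle of each one. Returns the first error encountered, if any.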
242 template <typename Callback> static hsa_status_t FindAgents(Callback CB) {
243 
244   hsa_status_t err =
245       hsa::iterate_agents([&](hsa_agent_t agent) -> hsa_status_t {
246         hsa_device_type_t device_type;
247         // get_info fails iff HSA runtime not yet initialized
248         hsa_status_t err =
249             hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
250 
251         if (err != HSA_STATUS_SUCCESS) {
252           if (print_kernel_trace > 0)
253             DP("rtl.cpp: err %s\n", get_error_string(err));
254 
255           return err;
256         }
257 
258         CB(device_type, agent);
259         return HSA_STATUS_SUCCESS;
260       });
261 
262   // iterate_agents fails iff HSA runtime not yet initialized
263   if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS) {
264     DP("rtl.cpp: err %s\n", get_error_string(err));
265   }
266 
267   return err;
268 }
269 
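/// Error callback registered with each HSA queue: any asynchronous queue error
/// is reported and treated as fatal.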
270 static void callbackQueue(hsa_status_t status, hsa_queue_t *source,
271                           void *data) {
272   if (status != HSA_STATUS_SUCCESS) {
273     const char *status_string;
274     if (hsa_status_string(status, &status_string) != HSA_STATUS_SUCCESS) {
275       status_string = "unavailable";
276     }
277     DP("[%s:%d] GPU error in queue %p %d (%s)\n", __FILE__, __LINE__, source,
278        status, status_string);
279     abort();
280   }
281 }
282 
283 namespace core {
284 namespace {
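// Publish the packet header and setup words atomically with release semantics
// so the packet processor never observes a partially written packet.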
285 void packet_store_release(uint32_t *packet, uint16_t header, uint16_t rest) {
286   __atomic_store_n(packet, header | (rest << 16), __ATOMIC_RELEASE);
287 }
288 
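// Build a kernel dispatch packet header with system-scope acquire and release
// fences.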
289 uint16_t create_header() {
290   uint16_t header = HSA_PACKET_TYPE_KERNEL_DISPATCH << HSA_PACKET_HEADER_TYPE;
291   header |= HSA_FENCE_SCOPE_SYSTEM << HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE;
292   header |= HSA_FENCE_SCOPE_SYSTEM << HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE;
293   return header;
294 }
295 
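// A memory pool is usable if the runtime can allocate from it and it reports a
// non-zero size.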
296 hsa_status_t isValidMemoryPool(hsa_amd_memory_pool_t MemoryPool) {
297   bool AllocAllowed = false;
298   hsa_status_t Err = hsa_amd_memory_pool_get_info(
299       MemoryPool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
300       &AllocAllowed);
301   if (Err != HSA_STATUS_SUCCESS) {
302     DP("Alloc allowed in memory pool check failed: %s\n",
303        get_error_string(Err));
304     return Err;
305   }
306 
307   size_t Size = 0;
308   Err = hsa_amd_memory_pool_get_info(MemoryPool, HSA_AMD_MEMORY_POOL_INFO_SIZE,
309                                      &Size);
310   if (Err != HSA_STATUS_SUCCESS) {
311     DP("Get memory pool size failed: %s\n", get_error_string(Err));
312     return Err;
313   }
314 
315   return (AllocAllowed && Size > 0) ? HSA_STATUS_SUCCESS : HSA_STATUS_ERROR;
316 }
317 
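// Callback for memory pool iteration: append every valid pool to the
// std::vector passed through Data.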
318 hsa_status_t addMemoryPool(hsa_amd_memory_pool_t MemoryPool, void *Data) {
319   std::vector<hsa_amd_memory_pool_t> *Result =
320       static_cast<std::vector<hsa_amd_memory_pool_t> *>(Data);
321 
322   hsa_status_t err;
323   if ((err = isValidMemoryPool(MemoryPool)) != HSA_STATUS_SUCCESS) {
324     return err;
325   }
326 
327   Result->push_back(MemoryPool);
328   return HSA_STATUS_SUCCESS;
329 }
330 
331 } // namespace
332 } // namespace core
333 
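/// Values of the environment variables that influence team and thread counts;
/// -1 indicates the corresponding variable was not set.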
334 struct EnvironmentVariables {
335   int NumTeams;
336   int TeamLimit;
337   int TeamThreadLimit;
338   int MaxTeamsDefault;
339 };
340 
341 template <uint32_t wavesize>
342 static constexpr const llvm::omp::GV &getGridValue() {
343   return llvm::omp::getAMDGPUGridValues<wavesize>();
344 }
345 
346 struct HSALifetime {
347   // Wrapper around HSA initialization, used to ensure it is constructed
348   // before other types and destructed after them, so those types can use
349   // RAII for cleanup without risking running outside the lifetime of HSA.
350   const hsa_status_t S;
351 
352   bool success() { return S == HSA_STATUS_SUCCESS; }
353   HSALifetime() : S(hsa_init()) {}
354 
355   ~HSALifetime() {
356     if (S == HSA_STATUS_SUCCESS) {
357       hsa_status_t Err = hsa_shut_down();
358       if (Err != HSA_STATUS_SUCCESS) {
359         // Can't call into HSA to get a string from the integer
360         DP("Shutting down HSA failed: %d\n", Err);
361       }
362     }
363   }
364 };
365 
366 /// Class containing all the device information
367 class RTLDeviceInfoTy {
368   HSALifetime HSA; // First field => constructed first and destructed last
369   std::vector<std::list<FuncOrGblEntryTy>> FuncGblEntries;
370 
371   struct QueueDeleter {
372     void operator()(hsa_queue_t *Q) {
373       if (Q) {
374         hsa_status_t Err = hsa_queue_destroy(Q);
375         if (Err != HSA_STATUS_SUCCESS) {
376           DP("Error destroying hsa queue: %s\n", get_error_string(Err));
377         }
378       }
379     }
380   };
381 
382 public:
383   bool ConstructionSucceeded = false;
384 
385   // Loading a binary populates the symbol tables and mutates various global
386   // state; running a kernel reads those symbol tables.
387   std::shared_timed_mutex load_run_lock;
388 
389   int NumberOfDevices = 0;
390 
391   // GPU devices
392   std::vector<hsa_agent_t> HSAAgents;
393   std::vector<std::unique_ptr<hsa_queue_t, QueueDeleter>>
394       HSAQueues; // one per gpu
395 
396   // CPUs
397   std::vector<hsa_agent_t> CPUAgents;
398 
399   // Device properties
400   std::vector<int> ComputeUnits;
401   std::vector<int> GroupsPerDevice;
402   std::vector<int> ThreadsPerGroup;
403   std::vector<int> WarpSize;
404   std::vector<std::string> GPUName;
405 
406   // OpenMP properties
407   std::vector<int> NumTeams;
408   std::vector<int> NumThreads;
409 
410   // OpenMP Environment properties
411   EnvironmentVariables Env;
412 
413   // OpenMP Requires Flags
414   int64_t RequiresFlags;
415 
416   // Resource pools
417   SignalPoolT FreeSignalPool;
418 
419   bool hostcall_required = false;
420 
421   std::vector<hsa_executable_t> HSAExecutables;
422 
423   std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable;
424   std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable;
425 
426   hsa_amd_memory_pool_t KernArgPool;
427 
428   // fine grained memory pool for host allocations
429   hsa_amd_memory_pool_t HostFineGrainedMemoryPool;
430 
431   // fine and coarse-grained memory pools per offloading device
432   std::vector<hsa_amd_memory_pool_t> DeviceFineGrainedMemoryPools;
433   std::vector<hsa_amd_memory_pool_t> DeviceCoarseGrainedMemoryPools;
434 
435   struct implFreePtrDeletor {
436     void operator()(void *p) {
437       core::Runtime::Memfree(p); // ignore failure to free
438     }
439   };
440 
441   // device_State shared across loaded binaries, error if inconsistent size
442   std::vector<std::pair<std::unique_ptr<void, implFreePtrDeletor>, uint64_t>>
443       deviceStateStore;
444 
445   static const unsigned HardTeamLimit =
446       (1 << 16) - 1; // 64K - 1, the largest value that fits in a uint16_t
447   static const int DefaultNumTeams = 128;
448 
449   // These need to be per-device since different devices can have different
450   // wave sizes, but are currently the same number for each so that refactor
451   // can be postponed.
452   static_assert(getGridValue<32>().GV_Max_Teams ==
453                     getGridValue<64>().GV_Max_Teams,
454                 "");
455   static const int Max_Teams = getGridValue<64>().GV_Max_Teams;
456 
457   static_assert(getGridValue<32>().GV_Max_WG_Size ==
458                     getGridValue<64>().GV_Max_WG_Size,
459                 "");
460   static const int Max_WG_Size = getGridValue<64>().GV_Max_WG_Size;
461 
462   static_assert(getGridValue<32>().GV_Default_WG_Size ==
463                     getGridValue<64>().GV_Default_WG_Size,
464                 "");
465   static const int Default_WG_Size = getGridValue<64>().GV_Default_WG_Size;
466 
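  // Copy between host and device by borrowing a signal from the free pool,
  // running the given memcpy implementation, and returning the signal once the
  // call has completed.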
467   using MemcpyFunc = hsa_status_t (*)(hsa_signal_t, void *, const void *,
468                                       size_t size, hsa_agent_t,
469                                       hsa_amd_memory_pool_t);
470   hsa_status_t freesignalpool_memcpy(void *dest, const void *src, size_t size,
471                                      MemcpyFunc Func, int32_t deviceId) {
472     hsa_agent_t agent = HSAAgents[deviceId];
473     hsa_signal_t s = FreeSignalPool.pop();
474     if (s.handle == 0) {
475       return HSA_STATUS_ERROR;
476     }
477     hsa_status_t r = Func(s, dest, src, size, agent, HostFineGrainedMemoryPool);
478     FreeSignalPool.push(s);
479     return r;
480   }
481 
482   hsa_status_t freesignalpool_memcpy_d2h(void *dest, const void *src,
483                                          size_t size, int32_t deviceId) {
484     return freesignalpool_memcpy(dest, src, size, impl_memcpy_d2h, deviceId);
485   }
486 
487   hsa_status_t freesignalpool_memcpy_h2d(void *dest, const void *src,
488                                          size_t size, int32_t deviceId) {
489     return freesignalpool_memcpy(dest, src, size, impl_memcpy_h2d, deviceId);
490   }
491 
492   // Record entry point associated with device
493   void addOffloadEntry(int32_t device_id, __tgt_offload_entry entry) {
494     assert(device_id < (int32_t)FuncGblEntries.size() &&
495            "Unexpected device id!");
496     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
497 
498     E.Entries.push_back(entry);
499   }
500 
501   // Return true if the entry is associated with device
502   bool findOffloadEntry(int32_t device_id, void *addr) {
503     assert(device_id < (int32_t)FuncGblEntries.size() &&
504            "Unexpected device id!");
505     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
506 
507     for (auto &it : E.Entries) {
508       if (it.addr == addr)
509         return true;
510     }
511 
512     return false;
513   }
514 
515   // Return the pointer to the target entries table
516   __tgt_target_table *getOffloadEntriesTable(int32_t device_id) {
517     assert(device_id < (int32_t)FuncGblEntries.size() &&
518            "Unexpected device id!");
519     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
520 
521     int32_t size = E.Entries.size();
522 
523     // Table is empty
524     if (!size)
525       return 0;
526 
527     __tgt_offload_entry *begin = &E.Entries[0];
528     __tgt_offload_entry *end = &E.Entries[size - 1];
529 
530     // Update table info according to the entries and return the pointer
531     E.Table.EntriesBegin = begin;
532     E.Table.EntriesEnd = ++end;
533 
534     return &E.Table;
535   }
536 
537   // Clear entries table for a device
538   void clearOffloadEntriesTable(int device_id) {
539     assert(device_id < (int32_t)FuncGblEntries.size() &&
540            "Unexpected device id!");
541     FuncGblEntries[device_id].emplace_back();
542     FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
543     // KernelArgPoolMap.clear();
544     E.Entries.clear();
545     E.Table.EntriesBegin = E.Table.EntriesEnd = 0;
546   }
547 
548   hsa_status_t addDeviceMemoryPool(hsa_amd_memory_pool_t MemoryPool,
549                                    int DeviceId) {
550     assert(DeviceId < DeviceFineGrainedMemoryPools.size() && "Unexpected device id!");
551     uint32_t GlobalFlags = 0;
552     hsa_status_t Err = hsa_amd_memory_pool_get_info(
553         MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
554 
555     if (Err != HSA_STATUS_SUCCESS) {
556       return Err;
557     }
558 
559     if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED) {
560       DeviceFineGrainedMemoryPools[DeviceId] = MemoryPool;
561     } else if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_COARSE_GRAINED) {
562       DeviceCoarseGrainedMemoryPools[DeviceId] = MemoryPool;
563     }
564 
565     return HSA_STATUS_SUCCESS;
566   }
567 
568   hsa_status_t setupDevicePools(const std::vector<hsa_agent_t> &Agents) {
569     for (int DeviceId = 0; DeviceId < Agents.size(); DeviceId++) {
570       hsa_status_t Err = hsa::amd_agent_iterate_memory_pools(
571           Agents[DeviceId], [&](hsa_amd_memory_pool_t MemoryPool) {
572             hsa_status_t ValidStatus = core::isValidMemoryPool(MemoryPool);
573             if (ValidStatus != HSA_STATUS_SUCCESS) {
574               DP("Alloc allowed in memory pool check failed: %s\n",
575                  get_error_string(ValidStatus));
576               return HSA_STATUS_SUCCESS;
577             }
578             return addDeviceMemoryPool(MemoryPool, DeviceId);
579           });
580 
581       if (Err != HSA_STATUS_SUCCESS) {
582         DP("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
583            "Iterate all memory pools", get_error_string(Err));
584         return Err;
585       }
586     }
587     return HSA_STATUS_SUCCESS;
588   }
589 
590   hsa_status_t setupHostMemoryPools(std::vector<hsa_agent_t> &Agents) {
591     std::vector<hsa_amd_memory_pool_t> HostPools;
592 
593     // collect all the "valid" pools for all the given agents.
594     for (const auto &Agent : Agents) {
595       hsa_status_t Err = hsa_amd_agent_iterate_memory_pools(
596           Agent, core::addMemoryPool, static_cast<void *>(&HostPools));
597       if (Err != HSA_STATUS_SUCCESS) {
598         DP("addMemoryPool returned %s, continuing\n", get_error_string(Err));
599       }
600     }
601 
602     // We need two fine-grained pools.
603     //  1. One with kernarg flag set for storing kernel arguments
604     //  2. Second for host allocations
605     bool FineGrainedMemoryPoolSet = false;
606     bool KernArgPoolSet = false;
607     for (const auto &MemoryPool : HostPools) {
608       hsa_status_t Err = HSA_STATUS_SUCCESS;
609       uint32_t GlobalFlags = 0;
610       Err = hsa_amd_memory_pool_get_info(
611           MemoryPool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &GlobalFlags);
612       if (Err != HSA_STATUS_SUCCESS) {
613         DP("Get memory pool info failed: %s\n", get_error_string(Err));
614         return Err;
615       }
616 
617       if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED) {
618         if (GlobalFlags & HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT) {
619           KernArgPool = MemoryPool;
620           KernArgPoolSet = true;
621         }
622         HostFineGrainedMemoryPool = MemoryPool;
623         FineGrainedMemoryPoolSet = true;
624       }
625     }
626 
627     if (FineGrainedMemoryPoolSet && KernArgPoolSet)
628       return HSA_STATUS_SUCCESS;
629 
630     return HSA_STATUS_ERROR;
631   }
632 
633   hsa_amd_memory_pool_t getDeviceMemoryPool(int DeviceId) {
634     assert(DeviceId >= 0 && DeviceId < DeviceCoarseGrainedMemoryPools.size() &&
635            "Invalid device Id");
636     return DeviceCoarseGrainedMemoryPools[DeviceId];
637   }
638 
639   hsa_amd_memory_pool_t getHostMemoryPool() {
640     return HostFineGrainedMemoryPool;
641   }
642 
643   static int readEnvElseMinusOne(const char *Env) {
644     const char *envStr = getenv(Env);
645     int res = -1;
646     if (envStr) {
647       res = std::stoi(envStr);
648       DP("Parsed %s=%d\n", Env, res);
649     }
650     return res;
651   }
652 
653   RTLDeviceInfoTy() {
654     DP("Start initializing " GETNAME(TARGET_NAME) "\n");
655 
656     // LIBOMPTARGET_KERNEL_TRACE provides a kernel launch trace to stderr
657     // anytime. You do not need a debug library build.
658     //  0 => no tracing
659     //  1 => tracing dispatch only
660     // >1 => verbosity increase
661 
662     if (!HSA.success()) {
663       DP("Error when initializing HSA in " GETNAME(TARGET_NAME) "\n");
664       return;
665     }
666 
667     if (char *envStr = getenv("LIBOMPTARGET_KERNEL_TRACE"))
668       print_kernel_trace = atoi(envStr);
669     else
670       print_kernel_trace = 0;
671 
672     hsa_status_t err = core::atl_init_gpu_context();
673     if (err != HSA_STATUS_SUCCESS) {
674       DP("Error when initializing " GETNAME(TARGET_NAME) "\n");
675       return;
676     }
677 
678     // Init hostcall soon after initializing hsa
679     hostrpc_init();
680 
681     err = FindAgents([&](hsa_device_type_t DeviceType, hsa_agent_t Agent) {
682       if (DeviceType == HSA_DEVICE_TYPE_CPU) {
683         CPUAgents.push_back(Agent);
684       } else {
685         HSAAgents.push_back(Agent);
686       }
687     });
688     if (err != HSA_STATUS_SUCCESS)
689       return;
690 
691     NumberOfDevices = (int)HSAAgents.size();
692 
693     if (NumberOfDevices == 0) {
694       DP("There are no devices supporting HSA.\n");
695       return;
696     } else {
697       DP("There are %d devices supporting HSA.\n", NumberOfDevices);
698     }
699 
700     // Init the device info
701     HSAQueues.resize(NumberOfDevices);
702     FuncGblEntries.resize(NumberOfDevices);
703     ThreadsPerGroup.resize(NumberOfDevices);
704     ComputeUnits.resize(NumberOfDevices);
705     GPUName.resize(NumberOfDevices);
706     GroupsPerDevice.resize(NumberOfDevices);
707     WarpSize.resize(NumberOfDevices);
708     NumTeams.resize(NumberOfDevices);
709     NumThreads.resize(NumberOfDevices);
710     deviceStateStore.resize(NumberOfDevices);
711     KernelInfoTable.resize(NumberOfDevices);
712     SymbolInfoTable.resize(NumberOfDevices);
713     DeviceCoarseGrainedMemoryPools.resize(NumberOfDevices);
714     DeviceFineGrainedMemoryPools.resize(NumberOfDevices);
715 
716     err = setupDevicePools(HSAAgents);
717     if (err != HSA_STATUS_SUCCESS) {
718       DP("Setup for Device Memory Pools failed\n");
719       return;
720     }
721 
722     err = setupHostMemoryPools(CPUAgents);
723     if (err != HSA_STATUS_SUCCESS) {
724       DP("Setup for Host Memory Pools failed\n");
725       return;
726     }
727 
728     for (int i = 0; i < NumberOfDevices; i++) {
729       uint32_t queue_size = 0;
730       {
731         hsa_status_t err = hsa_agent_get_info(
732             HSAAgents[i], HSA_AGENT_INFO_QUEUE_MAX_SIZE, &queue_size);
733         if (err != HSA_STATUS_SUCCESS) {
734           DP("HSA query QUEUE_MAX_SIZE failed for agent %d\n", i);
735           return;
736         }
737         enum { MaxQueueSize = 4096 };
738         if (queue_size > MaxQueueSize) {
739           queue_size = MaxQueueSize;
740         }
741       }
742 
743       {
744         hsa_queue_t *Q = nullptr;
745         hsa_status_t rc =
746             hsa_queue_create(HSAAgents[i], queue_size, HSA_QUEUE_TYPE_MULTI,
747                              callbackQueue, NULL, UINT32_MAX, UINT32_MAX, &Q);
748         if (rc != HSA_STATUS_SUCCESS) {
749           DP("Failed to create HSA queue %d\n", i);
750           return;
751         }
752         HSAQueues[i].reset(Q);
753       }
754 
755       deviceStateStore[i] = {nullptr, 0};
756     }
757 
758     for (int i = 0; i < NumberOfDevices; i++) {
759       ThreadsPerGroup[i] = RTLDeviceInfoTy::Default_WG_Size;
760       GroupsPerDevice[i] = RTLDeviceInfoTy::DefaultNumTeams;
761       ComputeUnits[i] = 1;
762       DP("Device %d: Initial groupsPerDevice %d & threadsPerGroup %d\n", i,
763          GroupsPerDevice[i], ThreadsPerGroup[i]);
764     }
765 
766     // Get environment variables regarding teams
767     Env.TeamLimit = readEnvElseMinusOne("OMP_TEAM_LIMIT");
768     Env.NumTeams = readEnvElseMinusOne("OMP_NUM_TEAMS");
769     Env.MaxTeamsDefault = readEnvElseMinusOne("OMP_MAX_TEAMS_DEFAULT");
770     Env.TeamThreadLimit = readEnvElseMinusOne("OMP_TEAMS_THREAD_LIMIT");
771 
772     // Default state.
773     RequiresFlags = OMP_REQ_UNDEFINED;
774 
775     ConstructionSucceeded = true;
776   }
777 
778   ~RTLDeviceInfoTy() {
779     DP("Finalizing the " GETNAME(TARGET_NAME) " DeviceInfo.\n");
780     if (!HSA.success()) {
781       // Then none of these can have been set up and they can't be torn down
782       return;
783     }
784     // Run destructors on types that use HSA before
785     // impl_finalize removes access to it
786     deviceStateStore.clear();
787     KernelArgPoolMap.clear();
788     // Terminate hostrpc before finalizing hsa
789     hostrpc_terminate();
790 
791     hsa_status_t Err;
792     for (uint32_t I = 0; I < HSAExecutables.size(); I++) {
793       Err = hsa_executable_destroy(HSAExecutables[I]);
794       if (Err != HSA_STATUS_SUCCESS) {
795         DP("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
796            "Destroying executable", get_error_string(Err));
797       }
798     }
799   }
800 };
801 
802 pthread_mutex_t SignalPoolT::mutex = PTHREAD_MUTEX_INITIALIZER;
803 
804 static RTLDeviceInfoTy DeviceInfo;
805 
806 namespace {
807 
808 int32_t dataRetrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr, int64_t Size,
809                      __tgt_async_info *AsyncInfo) {
810   assert(AsyncInfo && "AsyncInfo is nullptr");
811   assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
812   // Return success if we are not copying back to host from target.
813   if (!HstPtr)
814     return OFFLOAD_SUCCESS;
815   hsa_status_t err;
816   DP("Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
817      (long long unsigned)(Elf64_Addr)TgtPtr,
818      (long long unsigned)(Elf64_Addr)HstPtr);
819 
820   err = DeviceInfo.freesignalpool_memcpy_d2h(HstPtr, TgtPtr, (size_t)Size,
821                                              DeviceId);
822 
823   if (err != HSA_STATUS_SUCCESS) {
824     DP("Error when copying data from device to host. Pointers: "
825        "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
826        (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
827     return OFFLOAD_FAIL;
828   }
829   DP("DONE Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
830      (long long unsigned)(Elf64_Addr)TgtPtr,
831      (long long unsigned)(Elf64_Addr)HstPtr);
832   return OFFLOAD_SUCCESS;
833 }
834 
835 int32_t dataSubmit(int32_t DeviceId, void *TgtPtr, void *HstPtr, int64_t Size,
836                    __tgt_async_info *AsyncInfo) {
837   assert(AsyncInfo && "AsyncInfo is nullptr");
838   hsa_status_t err;
839   assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
840   // Return success if we are not doing host to target.
841   if (!HstPtr)
842     return OFFLOAD_SUCCESS;
843 
844   DP("Submit data %ld bytes, (hst:%016llx) -> (tgt:%016llx).\n", Size,
845      (long long unsigned)(Elf64_Addr)HstPtr,
846      (long long unsigned)(Elf64_Addr)TgtPtr);
847   err = DeviceInfo.freesignalpool_memcpy_h2d(TgtPtr, HstPtr, (size_t)Size,
848                                              DeviceId);
849   if (err != HSA_STATUS_SUCCESS) {
850     DP("Error when copying data from host to device. Pointers: "
851        "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
852        (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
853     return OFFLOAD_FAIL;
854   }
855   return OFFLOAD_SUCCESS;
856 }
857 
858 // Async.
859 // The implementation was written with cuda streams in mind. The semantics of
860 // that are to execute kernels on a queue in order of insertion. A synchronise
861 // call then makes writes visible between host and device. This means a series
862 // of N data_submit_async calls is expected to execute serially. HSA offers
863 // various options to run the data copies concurrently, but exploiting that
864 // may require changes to libomptarget.
865 
866 // __tgt_async_info* contains a void * Queue. Queue = 0 is used to indicate that
867 // there are no outstanding kernels that need to be synchronized. Any async call
868 // may be passed a Queue==0, at which point the cuda implementation will set it
869 // to non-null (see getStream). The cuda streams are per-device. Upstream may
870 // change this interface to explicitly initialize the AsyncInfo_pointer, but
871 // until then hsa lazily initializes it as well.
872 
873 void initAsyncInfo(__tgt_async_info *AsyncInfo) {
874   // set non-null while using async calls, return to null to indicate completion
875   assert(AsyncInfo);
876   if (!AsyncInfo->Queue) {
877     AsyncInfo->Queue = reinterpret_cast<void *>(UINT64_MAX);
878   }
879 }
880 void finiAsyncInfo(__tgt_async_info *AsyncInfo) {
881   assert(AsyncInfo);
882   assert(AsyncInfo->Queue);
883   AsyncInfo->Queue = 0;
884 }
885 
886 bool elf_machine_id_is_amdgcn(__tgt_device_image *image) {
887   const uint16_t amdgcnMachineID = 224; // EM_AMDGPU may not be in system elf.h
888   int32_t r = elf_check_machine(image, amdgcnMachineID);
889   if (!r) {
890     DP("Supported machine ID not found\n");
891   }
892   return r;
893 }
894 
895 uint32_t elf_e_flags(__tgt_device_image *image) {
896   char *img_begin = (char *)image->ImageStart;
897   size_t img_size = (char *)image->ImageEnd - img_begin;
898 
899   Elf *e = elf_memory(img_begin, img_size);
900   if (!e) {
901     DP("Unable to get ELF handle: %s!\n", elf_errmsg(-1));
902     return 0;
903   }
904 
905   Elf64_Ehdr *eh64 = elf64_getehdr(e);
906 
907   if (!eh64) {
908     DP("Unable to get machine ID from ELF file!\n");
909     elf_end(e);
910     return 0;
911   }
912 
913   uint32_t Flags = eh64->e_flags;
914 
915   elf_end(e);
916   DP("ELF Flags: 0x%x\n", Flags);
917   return Flags;
918 }
919 } // namespace
920 
921 int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *image) {
922   return elf_machine_id_is_amdgcn(image);
923 }
924 
925 int __tgt_rtl_number_of_devices() {
926   // If the construction failed, no methods are safe to call
927   if (DeviceInfo.ConstructionSucceeded) {
928     return DeviceInfo.NumberOfDevices;
929   } else {
930     DP("AMDGPU plugin construction failed. Zero devices available\n");
931     return 0;
932   }
933 }
934 
935 int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
936   DP("Init requires flags to %ld\n", RequiresFlags);
937   DeviceInfo.RequiresFlags = RequiresFlags;
938   return RequiresFlags;
939 }
940 
941 namespace {
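// Clamp *value to upper, returning true iff clamping occurred.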
942 template <typename T> bool enforce_upper_bound(T *value, T upper) {
943   bool changed = *value > upper;
944   if (changed) {
945     *value = upper;
946   }
947   return changed;
948 }
949 } // namespace
950 
951 int32_t __tgt_rtl_init_device(int device_id) {
952   hsa_status_t err;
953 
954   // this is per device id init
955   DP("Initialize the device id: %d\n", device_id);
956 
957   hsa_agent_t agent = DeviceInfo.HSAAgents[device_id];
958 
959   // Get number of Compute Unit
960   uint32_t compute_units = 0;
961   err = hsa_agent_get_info(
962       agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT,
963       &compute_units);
964   if (err != HSA_STATUS_SUCCESS) {
965     DeviceInfo.ComputeUnits[device_id] = 1;
966     DP("Error getting compute units: setting to 1\n");
967   } else {
968     DeviceInfo.ComputeUnits[device_id] = compute_units;
969     DP("Using %d compute units per grid\n", DeviceInfo.ComputeUnits[device_id]);
970   }
971 
972   char GetInfoName[64]; // 64 max size returned by get info
973   err = hsa_agent_get_info(agent, (hsa_agent_info_t)HSA_AGENT_INFO_NAME,
974                            (void *)GetInfoName);
975   if (err != HSA_STATUS_SUCCESS) {
976     DeviceInfo.GPUName[device_id] = "--unknown gpu--";
977   } else {
978     DeviceInfo.GPUName[device_id] = GetInfoName;
979   }
980 
981   if (print_kernel_trace & STARTUP_DETAILS)
982     DP("Device#%-2d CU's: %2d %s\n", device_id,
983        DeviceInfo.ComputeUnits[device_id],
984        DeviceInfo.GPUName[device_id].c_str());
985 
986   // Query attributes to determine number of threads/block and blocks/grid.
987   uint16_t workgroup_max_dim[3];
988   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM,
989                            &workgroup_max_dim);
990   if (err != HSA_STATUS_SUCCESS) {
991     DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::DefaultNumTeams;
992     DP("Error getting grid dims: using default num groups %d\n",
993        RTLDeviceInfoTy::DefaultNumTeams);
994   } else if (workgroup_max_dim[0] <= RTLDeviceInfoTy::HardTeamLimit) {
995     DeviceInfo.GroupsPerDevice[device_id] = workgroup_max_dim[0];
996     DP("Using %d ROCm blocks per grid\n",
997        DeviceInfo.GroupsPerDevice[device_id]);
998   } else {
999     DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::HardTeamLimit;
1000     DP("Max ROCm blocks per grid %d exceeds the hard team limit %d, capping "
1001        "at the hard limit\n",
1002        workgroup_max_dim[0], RTLDeviceInfoTy::HardTeamLimit);
1003   }
1004 
1005   // Get thread limit
1006   hsa_dim3_t grid_max_dim;
1007   err = hsa_agent_get_info(agent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim);
1008   if (err == HSA_STATUS_SUCCESS) {
1009     DeviceInfo.ThreadsPerGroup[device_id] =
1010         reinterpret_cast<uint32_t *>(&grid_max_dim)[0] /
1011         DeviceInfo.GroupsPerDevice[device_id];
1012 
1013     if (DeviceInfo.ThreadsPerGroup[device_id] == 0) {
1014       DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
1015       DP("Default thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1016     } else if (enforce_upper_bound(&DeviceInfo.ThreadsPerGroup[device_id],
1017                                    RTLDeviceInfoTy::Max_WG_Size)) {
1018       DP("Capped thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1019     } else {
1020       DP("Using ROCm Queried thread limit: %d\n",
1021          DeviceInfo.ThreadsPerGroup[device_id]);
1022     }
1023   } else {
1024     DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
1025     DP("Error getting max block dimension, using default %d\n",
1026        RTLDeviceInfoTy::Max_WG_Size);
1027   }
1028 
1029   // Get wavefront size
1030   uint32_t wavefront_size = 0;
1031   err =
1032       hsa_agent_get_info(agent, HSA_AGENT_INFO_WAVEFRONT_SIZE, &wavefront_size);
1033   if (err == HSA_STATUS_SUCCESS) {
1034     DP("Queried wavefront size: %d\n", wavefront_size);
1035     DeviceInfo.WarpSize[device_id] = wavefront_size;
1036   } else {
1037     // TODO: Burn the wavefront size into the code object
1038     DP("Warning: Unknown wavefront size, assuming 64\n");
1039     DeviceInfo.WarpSize[device_id] = 64;
1040   }
1041 
1042   // Adjust teams to the env variables
1043 
1044   if (DeviceInfo.Env.TeamLimit > 0 &&
1045       (enforce_upper_bound(&DeviceInfo.GroupsPerDevice[device_id],
1046                            DeviceInfo.Env.TeamLimit))) {
1047     DP("Capping max groups per device to OMP_TEAM_LIMIT=%d\n",
1048        DeviceInfo.Env.TeamLimit);
1049   }
1050 
1051   // Set default number of teams
1052   if (DeviceInfo.Env.NumTeams > 0) {
1053     DeviceInfo.NumTeams[device_id] = DeviceInfo.Env.NumTeams;
1054     DP("Default number of teams set according to environment %d\n",
1055        DeviceInfo.Env.NumTeams);
1056   } else {
1057     char *TeamsPerCUEnvStr = getenv("OMP_TARGET_TEAMS_PER_PROC");
1058     int TeamsPerCU = DefaultTeamsPerCU;
1059     if (TeamsPerCUEnvStr) {
1060       TeamsPerCU = std::stoi(TeamsPerCUEnvStr);
1061     }
1062 
1063     DeviceInfo.NumTeams[device_id] =
1064         TeamsPerCU * DeviceInfo.ComputeUnits[device_id];
1065     DP("Default number of teams = %d * number of compute units %d\n",
1066        TeamsPerCU, DeviceInfo.ComputeUnits[device_id]);
1067   }
1068 
1069   if (enforce_upper_bound(&DeviceInfo.NumTeams[device_id],
1070                           DeviceInfo.GroupsPerDevice[device_id])) {
1071     DP("Default number of teams exceeds device limit, capping at %d\n",
1072        DeviceInfo.GroupsPerDevice[device_id]);
1073   }
1074 
1075   // Adjust threads to the env variables
1076   if (DeviceInfo.Env.TeamThreadLimit > 0 &&
1077       (enforce_upper_bound(&DeviceInfo.NumThreads[device_id],
1078                            DeviceInfo.Env.TeamThreadLimit))) {
1079     DP("Capping max number of threads to OMP_TEAMS_THREAD_LIMIT=%d\n",
1080        DeviceInfo.Env.TeamThreadLimit);
1081   }
1082 
1083   // Set default number of threads
1084   DeviceInfo.NumThreads[device_id] = RTLDeviceInfoTy::Default_WG_Size;
1085   DP("Default number of threads set according to library's default %d\n",
1086      RTLDeviceInfoTy::Default_WG_Size);
1087   if (enforce_upper_bound(&DeviceInfo.NumThreads[device_id],
1088                           DeviceInfo.ThreadsPerGroup[device_id])) {
1089     DP("Default number of threads exceeds device limit, capping at %d\n",
1090        DeviceInfo.ThreadsPerGroup[device_id]);
1091   }
1092 
1093   DP("Device %d: default limit for groupsPerDevice %d & threadsPerGroup %d\n",
1094      device_id, DeviceInfo.GroupsPerDevice[device_id],
1095      DeviceInfo.ThreadsPerGroup[device_id]);
1096 
1097   DP("Device %d: wavefront size %d, total threads %d x %d = %d\n", device_id,
1098      DeviceInfo.WarpSize[device_id], DeviceInfo.ThreadsPerGroup[device_id],
1099      DeviceInfo.GroupsPerDevice[device_id],
1100      DeviceInfo.GroupsPerDevice[device_id] *
1101          DeviceInfo.ThreadsPerGroup[device_id]);
1102 
1103   return OFFLOAD_SUCCESS;
1104 }
1105 
1106 namespace {
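// Find the single SHT_HASH section in the image; returns nullptr if it is
// absent or occurs more than once.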
1107 Elf64_Shdr *find_only_SHT_HASH(Elf *elf) {
1108   size_t N;
1109   int rc = elf_getshdrnum(elf, &N);
1110   if (rc != 0) {
1111     return nullptr;
1112   }
1113 
1114   Elf64_Shdr *result = nullptr;
1115   for (size_t i = 0; i < N; i++) {
1116     Elf_Scn *scn = elf_getscn(elf, i);
1117     if (scn) {
1118       Elf64_Shdr *shdr = elf64_getshdr(scn);
1119       if (shdr) {
1120         if (shdr->sh_type == SHT_HASH) {
1121           if (result == nullptr) {
1122             result = shdr;
1123           } else {
1124             // multiple SHT_HASH sections not handled
1125             return nullptr;
1126           }
1127         }
1128       }
1129     }
1130   }
1131   return result;
1132 }
1133 
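// Look up a symbol by name via the ELF SysV hash table, without loading the
// image. Returns nullptr if the symbol is not present.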
1134 const Elf64_Sym *elf_lookup(Elf *elf, char *base, Elf64_Shdr *section_hash,
1135                             const char *symname) {
1136 
1137   assert(section_hash);
1138   size_t section_symtab_index = section_hash->sh_link;
1139   Elf64_Shdr *section_symtab =
1140       elf64_getshdr(elf_getscn(elf, section_symtab_index));
1141   size_t section_strtab_index = section_symtab->sh_link;
1142 
1143   const Elf64_Sym *symtab =
1144       reinterpret_cast<const Elf64_Sym *>(base + section_symtab->sh_offset);
1145 
1146   const uint32_t *hashtab =
1147       reinterpret_cast<const uint32_t *>(base + section_hash->sh_offset);
1148 
1149   // Layout:
1150   // nbucket
1151   // nchain
1152   // bucket[nbucket]
1153   // chain[nchain]
1154   uint32_t nbucket = hashtab[0];
1155   const uint32_t *bucket = &hashtab[2];
1156   const uint32_t *chain = &hashtab[nbucket + 2];
1157 
1158   const size_t max = strlen(symname) + 1;
1159   const uint32_t hash = elf_hash(symname);
1160   for (uint32_t i = bucket[hash % nbucket]; i != 0; i = chain[i]) {
1161     char *n = elf_strptr(elf, section_strtab_index, symtab[i].st_name);
1162     if (strncmp(symname, n, max) == 0) {
1163       return &symtab[i];
1164     }
1165   }
1166 
1167   return nullptr;
1168 }
1169 
1170 struct symbol_info {
1171   void *addr = nullptr;
1172   uint32_t size = UINT32_MAX;
1173   uint32_t sh_type = SHT_NULL;
1174 };
1175 
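// Resolve a symbol's address (within the in-memory image), size and section
// type without loading the image. Returns 0 on success and 1 on failure.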
1176 int get_symbol_info_without_loading(Elf *elf, char *base, const char *symname,
1177                                     symbol_info *res) {
1178   if (elf_kind(elf) != ELF_K_ELF) {
1179     return 1;
1180   }
1181 
1182   Elf64_Shdr *section_hash = find_only_SHT_HASH(elf);
1183   if (!section_hash) {
1184     return 1;
1185   }
1186 
1187   const Elf64_Sym *sym = elf_lookup(elf, base, section_hash, symname);
1188   if (!sym) {
1189     return 1;
1190   }
1191 
1192   if (sym->st_size > UINT32_MAX) {
1193     return 1;
1194   }
1195 
1196   if (sym->st_shndx == SHN_UNDEF) {
1197     return 1;
1198   }
1199 
1200   Elf_Scn *section = elf_getscn(elf, sym->st_shndx);
1201   if (!section) {
1202     return 1;
1203   }
1204 
1205   Elf64_Shdr *header = elf64_getshdr(section);
1206   if (!header) {
1207     return 1;
1208   }
1209 
1210   res->addr = sym->st_value + base;
1211   res->size = static_cast<uint32_t>(sym->st_size);
1212   res->sh_type = header->sh_type;
1213   return 0;
1214 }
1215 
1216 int get_symbol_info_without_loading(char *base, size_t img_size,
1217                                     const char *symname, symbol_info *res) {
1218   Elf *elf = elf_memory(base, img_size);
1219   if (elf) {
1220     int rc = get_symbol_info_without_loading(elf, base, symname, res);
1221     elf_end(elf);
1222     return rc;
1223   }
1224   return 1;
1225 }
1226 
1227 hsa_status_t interop_get_symbol_info(char *base, size_t img_size,
1228                                      const char *symname, void **var_addr,
1229                                      uint32_t *var_size) {
1230   symbol_info si;
1231   int rc = get_symbol_info_without_loading(base, img_size, symname, &si);
1232   if (rc == 0) {
1233     *var_addr = si.addr;
1234     *var_size = si.size;
1235     return HSA_STATUS_SUCCESS;
1236   } else {
1237     return HSA_STATUS_ERROR;
1238   }
1239 }
1240 
1241 template <typename C>
1242 hsa_status_t module_register_from_memory_to_place(
1243     std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
1244     std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
1245     void *module_bytes, size_t module_size, int DeviceId, C cb,
1246     std::vector<hsa_executable_t> &HSAExecutables) {
1247   auto L = [](void *data, size_t size, void *cb_state) -> hsa_status_t {
1248     C *unwrapped = static_cast<C *>(cb_state);
1249     return (*unwrapped)(data, size);
1250   };
1251   return core::RegisterModuleFromMemory(
1252       KernelInfoTable, SymbolInfoTable, module_bytes, module_size,
1253       DeviceInfo.HSAAgents[DeviceId], L, static_cast<void *>(&cb),
1254       HSAExecutables);
1255 }
1256 } // namespace
1257 
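// Read the value of omptarget_nvptx_device_State_size directly from the ELF
// image; returns 0 if the symbol is absent or has an unexpected size.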
1258 static uint64_t get_device_State_bytes(char *ImageStart, size_t img_size) {
1259   uint64_t device_State_bytes = 0;
1260   {
1261     // If this is the deviceRTL, get the state variable size
1262     symbol_info size_si;
1263     int rc = get_symbol_info_without_loading(
1264         ImageStart, img_size, "omptarget_nvptx_device_State_size", &size_si);
1265 
1266     if (rc == 0) {
1267       if (size_si.size != sizeof(uint64_t)) {
1268         DP("Found device_State_size variable with wrong size\n");
1269         return 0;
1270       }
1271 
1272       // Read number of bytes directly from the elf
1273       memcpy(&device_State_bytes, size_si.addr, sizeof(uint64_t));
1274     }
1275   }
1276   return device_State_bytes;
1277 }
1278 
1279 static __tgt_target_table *
1280 __tgt_rtl_load_binary_locked(int32_t device_id, __tgt_device_image *image);
1281 
1285 __tgt_target_table *__tgt_rtl_load_binary(int32_t device_id,
1286                                           __tgt_device_image *image) {
1287   DeviceInfo.load_run_lock.lock();
1288   __tgt_target_table *res = __tgt_rtl_load_binary_locked(device_id, image);
1289   DeviceInfo.load_run_lock.unlock();
1290   return res;
1291 }
1292 
1293 struct device_environment {
1294   // Initializes a DeviceEnvironmentTy in the deviceRTL.
1295   // Patches around differences in the deviceRTL between trunk, aomp and
1296   // rocmcc. Over time these differences will tend to zero and this class can
1297   // be simplified.
1298   // The symbol may be in .data or .bss, and may be missing fields. TODO:
1299   // review aomp/trunk/rocm and simplify the following.
1300 
1301   // The symbol may also have been deadstripped because the device side
1302   // accessors were unused.
1303 
1304   // If the symbol is in .data (aomp, rocm) it can be written directly.
1305   // If it is in .bss, we must wait for it to be allocated space on the
1306   // gpu (trunk) and initialize after loading.
1307   const char *sym() { return "omptarget_device_environment"; }
1308 
1309   DeviceEnvironmentTy host_device_env;
1310   symbol_info si;
1311   bool valid = false;
1312 
1313   __tgt_device_image *image;
1314   const size_t img_size;
1315 
1316   device_environment(int device_id, int number_devices,
1317                      __tgt_device_image *image, const size_t img_size)
1318       : image(image), img_size(img_size) {
1319 
1320     host_device_env.NumDevices = number_devices;
1321     host_device_env.DeviceNum = device_id;
1322     host_device_env.DebugKind = 0;
1323     host_device_env.DynamicMemSize = 0;
1324     if (char *envStr = getenv("LIBOMPTARGET_DEVICE_RTL_DEBUG")) {
1325       host_device_env.DebugKind = std::stoi(envStr);
1326     }
1327 
1328     int rc = get_symbol_info_without_loading((char *)image->ImageStart,
1329                                              img_size, sym(), &si);
1330     if (rc != 0) {
1331       DP("Finding global device environment '%s' - symbol missing.\n", sym());
1332       return;
1333     }
1334 
1335     if (si.size > sizeof(host_device_env)) {
1336       DP("Symbol '%s' has size %u, expected at most %zu.\n", sym(), si.size,
1337          sizeof(host_device_env));
1338       return;
1339     }
1340 
1341     valid = true;
1342   }
1343 
1344   bool in_image() { return si.sh_type != SHT_NOBITS; }
1345 
1346   hsa_status_t before_loading(void *data, size_t size) {
1347     if (valid) {
1348       if (in_image()) {
1349         DP("Setting global device environment before load (%u bytes)\n",
1350            si.size);
1351         uint64_t offset = (char *)si.addr - (char *)image->ImageStart;
1352         void *pos = (char *)data + offset;
1353         memcpy(pos, &host_device_env, si.size);
1354       }
1355     }
1356     return HSA_STATUS_SUCCESS;
1357   }
1358 
1359   hsa_status_t after_loading() {
1360     if (valid) {
1361       if (!in_image()) {
1362         DP("Setting global device environment after load (%u bytes)\n",
1363            si.size);
1364         int device_id = host_device_env.DeviceNum;
1365         auto &SymbolInfo = DeviceInfo.SymbolInfoTable[device_id];
1366         void *state_ptr;
1367         uint32_t state_ptr_size;
1368         hsa_status_t err = interop_hsa_get_symbol_info(
1369             SymbolInfo, device_id, sym(), &state_ptr, &state_ptr_size);
1370         if (err != HSA_STATUS_SUCCESS) {
1371           DP("failed to find %s in loaded image\n", sym());
1372           return err;
1373         }
1374 
1375         if (state_ptr_size != si.size) {
1376           DP("Symbol had size %u before loading, %u after\n", state_ptr_size,
1377              si.size);
1378           return HSA_STATUS_ERROR;
1379         }
1380 
1381         return DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &host_device_env,
1382                                                     state_ptr_size, device_id);
1383       }
1384     }
1385     return HSA_STATUS_SUCCESS;
1386   }
1387 };
1388 
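// Allocate device memory rounded up to a multiple of four bytes and zero it
// with hsa_amd_memory_fill, which writes 32-bit words.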
1389 static hsa_status_t impl_calloc(void **ret_ptr, size_t size, int DeviceId) {
1390   uint64_t rounded = 4 * ((size + 3) / 4);
1391   void *ptr;
1392   hsa_amd_memory_pool_t MemoryPool = DeviceInfo.getDeviceMemoryPool(DeviceId);
1393   hsa_status_t err = hsa_amd_memory_pool_allocate(MemoryPool, rounded, 0, &ptr);
1394   if (err != HSA_STATUS_SUCCESS) {
1395     return err;
1396   }
1397 
1398   hsa_status_t rc = hsa_amd_memory_fill(ptr, 0, rounded / 4);
1399   if (rc != HSA_STATUS_SUCCESS) {
1400     DP("zero fill device_state failed with %u\n", rc);
1401     core::Runtime::Memfree(ptr);
1402     return HSA_STATUS_ERROR;
1403   }
1404 
1405   *ret_ptr = ptr;
1406   return HSA_STATUS_SUCCESS;
1407 }
1408 
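// True if the image defines the named symbol with a non-null address.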
1409 static bool image_contains_symbol(void *data, size_t size, const char *sym) {
1410   symbol_info si;
1411   int rc = get_symbol_info_without_loading((char *)data, size, sym, &si);
1412   return (rc == 0) && (si.addr != nullptr);
1413 }
1414 
1415 __tgt_target_table *__tgt_rtl_load_binary_locked(int32_t device_id,
1416                                                  __tgt_device_image *image) {
1417   // This function loads the device image onto gpu[device_id] and does other
1418   // per-image initialization work. Specifically:
1419   //
1420   // - Initialize a DeviceEnvironmentTy instance embedded in the
1421   //   image at the symbol "omptarget_device_environment".
1422   //   Fields DebugKind, DeviceNum, NumDevices are used by the deviceRTL.
1423   //
1424   // - Allocate a large array per-gpu (could be moved to init_device)
1425   //   - Read a uint64_t at symbol omptarget_nvptx_device_State_size
1426   //   - Allocate at least that many bytes of gpu memory
1427   //   - Zero initialize it
1428   //   - Write the pointer to the symbol omptarget_nvptx_device_State
1429   //
1430   // - Pulls some per-kernel information together from various sources and
1431   //   records it in the KernelsList for quicker access later
1432   //
1433   // The initialization can be done before or after loading the image onto the
1434   // gpu. This function presently does a mixture. Using the hsa api to get/set
1435   // the information is simpler to implement, in exchange for more complicated
1436   // runtime behaviour. E.g. launching a kernel or using dma to get eight bytes
1437   // back from the gpu vs a hashtable lookup on the host.
1438 
1439   const size_t img_size = (char *)image->ImageEnd - (char *)image->ImageStart;
1440 
1441   DeviceInfo.clearOffloadEntriesTable(device_id);
1442 
1443   // We do not need to set the ELF version because the caller of this function
1444   // had to do that to decide the right runtime to use
1445 
1446   if (!elf_machine_id_is_amdgcn(image)) {
1447     return NULL;
1448   }
1449 
1450   {
1451     auto env = device_environment(device_id, DeviceInfo.NumberOfDevices, image,
1452                                   img_size);
1453 
1454     auto &KernelInfo = DeviceInfo.KernelInfoTable[device_id];
1455     auto &SymbolInfo = DeviceInfo.SymbolInfoTable[device_id];
1456     hsa_status_t err = module_register_from_memory_to_place(
1457         KernelInfo, SymbolInfo, (void *)image->ImageStart, img_size, device_id,
1458         [&](void *data, size_t size) {
1459           if (image_contains_symbol(data, size, "needs_hostcall_buffer")) {
1460             __atomic_store_n(&DeviceInfo.hostcall_required, true,
1461                              __ATOMIC_RELEASE);
1462           }
1463           return env.before_loading(data, size);
1464         },
1465         DeviceInfo.HSAExecutables);
1466 
1467     check("Module registering", err);
1468     if (err != HSA_STATUS_SUCCESS) {
1469       const char *DeviceName = DeviceInfo.GPUName[device_id].c_str();
1470       const char *ElfName = get_elf_mach_gfx_name(elf_e_flags(image));
1471 
1472       if (strcmp(DeviceName, ElfName) != 0) {
1473         DP("Possible gpu arch mismatch: device:%s, image:%s please check"
1474            " compiler flag: -march=<gpu>\n",
1475            DeviceName, ElfName);
1476       } else {
1477         DP("Error loading image onto GPU: %s\n", get_error_string(err));
1478       }
1479 
1480       return NULL;
1481     }
1482 
1483     err = env.after_loading();
1484     if (err != HSA_STATUS_SUCCESS) {
1485       return NULL;
1486     }
1487   }
1488 
1489   DP("AMDGPU module successfully loaded!\n");
1490 
1491   {
1492     // The device_State array is either a large object in .bss or a void*
1493     // that needs to be pointed at an array of device_State_bytes bytes.
1494     // If absent, it has been deadstripped and needs no setup.
1495 
1496     void *state_ptr;
1497     uint32_t state_ptr_size;
1498     auto &SymbolInfoMap = DeviceInfo.SymbolInfoTable[device_id];
1499     hsa_status_t err = interop_hsa_get_symbol_info(
1500         SymbolInfoMap, device_id, "omptarget_nvptx_device_State", &state_ptr,
1501         &state_ptr_size);
1502 
1503     if (err != HSA_STATUS_SUCCESS) {
1504       DP("No device_state symbol found, skipping initialization\n");
1505     } else {
1506       if (state_ptr_size < sizeof(void *)) {
1507         DP("unexpected size of state_ptr %u != %zu\n", state_ptr_size,
1508            sizeof(void *));
1509         return NULL;
1510       }
1511 
1512       // if it's larger than a void*, assume it's a bss array and no further
1513       // initialization is required. Only try to set up a pointer for
1514       // sizeof(void*)
1515       if (state_ptr_size == sizeof(void *)) {
1516         uint64_t device_State_bytes =
1517             get_device_State_bytes((char *)image->ImageStart, img_size);
1518         if (device_State_bytes == 0) {
1519           DP("Can't initialize device_State, missing size information\n");
1520           return NULL;
1521         }
1522 
1523         auto &dss = DeviceInfo.deviceStateStore[device_id];
1524         if (dss.first.get() == nullptr) {
1525           assert(dss.second == 0);
1526           void *ptr = NULL;
1527           hsa_status_t err = impl_calloc(&ptr, device_State_bytes, device_id);
1528           if (err != HSA_STATUS_SUCCESS) {
1529             DP("Failed to allocate device_state array\n");
1530             return NULL;
1531           }
1532           dss = {
1533               std::unique_ptr<void, RTLDeviceInfoTy::implFreePtrDeletor>{ptr},
1534               device_State_bytes,
1535           };
1536         }
1537 
1538         void *ptr = dss.first.get();
1539         if (device_State_bytes != dss.second) {
1540           DP("Inconsistent sizes of device_State unsupported\n");
1541           return NULL;
1542         }
1543 
1544         // write ptr to device memory so it can be used by later kernels
1545         err = DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &ptr,
1546                                                    sizeof(void *), device_id);
1547         if (err != HSA_STATUS_SUCCESS) {
1548           DP("memcpy install of state_ptr failed\n");
1549           return NULL;
1550         }
1551       }
1552     }
1553   }
1554 
  // Here, we take advantage of the data appended after img_end to get the
  // names of the symbols we need to load. This data consists of the host
  // entries begin and end as well as the target name (see the offloading
  // linker script creation in the clang compiler).

  // Find the symbols in the module by name. The name can be obtained by
  // concatenating the host entry name with the target name.
1562 
1563   __tgt_offload_entry *HostBegin = image->EntriesBegin;
1564   __tgt_offload_entry *HostEnd = image->EntriesEnd;
1565 
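  // Each host entry either describes a global variable (e->size != 0), which
  // is resolved to its device address and added to the offload entries table,
  // or a kernel (e->size == 0), whose launch metadata is gathered below.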
1566   for (__tgt_offload_entry *e = HostBegin; e != HostEnd; ++e) {
1567 
1568     if (!e->addr) {
      // The host should always have something in the address field to
      // uniquely identify the target region.
1571       DP("Analyzing host entry '<null>' (size = %lld)...\n",
1572          (unsigned long long)e->size);
1573       return NULL;
1574     }
1575 
1576     if (e->size) {
1577       __tgt_offload_entry entry = *e;
1578 
1579       void *varptr;
1580       uint32_t varsize;
1581 
1582       auto &SymbolInfoMap = DeviceInfo.SymbolInfoTable[device_id];
1583       hsa_status_t err = interop_hsa_get_symbol_info(
1584           SymbolInfoMap, device_id, e->name, &varptr, &varsize);
1585 
1586       if (err != HSA_STATUS_SUCCESS) {
1587         // Inform the user what symbol prevented offloading
1588         DP("Loading global '%s' (Failed)\n", e->name);
1589         return NULL;
1590       }
1591 
1592       if (varsize != e->size) {
1593         DP("Loading global '%s' - size mismatch (%u != %lu)\n", e->name,
1594            varsize, e->size);
1595         return NULL;
1596       }
1597 
1598       DP("Entry point " DPxMOD " maps to global %s (" DPxMOD ")\n",
1599          DPxPTR(e - HostBegin), e->name, DPxPTR(varptr));
1600       entry.addr = (void *)varptr;
1601 
1602       DeviceInfo.addOffloadEntry(device_id, entry);
1603 
1604       if (DeviceInfo.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
1605           e->flags & OMP_DECLARE_TARGET_LINK) {
1606         // If unified memory is present any target link variables
1607         // can access host addresses directly. There is no longer a
1608         // need for device copies.
1609         err = DeviceInfo.freesignalpool_memcpy_h2d(varptr, e->addr,
1610                                                    sizeof(void *), device_id);
1611         if (err != HSA_STATUS_SUCCESS)
1612           DP("Error when copying USM\n");
1613         DP("Copy linked variable host address (" DPxMOD ")"
1614            "to device address (" DPxMOD ")\n",
1615            DPxPTR(*((void **)e->addr)), DPxPTR(varptr));
1616       }
1617 
1618       continue;
1619     }
1620 
    DP("Looking up kernel name: %s (name length: %lu)\n", e->name,
       strlen(e->name));
1622 
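    // Kernel launch metadata comes from two places: the kernarg segment size
    // is read from the HSA kernel info table, while the workgroup size and
    // execution mode are read from companion globals emitted alongside the
    // kernel ("<name>_kern_desc", "<name>_wg_size", "<name>_exec_mode").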
1623     uint32_t kernarg_segment_size;
1624     auto &KernelInfoMap = DeviceInfo.KernelInfoTable[device_id];
1625     hsa_status_t err = interop_hsa_get_kernel_info(
1626         KernelInfoMap, device_id, e->name,
1627         HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
1628         &kernarg_segment_size);
1629 
    // Each arg is a void * in this OpenMP implementation.
    uint32_t arg_num = kernarg_segment_size / sizeof(void *);
    std::vector<size_t> arg_sizes(arg_num, sizeof(void *));
1637 
    // Default value GENERIC (in case the symbol is missing from the image)
1639     llvm::omp::OMPTgtExecModeFlags ExecModeVal =
1640         llvm::omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_GENERIC;
1641 
1642     // get flat group size if present, else Default_WG_Size
1643     int16_t WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
1644 
1645     // get Kernel Descriptor if present.
    // Keep struct in sync with getTgtAttributeStructQTy in CGOpenMPRuntime.cpp
1647     struct KernDescValType {
1648       uint16_t Version;
1649       uint16_t TSize;
1650       uint16_t WG_Size;
1651     };
1652     struct KernDescValType KernDescVal;
1653     std::string KernDescNameStr(e->name);
1654     KernDescNameStr += "_kern_desc";
1655     const char *KernDescName = KernDescNameStr.c_str();
1656 
1657     void *KernDescPtr;
1658     uint32_t KernDescSize;
1659     void *CallStackAddr = nullptr;
1660     err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1661                                   KernDescName, &KernDescPtr, &KernDescSize);
1662 
1663     if (err == HSA_STATUS_SUCCESS) {
1664       if ((size_t)KernDescSize != sizeof(KernDescVal))
1665         DP("Loading global computation properties '%s' - size mismatch (%u != "
1666            "%lu)\n",
1667            KernDescName, KernDescSize, sizeof(KernDescVal));
1668 
1669       memcpy(&KernDescVal, KernDescPtr, (size_t)KernDescSize);
1670 
1671       // Check structure size against recorded size.
1672       if ((size_t)KernDescSize != KernDescVal.TSize)
        DP("KernDescVal size %lu does not match advertised size %d for '%s'\n",
1674            sizeof(KernDescVal), KernDescVal.TSize, KernDescName);
1675 
      DP("After loading global for %s KernDesc\n", KernDescName);
1677       DP("KernDesc: Version: %d\n", KernDescVal.Version);
1678       DP("KernDesc: TSize: %d\n", KernDescVal.TSize);
1679       DP("KernDesc: WG_Size: %d\n", KernDescVal.WG_Size);
1680 
1681       if (KernDescVal.WG_Size == 0) {
1682         KernDescVal.WG_Size = RTLDeviceInfoTy::Default_WG_Size;
1683         DP("Setting KernDescVal.WG_Size to default %d\n", KernDescVal.WG_Size);
1684       }
1685       WGSizeVal = KernDescVal.WG_Size;
1686       DP("WGSizeVal %d\n", WGSizeVal);
1687       check("Loading KernDesc computation property", err);
1688     } else {
1689       DP("Warning: Loading KernDesc '%s' - symbol not found, ", KernDescName);
1690 
1691       // Flat group size
1692       std::string WGSizeNameStr(e->name);
1693       WGSizeNameStr += "_wg_size";
1694       const char *WGSizeName = WGSizeNameStr.c_str();
1695 
1696       void *WGSizePtr;
1697       uint32_t WGSize;
1698       err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1699                                     WGSizeName, &WGSizePtr, &WGSize);
1700 
1701       if (err == HSA_STATUS_SUCCESS) {
1702         if ((size_t)WGSize != sizeof(int16_t)) {
          DP("Loading global computation properties '%s' - size mismatch "
             "(%u != %lu)\n",
             WGSizeName, WGSize, sizeof(int16_t));
1707           return NULL;
1708         }
1709 
1710         memcpy(&WGSizeVal, WGSizePtr, (size_t)WGSize);
1711 
1712         DP("After loading global for %s WGSize = %d\n", WGSizeName, WGSizeVal);
1713 
1714         if (WGSizeVal < RTLDeviceInfoTy::Default_WG_Size ||
1715             WGSizeVal > RTLDeviceInfoTy::Max_WG_Size) {
1716           DP("Error wrong WGSize value specified in HSA code object file: "
1717              "%d\n",
1718              WGSizeVal);
1719           WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
1720         }
1721       } else {
1722         DP("Warning: Loading WGSize '%s' - symbol not found, "
1723            "using default value %d\n",
1724            WGSizeName, WGSizeVal);
1725       }
1726 
1727       check("Loading WGSize computation property", err);
1728     }
1729 
1730     // Read execution mode from global in binary
1731     std::string ExecModeNameStr(e->name);
1732     ExecModeNameStr += "_exec_mode";
1733     const char *ExecModeName = ExecModeNameStr.c_str();
1734 
1735     void *ExecModePtr;
1736     uint32_t varsize;
1737     err = interop_get_symbol_info((char *)image->ImageStart, img_size,
1738                                   ExecModeName, &ExecModePtr, &varsize);
1739 
1740     if (err == HSA_STATUS_SUCCESS) {
1741       if ((size_t)varsize != sizeof(llvm::omp::OMPTgtExecModeFlags)) {
        DP("Loading global computation properties '%s' - size mismatch "
           "(%u != %lu)\n",
           ExecModeName, varsize, sizeof(llvm::omp::OMPTgtExecModeFlags));
1745         return NULL;
1746       }
1747 
1748       memcpy(&ExecModeVal, ExecModePtr, (size_t)varsize);
1749 
1750       DP("After loading global for %s ExecMode = %d\n", ExecModeName,
1751          ExecModeVal);
1752 
1753       if (ExecModeVal < 0 ||
1754           ExecModeVal > llvm::omp::OMP_TGT_EXEC_MODE_GENERIC_SPMD) {
1755         DP("Error wrong exec_mode value specified in HSA code object file: "
1756            "%d\n",
1757            ExecModeVal);
1758         return NULL;
1759       }
1760     } else {
      DP("Loading global exec_mode '%s' - symbol missing, using default "
         "value GENERIC (1)\n",
         ExecModeName);
1765     }
1766     check("Loading computation property", err);
1767 
1768     KernelsList.push_back(KernelTy(ExecModeVal, WGSizeVal, device_id,
1769                                    CallStackAddr, e->name, kernarg_segment_size,
1770                                    DeviceInfo.KernArgPool));
1771     __tgt_offload_entry entry = *e;
1772     entry.addr = (void *)&KernelsList.back();
1773     DeviceInfo.addOffloadEntry(device_id, entry);
1774     DP("Entry point %ld maps to %s\n", e - HostBegin, e->name);
1775   }
1776 
1777   return DeviceInfo.getOffloadEntriesTable(device_id);
1778 }
1779 
1780 void *__tgt_rtl_data_alloc(int device_id, int64_t size, void *, int32_t kind) {
1781   void *ptr = NULL;
1782   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1783 
1784   if (kind != TARGET_ALLOC_DEFAULT) {
1785     REPORT("Invalid target data allocation kind or requested allocator not "
1786            "implemented yet\n");
1787     return NULL;
1788   }
1789 
1790   hsa_amd_memory_pool_t MemoryPool = DeviceInfo.getDeviceMemoryPool(device_id);
1791   hsa_status_t err = hsa_amd_memory_pool_allocate(MemoryPool, size, 0, &ptr);
  DP("Tgt alloc data %ld bytes (tgt:%016llx).\n", size,
     (long long unsigned)(Elf64_Addr)ptr);
1794   ptr = (err == HSA_STATUS_SUCCESS) ? ptr : NULL;
1795   return ptr;
1796 }
1797 
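// The synchronous copy entry points below reuse the asynchronous
// implementation with a stack-local __tgt_async_info and then wait on it
// before returning.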
1798 int32_t __tgt_rtl_data_submit(int device_id, void *tgt_ptr, void *hst_ptr,
1799                               int64_t size) {
1800   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1801   __tgt_async_info AsyncInfo;
1802   int32_t rc = dataSubmit(device_id, tgt_ptr, hst_ptr, size, &AsyncInfo);
1803   if (rc != OFFLOAD_SUCCESS)
1804     return OFFLOAD_FAIL;
1805 
1806   return __tgt_rtl_synchronize(device_id, &AsyncInfo);
1807 }
1808 
1809 int32_t __tgt_rtl_data_submit_async(int device_id, void *tgt_ptr, void *hst_ptr,
1810                                     int64_t size, __tgt_async_info *AsyncInfo) {
1811   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1812   if (AsyncInfo) {
1813     initAsyncInfo(AsyncInfo);
1814     return dataSubmit(device_id, tgt_ptr, hst_ptr, size, AsyncInfo);
1815   } else {
1816     return __tgt_rtl_data_submit(device_id, tgt_ptr, hst_ptr, size);
1817   }
1818 }
1819 
1820 int32_t __tgt_rtl_data_retrieve(int device_id, void *hst_ptr, void *tgt_ptr,
1821                                 int64_t size) {
1822   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1823   __tgt_async_info AsyncInfo;
1824   int32_t rc = dataRetrieve(device_id, hst_ptr, tgt_ptr, size, &AsyncInfo);
1825   if (rc != OFFLOAD_SUCCESS)
1826     return OFFLOAD_FAIL;
1827 
1828   return __tgt_rtl_synchronize(device_id, &AsyncInfo);
1829 }
1830 
1831 int32_t __tgt_rtl_data_retrieve_async(int device_id, void *hst_ptr,
1832                                       void *tgt_ptr, int64_t size,
1833                                       __tgt_async_info *AsyncInfo) {
1834   assert(AsyncInfo && "AsyncInfo is nullptr");
1835   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1836   initAsyncInfo(AsyncInfo);
1837   return dataRetrieve(device_id, hst_ptr, tgt_ptr, size, AsyncInfo);
1838 }
1839 
1840 int32_t __tgt_rtl_data_delete(int device_id, void *tgt_ptr) {
1841   assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
1842   hsa_status_t err;
1843   DP("Tgt free data (tgt:%016llx).\n", (long long unsigned)(Elf64_Addr)tgt_ptr);
1844   err = core::Runtime::Memfree(tgt_ptr);
1845   if (err != HSA_STATUS_SUCCESS) {
    DP("Error when freeing HSA memory\n");
1847     return OFFLOAD_FAIL;
1848   }
1849   return OFFLOAD_SUCCESS;
1850 }
1851 
1852 // Determine launch values for kernel.
1853 struct launchVals {
1854   int WorkgroupSize;
1855   int GridSize;
1856 };
1857 launchVals getLaunchVals(int WarpSize, EnvironmentVariables Env,
1858                          int ConstWGSize,
1859                          llvm::omp::OMPTgtExecModeFlags ExecutionMode,
1860                          int num_teams, int thread_limit,
1861                          uint64_t loop_tripcount, int DeviceNumTeams) {
1862 
1863   int threadsPerGroup = RTLDeviceInfoTy::Default_WG_Size;
1864   int num_groups = 0;
1865 
1866   int Max_Teams =
1867       Env.MaxTeamsDefault > 0 ? Env.MaxTeamsDefault : DeviceNumTeams;
1868   if (Max_Teams > RTLDeviceInfoTy::HardTeamLimit)
1869     Max_Teams = RTLDeviceInfoTy::HardTeamLimit;
1870 
1871   if (print_kernel_trace & STARTUP_DETAILS) {
1872     DP("RTLDeviceInfoTy::Max_Teams: %d\n", RTLDeviceInfoTy::Max_Teams);
1873     DP("Max_Teams: %d\n", Max_Teams);
1874     DP("RTLDeviceInfoTy::Warp_Size: %d\n", WarpSize);
1875     DP("RTLDeviceInfoTy::Max_WG_Size: %d\n", RTLDeviceInfoTy::Max_WG_Size);
1876     DP("RTLDeviceInfoTy::Default_WG_Size: %d\n",
1877        RTLDeviceInfoTy::Default_WG_Size);
1878     DP("thread_limit: %d\n", thread_limit);
1879     DP("threadsPerGroup: %d\n", threadsPerGroup);
1880     DP("ConstWGSize: %d\n", ConstWGSize);
1881   }
1882   // check for thread_limit() clause
1883   if (thread_limit > 0) {
1884     threadsPerGroup = thread_limit;
1885     DP("Setting threads per block to requested %d\n", thread_limit);
1886     // Add master warp for GENERIC
1887     if (ExecutionMode ==
1888         llvm::omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_GENERIC) {
1889       threadsPerGroup += WarpSize;
1890       DP("Adding master wavefront: +%d threads\n", WarpSize);
1891     }
1892     if (threadsPerGroup > RTLDeviceInfoTy::Max_WG_Size) { // limit to max
1893       threadsPerGroup = RTLDeviceInfoTy::Max_WG_Size;
1894       DP("Setting threads per block to maximum %d\n", threadsPerGroup);
1895     }
1896   }
1897   // check flat_max_work_group_size attr here
1898   if (threadsPerGroup > ConstWGSize) {
1899     threadsPerGroup = ConstWGSize;
1900     DP("Reduced threadsPerGroup to flat-attr-group-size limit %d\n",
1901        threadsPerGroup);
1902   }
1903   if (print_kernel_trace & STARTUP_DETAILS)
1904     DP("threadsPerGroup: %d\n", threadsPerGroup);
1905   DP("Preparing %d threads\n", threadsPerGroup);
1906 
1907   // Set default num_groups (teams)
1908   if (Env.TeamLimit > 0)
1909     num_groups = (Max_Teams < Env.TeamLimit) ? Max_Teams : Env.TeamLimit;
1910   else
1911     num_groups = Max_Teams;
1912   DP("Set default num of groups %d\n", num_groups);
1913 
1914   if (print_kernel_trace & STARTUP_DETAILS) {
1915     DP("num_groups: %d\n", num_groups);
1916     DP("num_teams: %d\n", num_teams);
1917   }
1918 
1919   // Reduce num_groups if threadsPerGroup exceeds RTLDeviceInfoTy::Max_WG_Size
  // This reduction is typical for the default case (no thread_limit clause),
  // or when the user requests an excessively large num_teams.
  // FIXME: We can't distinguish between a constant and a variable thread
  // limit, so we only handle constant thread_limits.
1924   if (threadsPerGroup >
1925       RTLDeviceInfoTy::Default_WG_Size) //  256 < threadsPerGroup <= 1024
1926     // Should we round threadsPerGroup up to nearest WarpSize
1927     // here?
1928     num_groups = (Max_Teams * RTLDeviceInfoTy::Max_WG_Size) / threadsPerGroup;
1929 
1930   // check for num_teams() clause
1931   if (num_teams > 0) {
1932     num_groups = (num_teams < num_groups) ? num_teams : num_groups;
1933   }
1934   if (print_kernel_trace & STARTUP_DETAILS) {
1935     DP("num_groups: %d\n", num_groups);
1936     DP("Env.NumTeams %d\n", Env.NumTeams);
1937     DP("Env.TeamLimit %d\n", Env.TeamLimit);
1938   }
1939 
1940   if (Env.NumTeams > 0) {
1941     num_groups = (Env.NumTeams < num_groups) ? Env.NumTeams : num_groups;
1942     DP("Modifying teams based on Env.NumTeams %d\n", Env.NumTeams);
1943   } else if (Env.TeamLimit > 0) {
1944     num_groups = (Env.TeamLimit < num_groups) ? Env.TeamLimit : num_groups;
    DP("Modifying teams based on Env.TeamLimit %d\n", Env.TeamLimit);
1946   } else {
1947     if (num_teams <= 0) {
1948       if (loop_tripcount > 0) {
1949         if (ExecutionMode ==
1950             llvm::omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_SPMD) {
1951           // round up to the nearest integer
1952           num_groups = ((loop_tripcount - 1) / threadsPerGroup) + 1;
1953         } else if (ExecutionMode ==
1954                    llvm::omp::OMPTgtExecModeFlags::OMP_TGT_EXEC_MODE_GENERIC) {
1955           num_groups = loop_tripcount;
1956         } else /* OMP_TGT_EXEC_MODE_GENERIC_SPMD */ {
1957           // This is a generic kernel that was transformed to use SPMD-mode
1958           // execution but uses Generic-mode semantics for scheduling.
1959           num_groups = loop_tripcount;
1960         }
1961         DP("Using %d teams due to loop trip count %" PRIu64 " and number of "
1962            "threads per block %d\n",
1963            num_groups, loop_tripcount, threadsPerGroup);
1964       }
1965     } else {
1966       num_groups = num_teams;
1967     }
1968     if (num_groups > Max_Teams) {
1969       num_groups = Max_Teams;
1970       if (print_kernel_trace & STARTUP_DETAILS)
1971         DP("Limiting num_groups %d to Max_Teams %d \n", num_groups, Max_Teams);
1972     }
1973     if (num_groups > num_teams && num_teams > 0) {
1974       num_groups = num_teams;
1975       if (print_kernel_trace & STARTUP_DETAILS)
1976         DP("Limiting num_groups %d to clause num_teams %d \n", num_groups,
1977            num_teams);
1978     }
1979   }
1980 
  // The num_teams clause is always honored unless DEFAULT is active.
1982   if (num_teams > 0) {
1983     num_groups = num_teams;
1984     // Cap num_groups to EnvMaxTeamsDefault if set.
1985     if (Env.MaxTeamsDefault > 0 && num_groups > Env.MaxTeamsDefault)
1986       num_groups = Env.MaxTeamsDefault;
1987   }
1988   if (print_kernel_trace & STARTUP_DETAILS) {
1989     DP("threadsPerGroup: %d\n", threadsPerGroup);
1990     DP("num_groups: %d\n", num_groups);
1991     DP("loop_tripcount: %ld\n", loop_tripcount);
1992   }
1993   DP("Final %d num_groups and %d threadsPerGroup\n", num_groups,
1994      threadsPerGroup);
1995 
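  // HSA expects the grid size in work-items rather than workgroups, so the
  // GridSize returned below is the product of the two values computed above.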
1996   launchVals res;
1997   res.WorkgroupSize = threadsPerGroup;
1998   res.GridSize = threadsPerGroup * num_groups;
1999   return res;
2000 }
2001 
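// Reserve a packet slot in the AQL queue: atomically bump the write index to
// claim a slot, then spin until the packet processor has consumed enough
// packets that the claimed slot fits inside the ring buffer.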
2002 static uint64_t acquire_available_packet_id(hsa_queue_t *queue) {
2003   uint64_t packet_id = hsa_queue_add_write_index_relaxed(queue, 1);
2004   bool full = true;
2005   while (full) {
2006     full =
2007         packet_id >= (queue->size + hsa_queue_load_read_index_scacquire(queue));
2008   }
2009   return packet_id;
2010 }
2011 
2012 static int32_t __tgt_rtl_run_target_team_region_locked(
2013     int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
2014     ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
2015     int32_t thread_limit, uint64_t loop_tripcount);
2016 
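// Kernel launches take the load/run lock in shared mode: concurrent launches
// may proceed in parallel, while anything that acquires the lock exclusively
// is held off for the duration of the launch.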
2017 int32_t __tgt_rtl_run_target_team_region(int32_t device_id, void *tgt_entry_ptr,
2018                                          void **tgt_args,
2019                                          ptrdiff_t *tgt_offsets,
2020                                          int32_t arg_num, int32_t num_teams,
2021                                          int32_t thread_limit,
2022                                          uint64_t loop_tripcount) {
2023 
2024   DeviceInfo.load_run_lock.lock_shared();
2025   int32_t res = __tgt_rtl_run_target_team_region_locked(
2026       device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, num_teams,
2027       thread_limit, loop_tripcount);
2028 
2029   DeviceInfo.load_run_lock.unlock_shared();
2030   return res;
2031 }
2032 
2033 int32_t __tgt_rtl_run_target_team_region_locked(
2034     int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
2035     ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
2036     int32_t thread_limit, uint64_t loop_tripcount) {
  // Update the thread limit in GPU memory if it is uninitialized or was
  // specified by the host.
2040 
2041   DP("Run target team region thread_limit %d\n", thread_limit);
2042 
2043   // All args are references.
2044   std::vector<void *> args(arg_num);
2045   std::vector<void *> ptrs(arg_num);
2046 
2047   DP("Arg_num: %d\n", arg_num);
2048   for (int32_t i = 0; i < arg_num; ++i) {
2049     ptrs[i] = (void *)((intptr_t)tgt_args[i] + tgt_offsets[i]);
2050     args[i] = &ptrs[i];
    DP("Offset base: arg[%d]:" DPxMOD "\n", i, DPxPTR(ptrs[i]));
2052   }
2053 
2054   KernelTy *KernelInfo = (KernelTy *)tgt_entry_ptr;
2055 
2056   std::string kernel_name = std::string(KernelInfo->Name);
2057   auto &KernelInfoTable = DeviceInfo.KernelInfoTable;
2058   if (KernelInfoTable[device_id].find(kernel_name) ==
2059       KernelInfoTable[device_id].end()) {
2060     DP("Kernel %s not found\n", kernel_name.c_str());
2061     return OFFLOAD_FAIL;
2062   }
2063 
2064   const atl_kernel_info_t KernelInfoEntry =
2065       KernelInfoTable[device_id][kernel_name];
2066   const uint32_t group_segment_size = KernelInfoEntry.group_segment_size;
2067   const uint32_t sgpr_count = KernelInfoEntry.sgpr_count;
2068   const uint32_t vgpr_count = KernelInfoEntry.vgpr_count;
2069   const uint32_t sgpr_spill_count = KernelInfoEntry.sgpr_spill_count;
2070   const uint32_t vgpr_spill_count = KernelInfoEntry.vgpr_spill_count;
2071 
2072   assert(arg_num == (int)KernelInfoEntry.num_args);
2073 
2074   /*
2075    * Set limit based on ThreadsPerGroup and GroupsPerDevice
2076    */
2077   launchVals LV =
2078       getLaunchVals(DeviceInfo.WarpSize[device_id], DeviceInfo.Env,
2079                     KernelInfo->ConstWGSize, KernelInfo->ExecutionMode,
2080                     num_teams,      // From run_region arg
2081                     thread_limit,   // From run_region arg
2082                     loop_tripcount, // From run_region arg
2083                     DeviceInfo.NumTeams[KernelInfo->device_id]);
2084   const int GridSize = LV.GridSize;
2085   const int WorkgroupSize = LV.WorkgroupSize;
2086 
2087   if (print_kernel_trace >= LAUNCH) {
2088     int num_groups = GridSize / WorkgroupSize;
    // Enum modes are SPMD, GENERIC, NONE (0, 1, 2).
    // If doing RTL timing, print to stderr unless stdout was requested.
2091     bool traceToStdout = print_kernel_trace & (RTL_TO_STDOUT | RTL_TIMING);
2092     fprintf(traceToStdout ? stdout : stderr,
2093             "DEVID:%2d SGN:%1d ConstWGSize:%-4d args:%2d teamsXthrds:(%4dX%4d) "
2094             "reqd:(%4dX%4d) lds_usage:%uB sgpr_count:%u vgpr_count:%u "
2095             "sgpr_spill_count:%u vgpr_spill_count:%u tripcount:%lu n:%s\n",
2096             device_id, KernelInfo->ExecutionMode, KernelInfo->ConstWGSize,
2097             arg_num, num_groups, WorkgroupSize, num_teams, thread_limit,
2098             group_segment_size, sgpr_count, vgpr_count, sgpr_spill_count,
2099             vgpr_spill_count, loop_tripcount, KernelInfo->Name);
2100   }
2101 
2102   // Run on the device.
2103   {
2104     hsa_queue_t *queue = DeviceInfo.HSAQueues[device_id].get();
2105     if (!queue) {
2106       return OFFLOAD_FAIL;
2107     }
2108     uint64_t packet_id = acquire_available_packet_id(queue);
2109 
2110     const uint32_t mask = queue->size - 1; // size is a power of 2
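    // The queue is a ring buffer, so the monotonically increasing packet id
    // is wrapped into the buffer with a bitmask to find the packet's slot.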
2111     hsa_kernel_dispatch_packet_t *packet =
2112         (hsa_kernel_dispatch_packet_t *)queue->base_address +
2113         (packet_id & mask);
2114 
2115     // packet->header is written last
2116     packet->setup = UINT16_C(1) << HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS;
2117     packet->workgroup_size_x = WorkgroupSize;
2118     packet->workgroup_size_y = 1;
2119     packet->workgroup_size_z = 1;
2120     packet->reserved0 = 0;
2121     packet->grid_size_x = GridSize;
2122     packet->grid_size_y = 1;
2123     packet->grid_size_z = 1;
2124     packet->private_segment_size = KernelInfoEntry.private_segment_size;
2125     packet->group_segment_size = KernelInfoEntry.group_segment_size;
2126     packet->kernel_object = KernelInfoEntry.kernel_object;
2127     packet->kernarg_address = 0;     // use the block allocator
2128     packet->reserved2 = 0;           // impl writes id_ here
2129     packet->completion_signal = {0}; // may want a pool of signals
2130 
2131     KernelArgPool *ArgPool = nullptr;
2132     void *kernarg = nullptr;
2133     {
2134       auto it = KernelArgPoolMap.find(std::string(KernelInfo->Name));
2135       if (it != KernelArgPoolMap.end()) {
2136         ArgPool = (it->second).get();
2137       }
2138     }
2139     if (!ArgPool) {
2140       DP("Warning: No ArgPool for %s on device %d\n", KernelInfo->Name,
2141          device_id);
2142     }
2143     {
2144       if (ArgPool) {
2145         assert(ArgPool->kernarg_segment_size == (arg_num * sizeof(void *)));
2146         kernarg = ArgPool->allocate(arg_num);
2147       }
2148       if (!kernarg) {
2149         DP("Allocate kernarg failed\n");
2150         return OFFLOAD_FAIL;
2151       }
2152 
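      // The kernarg buffer layout is arg_num pointers for the explicit kernel
      // arguments followed by an impl_implicit_args_t structure.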
2153       // Copy explicit arguments
2154       for (int i = 0; i < arg_num; i++) {
2155         memcpy((char *)kernarg + sizeof(void *) * i, args[i], sizeof(void *));
2156       }
2157 
      // Initialize implicit arguments. TODO: Which of these can be dropped?
2159       impl_implicit_args_t *impl_args =
2160           reinterpret_cast<impl_implicit_args_t *>(
2161               static_cast<char *>(kernarg) + ArgPool->kernarg_segment_size);
2162       memset(impl_args, 0,
2163              sizeof(impl_implicit_args_t)); // may not be necessary
2164       impl_args->offset_x = 0;
2165       impl_args->offset_y = 0;
2166       impl_args->offset_z = 0;
2167 
2168       // assign a hostcall buffer for the selected Q
2169       if (__atomic_load_n(&DeviceInfo.hostcall_required, __ATOMIC_ACQUIRE)) {
2170         // hostrpc_assign_buffer is not thread safe, and this function is
2171         // under a multiple reader lock, not a writer lock.
2172         static pthread_mutex_t hostcall_init_lock = PTHREAD_MUTEX_INITIALIZER;
2173         pthread_mutex_lock(&hostcall_init_lock);
2174         impl_args->hostcall_ptr = hostrpc_assign_buffer(
2175             DeviceInfo.HSAAgents[device_id], queue, device_id);
2176         pthread_mutex_unlock(&hostcall_init_lock);
2177         if (!impl_args->hostcall_ptr) {
2178           DP("hostrpc_assign_buffer failed, gpu would dereference null and "
2179              "error\n");
2180           return OFFLOAD_FAIL;
2181         }
2182       }
2183 
2184       packet->kernarg_address = kernarg;
2185     }
2186 
2187     hsa_signal_t s = DeviceInfo.FreeSignalPool.pop();
2188     if (s.handle == 0) {
2189       DP("Failed to get signal instance\n");
2190       return OFFLOAD_FAIL;
2191     }
2192     packet->completion_signal = s;
2193     hsa_signal_store_relaxed(packet->completion_signal, 1);
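    // The signal is initialized to 1; the packet processor decrements it on
    // kernel completion, and the wait below returns once it reaches 0.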
2194 
2195     // Publish the packet indicating it is ready to be processed
2196     core::packet_store_release(reinterpret_cast<uint32_t *>(packet),
2197                                core::create_header(), packet->setup);
2198 
2199     // Since the packet is already published, its contents must not be
2200     // accessed any more
2201     hsa_signal_store_relaxed(queue->doorbell_signal, packet_id);
2202 
2203     while (hsa_signal_wait_scacquire(s, HSA_SIGNAL_CONDITION_EQ, 0, UINT64_MAX,
2204                                      HSA_WAIT_STATE_BLOCKED) != 0)
2205       ;
2206 
2207     assert(ArgPool);
2208     ArgPool->deallocate(kernarg);
2209     DeviceInfo.FreeSignalPool.push(s);
2210   }
2211 
2212   DP("Kernel completed\n");
2213   return OFFLOAD_SUCCESS;
2214 }
2215 
2216 int32_t __tgt_rtl_run_target_region(int32_t device_id, void *tgt_entry_ptr,
2217                                     void **tgt_args, ptrdiff_t *tgt_offsets,
2218                                     int32_t arg_num) {
  // Use a single team and the default number of threads.
2221   int32_t team_num = 1;
2222   int32_t thread_limit = 0; // use default
2223   return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
2224                                           tgt_offsets, arg_num, team_num,
2225                                           thread_limit, 0);
2226 }
2227 
2228 int32_t __tgt_rtl_run_target_region_async(int32_t device_id,
2229                                           void *tgt_entry_ptr, void **tgt_args,
2230                                           ptrdiff_t *tgt_offsets,
2231                                           int32_t arg_num,
2232                                           __tgt_async_info *AsyncInfo) {
2233   assert(AsyncInfo && "AsyncInfo is nullptr");
2234   initAsyncInfo(AsyncInfo);
2235 
  // Use a single team and the default number of threads.
2238   int32_t team_num = 1;
2239   int32_t thread_limit = 0; // use default
2240   return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
2241                                           tgt_offsets, arg_num, team_num,
2242                                           thread_limit, 0);
2243 }
2244 
2245 int32_t __tgt_rtl_synchronize(int32_t device_id, __tgt_async_info *AsyncInfo) {
2246   assert(AsyncInfo && "AsyncInfo is nullptr");
2247 
  // The CUDA plugin asserts that AsyncInfo->Queue is non-null, but this
  // invariant is not ensured by devices.cpp for amdgcn
  // assert(AsyncInfo->Queue && "AsyncInfo->Queue is nullptr");
2251   if (AsyncInfo->Queue) {
2252     finiAsyncInfo(AsyncInfo);
2253   }
2254   return OFFLOAD_SUCCESS;
2255 }
2256 
2257 namespace core {
2258 hsa_status_t allow_access_to_all_gpu_agents(void *ptr) {
2259   return hsa_amd_agents_allow_access(DeviceInfo.HSAAgents.size(),
2260                                      &DeviceInfo.HSAAgents[0], NULL, ptr);
2261 }
2262 
2263 } // namespace core
2264