1 //===----RTLs/cuda/src/rtl.cpp - Target RTLs Implementation ------- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // RTL for CUDA machine
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include <cassert>
14 #include <cstddef>
15 #include <cuda.h>
16 #include <list>
17 #include <memory>
18 #include <mutex>
19 #include <string>
20 #include <vector>
21 
22 #include "Debug.h"
23 #include "omptargetplugin.h"
24 
25 #define TARGET_NAME CUDA
26 #define DEBUG_PREFIX "Target " GETNAME(TARGET_NAME) " RTL"
27 
28 #include "MemoryManager.h"
29 
30 // Utility for retrieving and printing CUDA error string.
31 #ifdef OMPTARGET_DEBUG
32 #define CUDA_ERR_STRING(err)                                                   \
33   do {                                                                         \
34     if (getDebugLevel() > 0) {                                                 \
35       const char *errStr = nullptr;                                            \
36       CUresult errStr_status = cuGetErrorString(err, &errStr);                 \
37       if (errStr_status == CUDA_ERROR_INVALID_VALUE)                           \
38         REPORT("Unrecognized CUDA error code: %d\n", err);                     \
39       else if (errStr_status == CUDA_SUCCESS)                                  \
40         REPORT("CUDA error is: %s\n", errStr);                                 \
41       else {                                                                   \
42         REPORT("Unresolved CUDA error code: %d\n", err);                       \
43         REPORT("Unsuccessful cuGetErrorString return status: %d\n",            \
44                errStr_status);                                                 \
45       }                                                                        \
46     } else {                                                                   \
47       const char *errStr = nullptr;                                            \
48       CUresult errStr_status = cuGetErrorString(err, &errStr);                 \
49       if (errStr_status == CUDA_SUCCESS)                                       \
50         REPORT("%s\n", errStr);                                                \
51     }                                                                          \
52   } while (false)
53 #else // OMPTARGET_DEBUG
54 #define CUDA_ERR_STRING(err)                                                   \
55   do {                                                                         \
56     const char *errStr = nullptr;                                              \
57     CUresult errStr_status = cuGetErrorString(err, &errStr);                   \
58     if (errStr_status == CUDA_SUCCESS)                                         \
59       REPORT("%s\n", errStr);                                                  \
60   } while (false)
61 #endif // OMPTARGET_DEBUG
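
// A minimal usage sketch: pass the CUresult returned by a CUDA Driver API call
// to this macro so that a readable message is emitted, mirroring how it is
// used throughout this file, e.g.:
//   CUresult Err = cuCtxSetCurrent(Context); // any CUcontext
//   if (Err != CUDA_SUCCESS)
//     CUDA_ERR_STRING(Err);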
62 
63 #include "elf_common.h"
64 
65 /// Keep entries table per device.
66 struct FuncOrGblEntryTy {
67   __tgt_target_table Table;
68   std::vector<__tgt_offload_entry> Entries;
69 };
70 
71 enum ExecutionModeType {
72   SPMD,    // constructors, destructors, and combined constructs
73            // (`teams distribute parallel for [simd]`)
74   GENERIC, // everything else
75   NONE
76 };
77 
78 /// Use a single entity to encode a kernel and a set of flags.
79 struct KernelTy {
80   CUfunction Func;
81 
82   // execution mode of kernel
83   // 0 - SPMD mode (without master warp)
84   // 1 - Generic mode (with master warp)
85   int8_t ExecutionMode;
86 
87   /// Maximal number of threads per block for this kernel.
88   int MaxThreadsPerBlock = 0;
89 
90   KernelTy(CUfunction _Func, int8_t _ExecutionMode)
91       : Func(_Func), ExecutionMode(_ExecutionMode) {}
92 };
93 
94 /// Device environment data
95 /// Manually sync with the deviceRTL side for now, move to a dedicated header
96 /// file later.
97 struct omptarget_device_environmentTy {
98   int32_t debug_level;
99 };
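
// Note: the deviceRTL is expected to define a matching global; loadBinary()
// below looks it up by the name "omptarget_device_environment" and copies this
// struct into it, so both sides must agree on the layout.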
100 
101 namespace {
102 bool checkResult(CUresult Err, const char *ErrMsg) {
103   if (Err == CUDA_SUCCESS)
104     return true;
105 
106   REPORT("%s", ErrMsg);
107   CUDA_ERR_STRING(Err);
108   return false;
109 }
110 
111 int memcpyDtoD(const void *SrcPtr, void *DstPtr, int64_t Size,
112                CUstream Stream) {
113   CUresult Err =
114       cuMemcpyDtoDAsync((CUdeviceptr)DstPtr, (CUdeviceptr)SrcPtr, Size, Stream);
115 
116   if (Err != CUDA_SUCCESS) {
117     REPORT("Error when copying data from device to device. Pointers: src "
118            "= " DPxMOD ", dst = " DPxMOD ", size = %" PRId64 "\n",
119            DPxPTR(SrcPtr), DPxPTR(DstPtr), Size);
120     CUDA_ERR_STRING(Err);
121     return OFFLOAD_FAIL;
122   }
123 
124   return OFFLOAD_SUCCESS;
125 }
126 
127 /// Structure containing per-device data.
128 struct DeviceDataTy {
129   /// List that contains all the kernels.
130   std::list<KernelTy> KernelsList;
131 
132   std::list<FuncOrGblEntryTy> FuncGblEntries;
133 
134   CUcontext Context = nullptr;
135   // Device properties
136   int ThreadsPerBlock = 0;
137   int BlocksPerGrid = 0;
138   int WarpSize = 0;
139   // OpenMP properties
140   int NumTeams = 0;
141   int NumThreads = 0;
142 };
143 
144 class StreamManagerTy {
145   int NumberOfDevices;
146   // The initial size of stream pool
147   int EnvNumInitialStreams;
148   // Per-device stream mutex
149   std::vector<std::unique_ptr<std::mutex>> StreamMtx;
150   // Per-device stream Id indicates the next available stream in the pool
151   std::vector<int> NextStreamId;
152   // Per-device stream pool
153   std::vector<std::vector<CUstream>> StreamPool;
154   // Reference to per-device data
155   std::vector<DeviceDataTy> &DeviceData;
156 
157   // If there is no CUstream left in the pool, resize the pool to allocate
158   // more CUstreams. This function should be called with the device mutex
159   // held, and it never shrinks the pool.
160   void resizeStreamPool(const int DeviceId, const size_t NewSize) {
161     std::vector<CUstream> &Pool = StreamPool[DeviceId];
162     const size_t CurrentSize = Pool.size();
163     assert(NewSize > CurrentSize && "new size is not larger than current size");
164 
165     CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
166     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n")) {
167       // Return early if we cannot switch to the right context, so that we do
168       // not create a bunch of streams that do not belong to the right device.
169       // The offloading will fail later because the selected CUstream is
170       // nullptr.
171       return;
172     }
173 
174     Pool.resize(NewSize, nullptr);
175 
176     for (size_t I = CurrentSize; I < NewSize; ++I) {
177       checkResult(cuStreamCreate(&Pool[I], CU_STREAM_NON_BLOCKING),
178                   "Error returned from cuStreamCreate\n");
179     }
180   }
181 
182 public:
183   StreamManagerTy(const int NumberOfDevices,
184                   std::vector<DeviceDataTy> &DeviceData)
185       : NumberOfDevices(NumberOfDevices), EnvNumInitialStreams(32),
186         DeviceData(DeviceData) {
187     StreamPool.resize(NumberOfDevices);
188     NextStreamId.resize(NumberOfDevices);
189     StreamMtx.resize(NumberOfDevices);
190 
191     if (const char *EnvStr = getenv("LIBOMPTARGET_NUM_INITIAL_STREAMS"))
192       EnvNumInitialStreams = std::stoi(EnvStr);
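    // For example (a usage sketch), running with
    // LIBOMPTARGET_NUM_INITIAL_STREAMS=64 in the environment raises the
    // initial per-device pool size from the default of 32 set above.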
193 
194     // Initialize the next stream id
195     std::fill(NextStreamId.begin(), NextStreamId.end(), 0);
196 
197     // Initialize stream mutex
198     for (std::unique_ptr<std::mutex> &Ptr : StreamMtx)
199       Ptr = std::make_unique<std::mutex>();
200   }
201 
202   ~StreamManagerTy() {
203     // Destroy streams
204     for (int I = 0; I < NumberOfDevices; ++I) {
205       checkResult(cuCtxSetCurrent(DeviceData[I].Context),
206                   "Error returned from cuCtxSetCurrent\n");
207 
208       for (CUstream &S : StreamPool[I]) {
209         if (S)
210           checkResult(cuStreamDestroy(S),
211                       "Error returned from cuStreamDestroy\n");
212       }
213     }
214   }
215 
216   // Get a CUstream from the pool. The per-device next stream id always points
217   // to the next available CUstream. That means CUstreams [0, id-1] have been
218   // assigned, and [id, ...] are still available. If there is no CUstream left,
219   // we request more CUstreams from the CUDA RT. Each time a CUstream is
220   // assigned, the id increases by one.
221   // xxxxxs+++++++++
222   //      ^
223   //      id
224   // After the assignment, s has been handed out and the pool becomes:
225   // xxxxxs+++++++++
226   //       ^
227   //       id
228   CUstream getStream(const int DeviceId) {
229     const std::lock_guard<std::mutex> Lock(*StreamMtx[DeviceId]);
230     int &Id = NextStreamId[DeviceId];
231     // No CUstream left in the pool, we need to request from CUDA RT
232     if (Id == StreamPool[DeviceId].size()) {
233       // By default we double the stream pool every time
234       resizeStreamPool(DeviceId, Id * 2);
235     }
236     return StreamPool[DeviceId][Id++];
237   }
238 
239   // Return a CUstream back to the pool. As mentioned above, the per-device
240   // next stream id always points to the next available CUstream, so when we
241   // return a CUstream, we first decrease the id and then copy the CUstream
242   // back into that slot.
243   // It is worth noting that streams may be returned in a different order than
244   // they were assigned, so at some point the pool may contain two identical
245   // CUstreams:
246   // xxax+a+++++
247   //     ^
248   //     id
249   // However, this does not matter because the duplicates are always on the two
250   // sides of id. The left one will eventually be overwritten by another
251   // returned CUstream. Therefore, after several executions, the order of the
252   // pool may differ from its initial state.
253   void returnStream(const int DeviceId, CUstream Stream) {
254     const std::lock_guard<std::mutex> Lock(*StreamMtx[DeviceId]);
255     int &Id = NextStreamId[DeviceId];
256     assert(Id > 0 && "Wrong stream ID");
257     StreamPool[DeviceId][--Id] = Stream;
258   }
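
  // Typical pairing of getStream()/returnStream() within this plugin (a
  // sketch; see DeviceRTLTy::getStream and DeviceRTLTy::synchronize below):
  //   CUstream S = StreamManager->getStream(DeviceId); // borrow from the pool
  //   ... enqueue asynchronous work on S ...
  //   cuStreamSynchronize(S);
  //   StreamManager->returnStream(DeviceId, S);        // hand it back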
259 
260   bool initializeDeviceStreamPool(const int DeviceId) {
261     assert(StreamPool[DeviceId].empty() && "stream pool has been initialized");
262 
263     resizeStreamPool(DeviceId, EnvNumInitialStreams);
264 
265     // Check the size of stream pool
266     if (StreamPool[DeviceId].size() != EnvNumInitialStreams)
267       return false;
268 
269     // Check whether each stream is valid
270     for (CUstream &S : StreamPool[DeviceId])
271       if (!S)
272         return false;
273 
274     return true;
275   }
276 };
277 
278 class DeviceRTLTy {
279   int NumberOfDevices;
280   // OpenMP environment properties
281   int EnvNumTeams;
282   int EnvTeamLimit;
283   // OpenMP requires flags
284   int64_t RequiresFlags;
285 
286   static constexpr const int HardTeamLimit = 1U << 16U; // 64k
287   static constexpr const int HardThreadLimit = 1024;
288   static constexpr const int DefaultNumTeams = 128;
289   static constexpr const int DefaultNumThreads = 128;
290 
291   std::unique_ptr<StreamManagerTy> StreamManager;
292   std::vector<DeviceDataTy> DeviceData;
293   std::vector<CUmodule> Modules;
294 
295   /// A class responsible for interacting with device native runtime library to
296   /// allocate and free memory.
297   class CUDADeviceAllocatorTy : public DeviceAllocatorTy {
298     const int DeviceId;
299     const std::vector<DeviceDataTy> &DeviceData;
300 
301   public:
302     CUDADeviceAllocatorTy(int DeviceId, std::vector<DeviceDataTy> &DeviceData)
303         : DeviceId(DeviceId), DeviceData(DeviceData) {}
304 
305     void *allocate(size_t Size, void *) override {
306       if (Size == 0)
307         return nullptr;
308 
309       CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
310       if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
311         return nullptr;
312 
313       CUdeviceptr DevicePtr;
314       Err = cuMemAlloc(&DevicePtr, Size);
315       if (!checkResult(Err, "Error returned from cuMemAlloc\n"))
316         return nullptr;
317 
318       return (void *)DevicePtr;
319     }
320 
321     int free(void *TgtPtr) override {
322       CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
323       if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
324         return OFFLOAD_FAIL;
325 
326       Err = cuMemFree((CUdeviceptr)TgtPtr);
327       if (!checkResult(Err, "Error returned from cuMemFree\n"))
328         return OFFLOAD_FAIL;
329 
330       return OFFLOAD_SUCCESS;
331     }
332   };
333 
334   /// A vector of device allocators
335   std::vector<CUDADeviceAllocatorTy> DeviceAllocators;
336 
337   /// A vector of memory managers. Since the memory manager is non-copyable and
338   /// non-movable, we wrap them into std::unique_ptr.
339   std::vector<std::unique_ptr<MemoryManagerTy>> MemoryManagers;
340 
341   /// Whether to use the memory manager.
342   bool UseMemoryManager = true;
343 
344   // Record entry point associated with device
345   void addOffloadEntry(const int DeviceId, const __tgt_offload_entry entry) {
346     FuncOrGblEntryTy &E = DeviceData[DeviceId].FuncGblEntries.back();
347     E.Entries.push_back(entry);
348   }
349 
350   // Return a pointer to the offload entry associated with the given address.
351   const __tgt_offload_entry *getOffloadEntry(const int DeviceId,
352                                              const void *Addr) const {
353     for (const __tgt_offload_entry &Itr :
354          DeviceData[DeviceId].FuncGblEntries.back().Entries)
355       if (Itr.addr == Addr)
356         return &Itr;
357 
358     return nullptr;
359   }
360 
361   // Return the pointer to the target entries table
362   __tgt_target_table *getOffloadEntriesTable(const int DeviceId) {
363     FuncOrGblEntryTy &E = DeviceData[DeviceId].FuncGblEntries.back();
364 
365     if (E.Entries.empty())
366       return nullptr;
367 
368     // Update table info according to the entries and return the pointer
369     E.Table.EntriesBegin = E.Entries.data();
370     E.Table.EntriesEnd = E.Entries.data() + E.Entries.size();
371 
372     return &E.Table;
373   }
374 
375   // Clear entries table for a device
376   void clearOffloadEntriesTable(const int DeviceId) {
377     DeviceData[DeviceId].FuncGblEntries.emplace_back();
378     FuncOrGblEntryTy &E = DeviceData[DeviceId].FuncGblEntries.back();
379     E.Entries.clear();
380     E.Table.EntriesBegin = E.Table.EntriesEnd = nullptr;
381   }
382 
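  // Return the CUstream associated with the given AsyncInfo, lazily borrowing
  // one from the per-device pool on first use. The stream is handed back to
  // the pool in synchronize() once its queued work has completed.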
383   CUstream getStream(const int DeviceId, __tgt_async_info *AsyncInfoPtr) const {
384     assert(AsyncInfoPtr && "AsyncInfoPtr is nullptr");
385 
386     if (!AsyncInfoPtr->Queue)
387       AsyncInfoPtr->Queue = StreamManager->getStream(DeviceId);
388 
389     return reinterpret_cast<CUstream>(AsyncInfoPtr->Queue);
390   }
391 
392 public:
393   // This class should not be copied or moved.
394   DeviceRTLTy(const DeviceRTLTy &) = delete;
395   DeviceRTLTy(DeviceRTLTy &&) = delete;
396 
397   DeviceRTLTy()
398       : NumberOfDevices(0), EnvNumTeams(-1), EnvTeamLimit(-1),
399         RequiresFlags(OMP_REQ_UNDEFINED) {
400 
401     DP("Start initializing CUDA\n");
402 
403     CUresult Err = cuInit(0);
404     if (!checkResult(Err, "Error returned from cuInit\n")) {
405       return;
406     }
407 
408     Err = cuDeviceGetCount(&NumberOfDevices);
409     if (!checkResult(Err, "Error returned from cuDeviceGetCount\n"))
410       return;
411 
412     if (NumberOfDevices == 0) {
413       DP("There are no devices supporting CUDA.\n");
414       return;
415     }
416 
417     DeviceData.resize(NumberOfDevices);
418 
419     // Get environment variables regarding teams
420     if (const char *EnvStr = getenv("OMP_TEAM_LIMIT")) {
421       // OMP_TEAM_LIMIT has been set
422       EnvTeamLimit = std::stoi(EnvStr);
423       DP("Parsed OMP_TEAM_LIMIT=%d\n", EnvTeamLimit);
424     }
425     if (const char *EnvStr = getenv("OMP_NUM_TEAMS")) {
426       // OMP_NUM_TEAMS has been set
427       EnvNumTeams = std::stoi(EnvStr);
428       DP("Parsed OMP_NUM_TEAMS=%d\n", EnvNumTeams);
429     }
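
    // For example (a usage sketch), launching an application with
    // OMP_NUM_TEAMS=256 and OMP_TEAM_LIMIT=1024 in the environment seeds
    // EnvNumTeams and EnvTeamLimit, which are consulted in initDevice() and
    // runTargetTeamRegion() below.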
430 
431     StreamManager =
432         std::make_unique<StreamManagerTy>(NumberOfDevices, DeviceData);
433 
434     for (int I = 0; I < NumberOfDevices; ++I)
435       DeviceAllocators.emplace_back(I, DeviceData);
436 
437     // Get the size threshold from environment variable
438     std::pair<size_t, bool> Res = MemoryManagerTy::getSizeThresholdFromEnv();
439     UseMemoryManager = Res.second;
440     size_t MemoryManagerThreshold = Res.first;
441 
442     if (UseMemoryManager)
443       for (int I = 0; I < NumberOfDevices; ++I)
444         MemoryManagers.emplace_back(std::make_unique<MemoryManagerTy>(
445             DeviceAllocators[I], MemoryManagerThreshold));
446   }
447 
448   ~DeviceRTLTy() {
449     // We first destruct the memory managers in case the data they depend on
450     // is destroyed before them.
451     for (auto &M : MemoryManagers)
452       M.release();
453 
454     StreamManager = nullptr;
455 
456     for (CUmodule &M : Modules)
457       // Close module
458       if (M)
459         checkResult(cuModuleUnload(M), "Error returned from cuModuleUnload\n");
460 
461     for (DeviceDataTy &D : DeviceData) {
462       // Destroy context
463       if (D.Context) {
464         checkResult(cuCtxSetCurrent(D.Context),
465                     "Error returned from cuCtxSetCurrent\n");
466         CUdevice Device;
467         checkResult(cuCtxGetDevice(&Device),
468                     "Error returned from cuCtxGetDevice\n");
469         checkResult(cuDevicePrimaryCtxRelease(Device),
470                     "Error returned from cuDevicePrimaryCtxRelease\n");
471       }
472     }
473   }
474 
475   // Check whether a given DeviceId is valid
476   bool isValidDeviceId(const int DeviceId) const {
477     return DeviceId >= 0 && DeviceId < NumberOfDevices;
478   }
479 
480   int getNumOfDevices() const { return NumberOfDevices; }
481 
482   void setRequiresFlag(const int64_t Flags) { this->RequiresFlags = Flags; }
483 
484   int initDevice(const int DeviceId) {
485     CUdevice Device;
486 
487     DP("Getting device %d\n", DeviceId);
488     CUresult Err = cuDeviceGet(&Device, DeviceId);
489     if (!checkResult(Err, "Error returned from cuDeviceGet\n"))
490       return OFFLOAD_FAIL;
491 
492     // Query the current flags of the primary context and set its flags if
493     // it is inactive
494     unsigned int FormerPrimaryCtxFlags = 0;
495     int FormerPrimaryCtxIsActive = 0;
496     Err = cuDevicePrimaryCtxGetState(Device, &FormerPrimaryCtxFlags,
497                                      &FormerPrimaryCtxIsActive);
498     if (!checkResult(Err, "Error returned from cuDevicePrimaryCtxGetState\n"))
499       return OFFLOAD_FAIL;
500 
501     if (FormerPrimaryCtxIsActive) {
502       DP("The primary context is active, no change to its flags\n");
503       if ((FormerPrimaryCtxFlags & CU_CTX_SCHED_MASK) !=
504           CU_CTX_SCHED_BLOCKING_SYNC)
505         DP("Warning: the current flags are not CU_CTX_SCHED_BLOCKING_SYNC\n");
506     } else {
507       DP("The primary context is inactive, set its flags to "
508          "CU_CTX_SCHED_BLOCKING_SYNC\n");
509       Err = cuDevicePrimaryCtxSetFlags(Device, CU_CTX_SCHED_BLOCKING_SYNC);
510       if (!checkResult(Err, "Error returned from cuDevicePrimaryCtxSetFlags\n"))
511         return OFFLOAD_FAIL;
512     }
513 
514     // Retain the per device primary context and save it to use whenever this
515     // device is selected.
516     Err = cuDevicePrimaryCtxRetain(&DeviceData[DeviceId].Context, Device);
517     if (!checkResult(Err, "Error returned from cuDevicePrimaryCtxRetain\n"))
518       return OFFLOAD_FAIL;
519 
520     Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
521     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
522       return OFFLOAD_FAIL;
523 
524     // Initialize stream pool
525     if (!StreamManager->initializeDeviceStreamPool(DeviceId))
526       return OFFLOAD_FAIL;
527 
528     // Query attributes to determine number of threads/block and blocks/grid.
529     int MaxGridDimX;
530     Err = cuDeviceGetAttribute(&MaxGridDimX, CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X,
531                                Device);
532     if (Err != CUDA_SUCCESS) {
533       DP("Error getting max grid dimension, using default value %d\n",
534          DeviceRTLTy::DefaultNumTeams);
535       DeviceData[DeviceId].BlocksPerGrid = DeviceRTLTy::DefaultNumTeams;
536     } else if (MaxGridDimX <= DeviceRTLTy::HardTeamLimit) {
537       DP("Using %d CUDA blocks per grid\n", MaxGridDimX);
538       DeviceData[DeviceId].BlocksPerGrid = MaxGridDimX;
539     } else {
540       DP("Max CUDA blocks per grid %d exceeds the hard team limit %d, capping "
541          "at the hard limit\n",
542          MaxGridDimX, DeviceRTLTy::HardTeamLimit);
543       DeviceData[DeviceId].BlocksPerGrid = DeviceRTLTy::HardTeamLimit;
544     }
545 
546     // We are only exploiting threads along the x axis.
547     int MaxBlockDimX;
548     Err = cuDeviceGetAttribute(&MaxBlockDimX,
549                                CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, Device);
550     if (Err != CUDA_SUCCESS) {
551       DP("Error getting max block dimension, using default value %d\n",
552          DeviceRTLTy::DefaultNumThreads);
553       DeviceData[DeviceId].ThreadsPerBlock = DeviceRTLTy::DefaultNumThreads;
554     } else if (MaxBlockDimX <= DeviceRTLTy::HardThreadLimit) {
555       DP("Using %d CUDA threads per block\n", MaxBlockDimX);
556       DeviceData[DeviceId].ThreadsPerBlock = MaxBlockDimX;
557     } else {
558       DP("Max CUDA threads per block %d exceeds the hard thread limit %d, "
559          "capping at the hard limit\n",
560          MaxBlockDimX, DeviceRTLTy::HardThreadLimit);
561       DeviceData[DeviceId].ThreadsPerBlock = DeviceRTLTy::HardThreadLimit;
562     }
563 
564     // Get and set warp size
565     int WarpSize;
566     Err =
567         cuDeviceGetAttribute(&WarpSize, CU_DEVICE_ATTRIBUTE_WARP_SIZE, Device);
568     if (Err != CUDA_SUCCESS) {
569       DP("Error getting warp size, assuming default value 32\n");
570       DeviceData[DeviceId].WarpSize = 32;
571     } else {
572       DP("Using warp size %d\n", WarpSize);
573       DeviceData[DeviceId].WarpSize = WarpSize;
574     }
575 
576     // Adjust teams to the env variables
577     if (EnvTeamLimit > 0 && DeviceData[DeviceId].BlocksPerGrid > EnvTeamLimit) {
578       DP("Capping max CUDA blocks per grid to OMP_TEAM_LIMIT=%d\n",
579          EnvTeamLimit);
580       DeviceData[DeviceId].BlocksPerGrid = EnvTeamLimit;
581     }
582 
583     INFO(OMP_INFOTYPE_PLUGIN_KERNEL, DeviceId,
584          "Device supports up to %d CUDA blocks and %d threads with a "
585          "warp size of %d\n",
586          DeviceData[DeviceId].BlocksPerGrid,
587          DeviceData[DeviceId].ThreadsPerBlock, DeviceData[DeviceId].WarpSize);
588 
589     // Set default number of teams
590     if (EnvNumTeams > 0) {
591       DP("Default number of teams set according to environment %d\n",
592          EnvNumTeams);
593       DeviceData[DeviceId].NumTeams = EnvNumTeams;
594     } else {
595       DeviceData[DeviceId].NumTeams = DeviceRTLTy::DefaultNumTeams;
596       DP("Default number of teams set according to library's default %d\n",
597          DeviceRTLTy::DefaultNumTeams);
598     }
599 
600     if (DeviceData[DeviceId].NumTeams > DeviceData[DeviceId].BlocksPerGrid) {
601       DP("Default number of teams exceeds device limit, capping at %d\n",
602          DeviceData[DeviceId].BlocksPerGrid);
603       DeviceData[DeviceId].NumTeams = DeviceData[DeviceId].BlocksPerGrid;
604     }
605 
606     // Set default number of threads
607     DeviceData[DeviceId].NumThreads = DeviceRTLTy::DefaultNumThreads;
608     DP("Default number of threads set according to library's default %d\n",
609        DeviceRTLTy::DefaultNumThreads);
610     if (DeviceData[DeviceId].NumThreads >
611         DeviceData[DeviceId].ThreadsPerBlock) {
612       DP("Default number of threads exceeds device limit, capping at %d\n",
613          DeviceData[DeviceId].ThreadsPerBlock);
614       DeviceData[DeviceId].NumThreads = DeviceData[DeviceId].ThreadsPerBlock;
615     }
616 
617     return OFFLOAD_SUCCESS;
618   }
619 
620   __tgt_target_table *loadBinary(const int DeviceId,
621                                  const __tgt_device_image *Image) {
622     // Set the context we are using
623     CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
624     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
625       return nullptr;
626 
627     // Clear the offload table as we are going to create a new one.
628     clearOffloadEntriesTable(DeviceId);
629 
630     // Create the module and extract the function pointers.
631     CUmodule Module;
632     DP("Load data from image " DPxMOD "\n", DPxPTR(Image->ImageStart));
633     Err = cuModuleLoadDataEx(&Module, Image->ImageStart, 0, nullptr, nullptr);
634     if (!checkResult(Err, "Error returned from cuModuleLoadDataEx\n"))
635       return nullptr;
636 
637     DP("CUDA module successfully loaded!\n");
638 
639     Modules.push_back(Module);
640 
641     // Find the symbols in the module by name.
642     const __tgt_offload_entry *HostBegin = Image->EntriesBegin;
643     const __tgt_offload_entry *HostEnd = Image->EntriesEnd;
644 
645     std::list<KernelTy> &KernelsList = DeviceData[DeviceId].KernelsList;
646     for (const __tgt_offload_entry *E = HostBegin; E != HostEnd; ++E) {
647       if (!E->addr) {
648         // We return nullptr when something like this happens: the host should
649         // always have something in the address field to uniquely identify the
650         // target region.
651         DP("Invalid binary: host entry '<null>' (size = %zd)...\n", E->size);
652         return nullptr;
653       }
654 
655       if (E->size) {
656         __tgt_offload_entry Entry = *E;
657         CUdeviceptr CUPtr;
658         size_t CUSize;
659         Err = cuModuleGetGlobal(&CUPtr, &CUSize, Module, E->name);
660         // We do not use checkResult here because we need the name.
661         if (Err != CUDA_SUCCESS) {
662           REPORT("Loading global '%s' failed\n", E->name);
663           CUDA_ERR_STRING(Err);
664           return nullptr;
665         }
666 
667         if (CUSize != E->size) {
668           DP("Loading global '%s' - size mismatch (%zd != %zd)\n", E->name,
669              CUSize, E->size);
670           return nullptr;
671         }
672 
673         DP("Entry point " DPxMOD " maps to global %s (" DPxMOD ")\n",
674            DPxPTR(E - HostBegin), E->name, DPxPTR(CUPtr));
675 
676         Entry.addr = (void *)(CUPtr);
677 
678         // Note: In the current implementation declare target variables
679         // can either be link or to. This means that once unified
680         // memory is activated via the requires directive, the variable
681         // can be used directly from the host in both cases.
682         // TODO: when variables types other than to or link are added,
683         // the below condition should be changed to explicitly
684         // check for to and link variables types:
685         // (RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY && (e->flags &
686         // OMP_DECLARE_TARGET_LINK || e->flags == OMP_DECLARE_TARGET_TO))
687         if (RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
688           // If unified memory is present any target link or to variables
689           // can access host addresses directly. There is no longer a
690           // need for device copies.
691           cuMemcpyHtoD(CUPtr, E->addr, sizeof(void *));
692           DP("Copy linked variable host address (" DPxMOD
693              ") to device address (" DPxMOD ")\n",
694              DPxPTR(*((void **)E->addr)), DPxPTR(CUPtr));
695         }
696 
697         addOffloadEntry(DeviceId, Entry);
698 
699         continue;
700       }
701 
702       CUfunction Func;
703       Err = cuModuleGetFunction(&Func, Module, E->name);
704       // We do not use checkResult here because we need the name.
705       if (Err != CUDA_SUCCESS) {
706         REPORT("Loading '%s' failed\n", E->name);
707         CUDA_ERR_STRING(Err);
708         return nullptr;
709       }
710 
711       DP("Entry point " DPxMOD " maps to %s (" DPxMOD ")\n",
712          DPxPTR(E - HostBegin), E->name, DPxPTR(Func));
713 
714       // default value GENERIC (in case symbol is missing from cubin file)
715       int8_t ExecModeVal = ExecutionModeType::GENERIC;
716       std::string ExecModeNameStr(E->name);
717       ExecModeNameStr += "_exec_mode";
718       const char *ExecModeName = ExecModeNameStr.c_str();
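      // For example, for a (hypothetical) entry named "__omp_offloading_foo",
      // the deviceRTL is expected to emit an int8_t global named
      // "__omp_offloading_foo_exec_mode" holding 0 (SPMD) or 1 (Generic).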
719 
720       CUdeviceptr ExecModePtr;
721       size_t CUSize;
722       Err = cuModuleGetGlobal(&ExecModePtr, &CUSize, Module, ExecModeName);
723       if (Err == CUDA_SUCCESS) {
724         if (CUSize != sizeof(int8_t)) {
725           DP("Loading global exec_mode '%s' - size mismatch (%zd != %zd)\n",
726              ExecModeName, CUSize, sizeof(int8_t));
727           return nullptr;
728         }
729 
730         Err = cuMemcpyDtoH(&ExecModeVal, ExecModePtr, CUSize);
731         if (Err != CUDA_SUCCESS) {
732           REPORT("Error when copying data from device to host. Pointers: "
733                  "host = " DPxMOD ", device = " DPxMOD ", size = %zd\n",
734                  DPxPTR(&ExecModeVal), DPxPTR(ExecModePtr), CUSize);
735           CUDA_ERR_STRING(Err);
736           return nullptr;
737         }
738 
739         if (ExecModeVal < 0 || ExecModeVal > 1) {
740           DP("Error: wrong exec_mode value specified in cubin file: %d\n",
741              ExecModeVal);
742           return nullptr;
743         }
744       } else {
745         REPORT("Loading global exec_mode '%s' - symbol missing, using default "
746                "value GENERIC (1)\n",
747                ExecModeName);
748         CUDA_ERR_STRING(Err);
749       }
750 
751       KernelsList.emplace_back(Func, ExecModeVal);
752 
753       __tgt_offload_entry Entry = *E;
754       Entry.addr = &KernelsList.back();
755       addOffloadEntry(DeviceId, Entry);
756     }
757 
758     // send device environment data to the device
759     {
760       omptarget_device_environmentTy DeviceEnv{0};
761 
762 #ifdef OMPTARGET_DEBUG
763       if (const char *EnvStr = getenv("LIBOMPTARGET_DEVICE_RTL_DEBUG"))
764         DeviceEnv.debug_level = std::stoi(EnvStr);
765 #endif
766 
767       const char *DeviceEnvName = "omptarget_device_environment";
768       CUdeviceptr DeviceEnvPtr;
769       size_t CUSize;
770 
771       Err = cuModuleGetGlobal(&DeviceEnvPtr, &CUSize, Module, DeviceEnvName);
772       if (Err == CUDA_SUCCESS) {
773         if (CUSize != sizeof(DeviceEnv)) {
774           REPORT(
775               "Global device_environment '%s' - size mismatch (%zu != %zu)\n",
776               DeviceEnvName, CUSize, sizeof(DeviceEnv));
777           // There is no CUDA error to print here; Err is CUDA_SUCCESS.
778           return nullptr;
779         }
780 
781         Err = cuMemcpyHtoD(DeviceEnvPtr, &DeviceEnv, CUSize);
782         if (Err != CUDA_SUCCESS) {
783           REPORT("Error when copying data from host to device. Pointers: "
784                  "host = " DPxMOD ", device = " DPxMOD ", size = %zu\n",
785                  DPxPTR(&DeviceEnv), DPxPTR(DeviceEnvPtr), CUSize);
786           CUDA_ERR_STRING(Err);
787           return nullptr;
788         }
789 
790         DP("Sending global device environment data %zu bytes\n", CUSize);
791       } else {
792         DP("Finding global device environment '%s' - symbol missing.\n",
793            DeviceEnvName);
794         DP("Continuing, assuming this is a device RTL which does not accept "
795            "environment settings.\n");
796       }
797     }
798 
799     return getOffloadEntriesTable(DeviceId);
800   }
801 
802   void *dataAlloc(const int DeviceId, const int64_t Size) {
803     if (UseMemoryManager)
804       return MemoryManagers[DeviceId]->allocate(Size, nullptr);
805 
806     return DeviceAllocators[DeviceId].allocate(Size, nullptr);
807   }
808 
809   int dataSubmit(const int DeviceId, const void *TgtPtr, const void *HstPtr,
810                  const int64_t Size, __tgt_async_info *AsyncInfoPtr) const {
811     assert(AsyncInfoPtr && "AsyncInfoPtr is nullptr");
812 
813     CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
814     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
815       return OFFLOAD_FAIL;
816 
817     CUstream Stream = getStream(DeviceId, AsyncInfoPtr);
818 
819     Err = cuMemcpyHtoDAsync((CUdeviceptr)TgtPtr, HstPtr, Size, Stream);
820     if (Err != CUDA_SUCCESS) {
821       REPORT("Error when copying data from host to device. Pointers: host "
822              "= " DPxMOD ", device = " DPxMOD ", size = %" PRId64 "\n",
823              DPxPTR(HstPtr), DPxPTR(TgtPtr), Size);
824       CUDA_ERR_STRING(Err);
825       return OFFLOAD_FAIL;
826     }
827 
828     return OFFLOAD_SUCCESS;
829   }
830 
831   int dataRetrieve(const int DeviceId, void *HstPtr, const void *TgtPtr,
832                    const int64_t Size, __tgt_async_info *AsyncInfoPtr) const {
833     assert(AsyncInfoPtr && "AsyncInfoPtr is nullptr");
834 
835     CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
836     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
837       return OFFLOAD_FAIL;
838 
839     CUstream Stream = getStream(DeviceId, AsyncInfoPtr);
840 
841     Err = cuMemcpyDtoHAsync(HstPtr, (CUdeviceptr)TgtPtr, Size, Stream);
842     if (Err != CUDA_SUCCESS) {
843       REPORT("Error when copying data from device to host. Pointers: host "
844              "= " DPxMOD ", device = " DPxMOD ", size = %" PRId64 "\n",
845              DPxPTR(HstPtr), DPxPTR(TgtPtr), Size);
846       CUDA_ERR_STRING(Err);
847       return OFFLOAD_FAIL;
848     }
849 
850     return OFFLOAD_SUCCESS;
851   }
852 
853   int dataExchange(int SrcDevId, const void *SrcPtr, int DstDevId, void *DstPtr,
854                    int64_t Size, __tgt_async_info *AsyncInfoPtr) const {
855     assert(AsyncInfoPtr && "AsyncInfoPtr is nullptr");
856 
857     CUresult Err = cuCtxSetCurrent(DeviceData[SrcDevId].Context);
858     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
859       return OFFLOAD_FAIL;
860 
861     CUstream Stream = getStream(SrcDevId, AsyncInfoPtr);
862 
863     // If these are two different devices, we try a peer-to-peer copy first.
864     if (SrcDevId != DstDevId) {
865       int CanAccessPeer = 0;
866       Err = cuDeviceCanAccessPeer(&CanAccessPeer, SrcDevId, DstDevId);
867       if (Err != CUDA_SUCCESS) {
868         REPORT("Error returned from cuDeviceCanAccessPeer. src = %" PRId32
869                ", dst = %" PRId32 "\n",
870                SrcDevId, DstDevId);
871         CUDA_ERR_STRING(Err);
872         return memcpyDtoD(SrcPtr, DstPtr, Size, Stream);
873       }
874 
875       if (!CanAccessPeer) {
876         DP("P2P memcpy not supported so fall back to D2D memcpy\n");
877         return memcpyDtoD(SrcPtr, DstPtr, Size, Stream);
878       }
879 
880       Err = cuCtxEnablePeerAccess(DeviceData[DstDevId].Context, 0);
881       if (Err != CUDA_SUCCESS) {
882         REPORT("Error returned from cuCtxEnablePeerAccess. src = %" PRId32
883                ", dst = %" PRId32 "\n",
884                SrcDevId, DstDevId);
885         CUDA_ERR_STRING(Err);
886         return memcpyDtoD(SrcPtr, DstPtr, Size, Stream);
887       }
888 
889       Err = cuMemcpyPeerAsync((CUdeviceptr)DstPtr, DeviceData[DstDevId].Context,
890                               (CUdeviceptr)SrcPtr, DeviceData[SrcDevId].Context,
891                               Size, Stream);
892       if (Err == CUDA_SUCCESS)
893         return OFFLOAD_SUCCESS;
894 
895       REPORT("Error returned from cuMemcpyPeerAsync. src_ptr = " DPxMOD
896              ", src_id =%" PRId32 ", dst_ptr = " DPxMOD ", dst_id =%" PRId32
897              "\n",
898              DPxPTR(SrcPtr), SrcDevId, DPxPTR(DstPtr), DstDevId);
899       CUDA_ERR_STRING(Err);
900     }
901 
902     return memcpyDtoD(SrcPtr, DstPtr, Size, Stream);
903   }
904 
905   int dataDelete(const int DeviceId, void *TgtPtr) {
906     if (UseMemoryManager)
907       return MemoryManagers[DeviceId]->free(TgtPtr);
908 
909     return DeviceAllocators[DeviceId].free(TgtPtr);
910   }
911 
912   int runTargetTeamRegion(const int DeviceId, void *TgtEntryPtr, void **TgtArgs,
913                           ptrdiff_t *TgtOffsets, const int ArgNum,
914                           const int TeamNum, const int ThreadLimit,
915                           const unsigned int LoopTripCount,
916                           __tgt_async_info *AsyncInfo) const {
917     CUresult Err = cuCtxSetCurrent(DeviceData[DeviceId].Context);
918     if (!checkResult(Err, "Error returned from cuCtxSetCurrent\n"))
919       return OFFLOAD_FAIL;
920 
921     // All args are passed by reference, i.e. as pointers to the actual values.
922     std::vector<void *> Args(ArgNum);
923     std::vector<void *> Ptrs(ArgNum);
924 
925     for (int I = 0; I < ArgNum; ++I) {
926       Ptrs[I] = (void *)((intptr_t)TgtArgs[I] + TgtOffsets[I]);
927       Args[I] = &Ptrs[I];
928     }
929 
930     KernelTy *KernelInfo = reinterpret_cast<KernelTy *>(TgtEntryPtr);
931 
932     int CudaThreadsPerBlock;
933     if (ThreadLimit > 0) {
934       DP("Setting CUDA threads per block to requested %d\n", ThreadLimit);
935       CudaThreadsPerBlock = ThreadLimit;
936       // Add master warp if necessary
937       if (KernelInfo->ExecutionMode == GENERIC) {
938         DP("Adding master warp: +%d threads\n", DeviceData[DeviceId].WarpSize);
939         CudaThreadsPerBlock += DeviceData[DeviceId].WarpSize;
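        // For example, a requested thread_limit of 128 with a warp size of 32
        // yields 128 + 32 = 160 CUDA threads per block, reserving one warp for
        // the master thread in Generic mode.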
940       }
941     } else {
942       DP("Setting CUDA threads per block to default %d\n",
943          DeviceData[DeviceId].NumThreads);
944       CudaThreadsPerBlock = DeviceData[DeviceId].NumThreads;
945     }
946 
947     if (CudaThreadsPerBlock > DeviceData[DeviceId].ThreadsPerBlock) {
948       DP("Threads per block capped at device limit %d\n",
949          DeviceData[DeviceId].ThreadsPerBlock);
950       CudaThreadsPerBlock = DeviceData[DeviceId].ThreadsPerBlock;
951     }
952 
953     if (!KernelInfo->MaxThreadsPerBlock) {
954       Err = cuFuncGetAttribute(&KernelInfo->MaxThreadsPerBlock,
955                                CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK,
956                                KernelInfo->Func);
957       if (!checkResult(Err, "Error returned from cuFuncGetAttribute\n"))
958         return OFFLOAD_FAIL;
959     }
960 
961     if (KernelInfo->MaxThreadsPerBlock < CudaThreadsPerBlock) {
962       DP("Threads per block capped at kernel limit %d\n",
963          KernelInfo->MaxThreadsPerBlock);
964       CudaThreadsPerBlock = KernelInfo->MaxThreadsPerBlock;
965     }
966 
967     unsigned int CudaBlocksPerGrid;
968     if (TeamNum <= 0) {
969       if (LoopTripCount > 0 && EnvNumTeams < 0) {
970         if (KernelInfo->ExecutionMode == SPMD) {
971           // We have a combined construct, i.e. `target teams distribute
972           // parallel for [simd]`. We launch enough teams so that each thread
973           // executes one iteration of the loop; round up to the nearest
974           // integer.
975           CudaBlocksPerGrid = ((LoopTripCount - 1) / CudaThreadsPerBlock) + 1;
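          // For example, LoopTripCount = 1000 with CudaThreadsPerBlock = 128
          // gives ((1000 - 1) / 128) + 1 = 8 blocks, so 8 * 128 = 1024 threads
          // cover all 1000 iterations.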
976         } else {
977           // If we reach this point, then we have a non-combined construct, i.e.
978           // `teams distribute` with a nested `parallel for` and each team is
979           // assigned one iteration of the `distribute` loop. E.g.:
980           //
981           // #pragma omp target teams distribute
982           // for(...loop_tripcount...) {
983           //   #pragma omp parallel for
984           //   for(...) {}
985           // }
986           //
987           // Threads within a team will execute the iterations of the `parallel`
988           // loop.
989           CudaBlocksPerGrid = LoopTripCount;
990         }
991         DP("Using %d teams due to loop trip count %" PRIu32
992            " and number of threads per block %d\n",
993            CudaBlocksPerGrid, LoopTripCount, CudaThreadsPerBlock);
994       } else {
995         DP("Using default number of teams %d\n", DeviceData[DeviceId].NumTeams);
996         CudaBlocksPerGrid = DeviceData[DeviceId].NumTeams;
997       }
998     } else if (TeamNum > DeviceData[DeviceId].BlocksPerGrid) {
999       DP("Capping number of teams to team limit %d\n",
1000          DeviceData[DeviceId].BlocksPerGrid);
1001       CudaBlocksPerGrid = DeviceData[DeviceId].BlocksPerGrid;
1002     } else {
1003       DP("Using requested number of teams %d\n", TeamNum);
1004       CudaBlocksPerGrid = TeamNum;
1005     }
1006 
1007     INFO(OMP_INFOTYPE_PLUGIN_KERNEL, DeviceId,
1008          "Launching kernel %s with %d blocks and %d threads in %s "
1009          "mode\n",
1010          (getOffloadEntry(DeviceId, TgtEntryPtr))
1011              ? getOffloadEntry(DeviceId, TgtEntryPtr)->name
1012              : "(null)",
1013          CudaBlocksPerGrid, CudaThreadsPerBlock,
1014          (KernelInfo->ExecutionMode == SPMD) ? "SPMD" : "Generic");
1015 
1016     CUstream Stream = getStream(DeviceId, AsyncInfo);
1017     Err = cuLaunchKernel(KernelInfo->Func, CudaBlocksPerGrid, /* gridDimY */ 1,
1018                          /* gridDimZ */ 1, CudaThreadsPerBlock,
1019                          /* blockDimY */ 1, /* blockDimZ */ 1,
1020                          /* sharedMemBytes */ 0, Stream, &Args[0], nullptr);
1021     if (!checkResult(Err, "Error returned from cuLaunchKernel\n"))
1022       return OFFLOAD_FAIL;
1023 
1024     DP("Launch of entry point at " DPxMOD " successful!\n",
1025        DPxPTR(TgtEntryPtr));
1026 
1027     return OFFLOAD_SUCCESS;
1028   }
1029 
1030   int synchronize(const int DeviceId, __tgt_async_info *AsyncInfoPtr) const {
1031     CUstream Stream = reinterpret_cast<CUstream>(AsyncInfoPtr->Queue);
1032     CUresult Err = cuStreamSynchronize(Stream);
1033     if (Err != CUDA_SUCCESS) {
1034       REPORT("Error when synchronizing stream. stream = " DPxMOD
1035              ", async info ptr = " DPxMOD "\n",
1036              DPxPTR(Stream), DPxPTR(AsyncInfoPtr));
1037       CUDA_ERR_STRING(Err);
1038       return OFFLOAD_FAIL;
1039     }
1040 
1041     // Once the stream is synchronized, return it to the stream pool and reset
1042     // async_info. This makes sure the synchronization only applies to the
1043     // stream's own tasks.
1044     StreamManager->returnStream(
1045         DeviceId, reinterpret_cast<CUstream>(AsyncInfoPtr->Queue));
1046     AsyncInfoPtr->Queue = nullptr;
1047 
1048     return OFFLOAD_SUCCESS;
1049   }
1050 };
1051 
1052 DeviceRTLTy DeviceRTL;
1053 } // namespace
1054 
1055 // Exposed library API functions
1056 #ifdef __cplusplus
1057 extern "C" {
1058 #endif
1059 
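// Return non-zero if the provided image is an ELF object whose e_machine field
// matches EM_CUDA (190), i.e. a cubin targeting an NVIDIA GPU.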
1060 int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *image) {
1061   return elf_check_machine(image, /* EM_CUDA */ 190);
1062 }
1063 
1064 int32_t __tgt_rtl_number_of_devices() { return DeviceRTL.getNumOfDevices(); }
1065 
1066 int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
1067   DP("Init requires flags to %" PRId64 "\n", RequiresFlags);
1068   DeviceRTL.setRequiresFlag(RequiresFlags);
1069   return RequiresFlags;
1070 }
1071 
1072 int32_t __tgt_rtl_is_data_exchangable(int32_t src_dev_id, int dst_dev_id) {
1073   if (DeviceRTL.isValidDeviceId(src_dev_id) &&
1074       DeviceRTL.isValidDeviceId(dst_dev_id))
1075     return 1;
1076 
1077   return 0;
1078 }
1079 
1080 int32_t __tgt_rtl_init_device(int32_t device_id) {
1081   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1082 
1083   return DeviceRTL.initDevice(device_id);
1084 }
1085 
1086 __tgt_target_table *__tgt_rtl_load_binary(int32_t device_id,
1087                                           __tgt_device_image *image) {
1088   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1089 
1090   return DeviceRTL.loadBinary(device_id, image);
1091 }
1092 
1093 void *__tgt_rtl_data_alloc(int32_t device_id, int64_t size, void *) {
1094   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1095 
1096   return DeviceRTL.dataAlloc(device_id, size);
1097 }
1098 
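// The synchronous entry points below are implemented in terms of their *_async
// counterparts: they create a local __tgt_async_info, issue the asynchronous
// operation, and then block in __tgt_rtl_synchronize on the resulting stream.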
1099 int32_t __tgt_rtl_data_submit(int32_t device_id, void *tgt_ptr, void *hst_ptr,
1100                               int64_t size) {
1101   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1102 
1103   __tgt_async_info async_info;
1104   const int32_t rc = __tgt_rtl_data_submit_async(device_id, tgt_ptr, hst_ptr,
1105                                                  size, &async_info);
1106   if (rc != OFFLOAD_SUCCESS)
1107     return OFFLOAD_FAIL;
1108 
1109   return __tgt_rtl_synchronize(device_id, &async_info);
1110 }
1111 
1112 int32_t __tgt_rtl_data_submit_async(int32_t device_id, void *tgt_ptr,
1113                                     void *hst_ptr, int64_t size,
1114                                     __tgt_async_info *async_info_ptr) {
1115   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1116   assert(async_info_ptr && "async_info_ptr is nullptr");
1117 
1118   return DeviceRTL.dataSubmit(device_id, tgt_ptr, hst_ptr, size,
1119                               async_info_ptr);
1120 }
1121 
1122 int32_t __tgt_rtl_data_retrieve(int32_t device_id, void *hst_ptr, void *tgt_ptr,
1123                                 int64_t size) {
1124   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1125 
1126   __tgt_async_info async_info;
1127   const int32_t rc = __tgt_rtl_data_retrieve_async(device_id, hst_ptr, tgt_ptr,
1128                                                    size, &async_info);
1129   if (rc != OFFLOAD_SUCCESS)
1130     return OFFLOAD_FAIL;
1131 
1132   return __tgt_rtl_synchronize(device_id, &async_info);
1133 }
1134 
1135 int32_t __tgt_rtl_data_retrieve_async(int32_t device_id, void *hst_ptr,
1136                                       void *tgt_ptr, int64_t size,
1137                                       __tgt_async_info *async_info_ptr) {
1138   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1139   assert(async_info_ptr && "async_info_ptr is nullptr");
1140 
1141   return DeviceRTL.dataRetrieve(device_id, hst_ptr, tgt_ptr, size,
1142                                 async_info_ptr);
1143 }
1144 
1145 int32_t __tgt_rtl_data_exchange_async(int32_t src_dev_id, void *src_ptr,
1146                                       int dst_dev_id, void *dst_ptr,
1147                                       int64_t size,
1148                                       __tgt_async_info *async_info_ptr) {
1149   assert(DeviceRTL.isValidDeviceId(src_dev_id) && "src_dev_id is invalid");
1150   assert(DeviceRTL.isValidDeviceId(dst_dev_id) && "dst_dev_id is invalid");
1151   assert(async_info_ptr && "async_info_ptr is nullptr");
1152 
1153   return DeviceRTL.dataExchange(src_dev_id, src_ptr, dst_dev_id, dst_ptr, size,
1154                                 async_info_ptr);
1155 }
1156 
1157 int32_t __tgt_rtl_data_exchange(int32_t src_dev_id, void *src_ptr,
1158                                 int32_t dst_dev_id, void *dst_ptr,
1159                                 int64_t size) {
1160   assert(DeviceRTL.isValidDeviceId(src_dev_id) && "src_dev_id is invalid");
1161   assert(DeviceRTL.isValidDeviceId(dst_dev_id) && "dst_dev_id is invalid");
1162 
1163   __tgt_async_info async_info;
1164   const int32_t rc = __tgt_rtl_data_exchange_async(
1165       src_dev_id, src_ptr, dst_dev_id, dst_ptr, size, &async_info);
1166   if (rc != OFFLOAD_SUCCESS)
1167     return OFFLOAD_FAIL;
1168 
1169   return __tgt_rtl_synchronize(src_dev_id, &async_info);
1170 }
1171 
1172 int32_t __tgt_rtl_data_delete(int32_t device_id, void *tgt_ptr) {
1173   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1174 
1175   return DeviceRTL.dataDelete(device_id, tgt_ptr);
1176 }
1177 
1178 int32_t __tgt_rtl_run_target_team_region(int32_t device_id, void *tgt_entry_ptr,
1179                                          void **tgt_args,
1180                                          ptrdiff_t *tgt_offsets,
1181                                          int32_t arg_num, int32_t team_num,
1182                                          int32_t thread_limit,
1183                                          uint64_t loop_tripcount) {
1184   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1185 
1186   __tgt_async_info async_info;
1187   const int32_t rc = __tgt_rtl_run_target_team_region_async(
1188       device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, team_num,
1189       thread_limit, loop_tripcount, &async_info);
1190   if (rc != OFFLOAD_SUCCESS)
1191     return OFFLOAD_FAIL;
1192 
1193   return __tgt_rtl_synchronize(device_id, &async_info);
1194 }
1195 
1196 int32_t __tgt_rtl_run_target_team_region_async(
1197     int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
1198     ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t team_num,
1199     int32_t thread_limit, uint64_t loop_tripcount,
1200     __tgt_async_info *async_info_ptr) {
1201   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1202 
1203   return DeviceRTL.runTargetTeamRegion(
1204       device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, team_num,
1205       thread_limit, loop_tripcount, async_info_ptr);
1206 }
1207 
1208 int32_t __tgt_rtl_run_target_region(int32_t device_id, void *tgt_entry_ptr,
1209                                     void **tgt_args, ptrdiff_t *tgt_offsets,
1210                                     int32_t arg_num) {
1211   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1212 
1213   __tgt_async_info async_info;
1214   const int32_t rc = __tgt_rtl_run_target_region_async(
1215       device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, &async_info);
1216   if (rc != OFFLOAD_SUCCESS)
1217     return OFFLOAD_FAIL;
1218 
1219   return __tgt_rtl_synchronize(device_id, &async_info);
1220 }
1221 
1222 int32_t __tgt_rtl_run_target_region_async(int32_t device_id,
1223                                           void *tgt_entry_ptr, void **tgt_args,
1224                                           ptrdiff_t *tgt_offsets,
1225                                           int32_t arg_num,
1226                                           __tgt_async_info *async_info_ptr) {
1227   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1228 
1229   return __tgt_rtl_run_target_team_region_async(
1230       device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num,
1231       /* team num*/ 1, /* thread_limit */ 1, /* loop_tripcount */ 0,
1232       async_info_ptr);
1233 }
1234 
1235 int32_t __tgt_rtl_synchronize(int32_t device_id,
1236                               __tgt_async_info *async_info_ptr) {
1237   assert(DeviceRTL.isValidDeviceId(device_id) && "device_id is invalid");
1238   assert(async_info_ptr && "async_info_ptr is nullptr");
1239   assert(async_info_ptr->Queue && "async_info_ptr->Queue is nullptr");
1240 
1241   return DeviceRTL.synchronize(device_id, async_info_ptr);
1242 }
1243 
1244 #ifdef __cplusplus
1245 }
1246 #endif
1247