1 //===--------- device.cpp - Target independent OpenMP target RTL ----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Functionality for managing devices that are handled by RTL plugins.
10 //
11 //===----------------------------------------------------------------------===//
12 
#include "device.h"
#include "MemoryManager.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <cstdlib>
#include <iterator>
#include <mutex>
#include <string>
21 
/// Map between Device ID (i.e. openmp device id) and its DeviceTy.
/// Its size is read under RTLsMtx, since it only changes while a new
/// library is being registered (see device_is_ready).
DevicesTy Devices;
24 
// Copy constructor. The std::once_flag and the mutexes are not copyable, so
// fresh ones are default-constructed; the per-device MemoryManager is also
// not shared and starts out null in the copy (it is created by init()).
DeviceTy::DeviceTy(const DeviceTy &D)
    : DeviceID(D.DeviceID), RTL(D.RTL), RTLDeviceID(D.RTLDeviceID),
      IsInit(D.IsInit), InitFlag(), HasPendingGlobals(D.HasPendingGlobals),
      HostDataToTargetMap(D.HostDataToTargetMap),
      PendingCtorsDtors(D.PendingCtorsDtors), ShadowPtrMap(D.ShadowPtrMap),
      DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
      LoopTripCnt(D.LoopTripCnt), MemoryManager(nullptr) {}
32 
// Copy assignment. Deliberately leaves InitFlag, the mutexes, and
// MemoryManager untouched: those members are not copyable/movable in a
// meaningful way here, so the target object keeps the ones it already owns.
DeviceTy &DeviceTy::operator=(const DeviceTy &D) {
  DeviceID = D.DeviceID;
  RTL = D.RTL;
  RTLDeviceID = D.RTLDeviceID;
  IsInit = D.IsInit;
  HasPendingGlobals = D.HasPendingGlobals;
  HostDataToTargetMap = D.HostDataToTargetMap;
  PendingCtorsDtors = D.PendingCtorsDtors;
  ShadowPtrMap = D.ShadowPtrMap;
  LoopTripCnt = D.LoopTripCnt;

  return *this;
}
46 
// Construct an uninitialized device bound to the given plugin. DeviceID and
// RTLDeviceID start at -1 (unassigned); init() later creates the
// MemoryManager and sets IsInit on success.
DeviceTy::DeviceTy(RTLInfoTy *RTL)
    : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
      HasPendingGlobals(false), HostDataToTargetMap(), PendingCtorsDtors(),
      ShadowPtrMap(), DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
      MemoryManager(nullptr) {}
52 
// Defined out-of-line (with MemoryManager.h included above) so the
// std::unique_ptr<MemoryManagerTy> member destroys a complete type.
DeviceTy::~DeviceTy() = default;
54 
55 int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
56   DataMapMtx.lock();
57 
58   // Check if entry exists
59   auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
60   if (search != HostDataToTargetMap.end()) {
61     // Mapping already exists
62     bool isValid = search->HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
63                    search->TgtPtrBegin == (uintptr_t)TgtPtrBegin;
64     DataMapMtx.unlock();
65     if (isValid) {
66       DP("Attempt to re-associate the same device ptr+offset with the same "
67          "host ptr, nothing to do\n");
68       return OFFLOAD_SUCCESS;
69     } else {
70       DP("Not allowed to re-associate a different device ptr+offset with the "
71          "same host ptr\n");
72       return OFFLOAD_FAIL;
73     }
74   }
75 
76   // Mapping does not exist, allocate it with refCount=INF
77   HostDataToTargetTy newEntry((uintptr_t) HstPtrBegin /*HstPtrBase*/,
78                               (uintptr_t) HstPtrBegin /*HstPtrBegin*/,
79                               (uintptr_t) HstPtrBegin + Size /*HstPtrEnd*/,
80                               (uintptr_t) TgtPtrBegin /*TgtPtrBegin*/,
81                               true /*IsRefCountINF*/);
82 
83   DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", HstEnd="
84       DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(newEntry.HstPtrBase),
85       DPxPTR(newEntry.HstPtrBegin), DPxPTR(newEntry.HstPtrEnd),
86       DPxPTR(newEntry.TgtPtrBegin));
87   HostDataToTargetMap.insert(newEntry);
88 
89   DataMapMtx.unlock();
90 
91   return OFFLOAD_SUCCESS;
92 }
93 
94 int DeviceTy::disassociatePtr(void *HstPtrBegin) {
95   DataMapMtx.lock();
96 
97   auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
98   if (search != HostDataToTargetMap.end()) {
99     // Mapping exists
100     if (search->isRefCountInf()) {
101       DP("Association found, removing it\n");
102       HostDataToTargetMap.erase(search);
103       DataMapMtx.unlock();
104       return OFFLOAD_SUCCESS;
105     } else {
106       DP("Trying to disassociate a pointer which was not mapped via "
107          "omp_target_associate_ptr\n");
108     }
109   }
110 
111   // Mapping not found
112   DataMapMtx.unlock();
113   DP("Association not found\n");
114   return OFFLOAD_FAIL;
115 }
116 
117 // Get ref count of map entry containing HstPtrBegin
118 uint64_t DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
119   uintptr_t hp = (uintptr_t)HstPtrBegin;
120   uint64_t RefCnt = 0;
121 
122   DataMapMtx.lock();
123   if (!HostDataToTargetMap.empty()) {
124     auto upper = HostDataToTargetMap.upper_bound(hp);
125     if (upper != HostDataToTargetMap.begin()) {
126       upper--;
127       if (hp >= upper->HstPtrBegin && hp < upper->HstPtrEnd) {
128         DP("DeviceTy::getMapEntry: requested entry found\n");
129         RefCnt = upper->getRefCount();
130       }
131     }
132   }
133   DataMapMtx.unlock();
134 
135   if (RefCnt == 0) {
136     DP("DeviceTy::getMapEntry: requested entry not found\n");
137   }
138 
139   return RefCnt;
140 }
141 
/// Find the map entry, if any, related to [HstPtrBegin, HstPtrBegin + Size).
/// Sets lr.Entry and the flags:
///  - IsContained: the queried range lies fully inside an existing entry.
///  - ExtendsBefore: the range starts before an entry but overlaps into it.
///  - ExtendsAfter: the range starts inside an entry but runs past its end.
/// NOTE(review): does not take DataMapMtx itself; callers in this file lock
/// around it, except the lock-free getTgtPtrBegin(HstPtrBegin, Size) overload.
LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
      DPxPTR(hp), Size);

  // No entries: return the default (all flags false, Entry unset) result.
  if (HostDataToTargetMap.empty())
    return lr;

  auto upper = HostDataToTargetMap.upper_bound(hp);
  // check the left bin: the last entry that starts at or before hp.
  if (upper != HostDataToTargetMap.begin()) {
    lr.Entry = std::prev(upper);
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
        (hp+Size) <= HT.HstPtrEnd;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  // check the right bin: the first entry that starts after hp (only relevant
  // if the left bin yielded no overlap).
  if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
      upper != HostDataToTargetMap.end()) {
    lr.Entry = upper;
    auto &HT = *lr.Entry;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore = hp < HT.HstPtrBegin && (hp+Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp+Size) > HT.HstPtrEnd;
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
        "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
        "region\n");
  }

  return lr;
}
186 
// Used by targetDataBegin
// Return the target pointer begin (where the data will be moved).
// Allocate memory if this is the first occurrence of this mapping.
// Increment the reference counter.
// If NULL is returned, then either data allocation failed or the user tried
// to do an illegal mapping.
// Out-parameters: IsNew is true only when a new entry was created here;
// IsHostPtr is true when unified shared memory lets us return the host
// address directly.
void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
                                 int64_t Size, bool &IsNew, bool &IsHostPtr,
                                 bool IsImplicit, bool UpdateRefCount,
                                 bool HasCloseModifier,
                                 bool HasPresentModifier) {
  void *rc = NULL;
  IsHostPtr = false;
  IsNew = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // lead to the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  // Note: an implicit map is also allowed to partially overlap an existing
  // entry (ExtendsBefore/ExtendsAfter) and still reuse it.
  if (lr.Flags.IsContained ||
      ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *lr.Entry;
    IsNew = false;

    if (UpdateRefCount)
      HT.incRefCount();

    // Translate the host offset into the existing device allocation.
    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
        "Size=%" PRId64 ",%s RefCount=%s\n", (IsImplicit ? " (implicit)" : ""),
        DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
        (UpdateRefCount ? " updated" : ""),
        HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64 " bytes), but device allocation maps to host at "
            DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(lr.Entry->HstPtrBegin),
            lr.Entry->HstPtrEnd - lr.Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does not "
              "exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that are
    // not privatized use host address. Any explicitly mapped variables also use
    // host address where correctness is not impeded. In all other cases maps
    // are respected.
    // In addition to the mapping rules above, the close map modifier forces the
    // mapping of the variable to the device.
    if (Size) {
      // NOTE(review): the RefCount=%s conversion below receives the
      // UpdateRefCount string (" updated"/"") rather than an actual count;
      // the debug output is misleading here — confirm intent.
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size,
         (UpdateRefCount ? " updated" : ""));
      IsHostPtr = true;
      rc = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry for it.
    IsNew = true;
    // NOTE(review): allocData may return null on allocation failure, yet the
    // entry is still inserted with TgtPtrBegin == 0 and rc == NULL is
    // returned; callers treat NULL as failure but the stale entry remains —
    // verify this is intended.
    uintptr_t tp = (uintptr_t)allocData(Size, HstPtrBegin);
    DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", "
       "HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
       DPxPTR(HstPtrBase), DPxPTR(HstPtrBegin),
       DPxPTR((uintptr_t)HstPtrBegin + Size), DPxPTR(tp));
    HostDataToTargetMap.emplace(
        HostDataToTargetTy((uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                           (uintptr_t)HstPtrBegin + Size, tp));
    rc = (void *)tp;
  }

  DataMapMtx.unlock();
  return rc;
}
273 
// Used by targetDataBegin, targetDataEnd, target_data_update and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
// Out-parameters: IsLast reports whether this was the last reference
// (refcount == 1); IsHostPtr reports that the host address was returned
// under unified shared memory.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                               bool UpdateRefCount, bool &IsHostPtr,
                               bool MustContain) {
  void *rc = NULL;
  IsHostPtr = false;
  IsLast = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    IsLast = HT.getRefCount() == 1;

    // The last reference is deliberately NOT decremented here; presumably
    // the caller removes the entry via deallocTgtPtr — TODO confirm.
    if (!IsLast && UpdateRefCount)
      HT.decRefCount();

    // Translate the host offset into the device allocation.
    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
        "Size=%" PRId64 ",%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp),
        Size, (UpdateRefCount ? " updated" : ""),
        HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size, (UpdateRefCount ? " updated" : ""));
    IsHostPtr = true;
    rc = HstPtrBegin;
  }

  DataMapMtx.unlock();
  return rc;
}
313 
314 // Return the target pointer begin (where the data will be moved).
315 // Lock-free version called when loading global symbols from the fat binary.
316 void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
317   uintptr_t hp = (uintptr_t)HstPtrBegin;
318   LookupResult lr = lookupMapping(HstPtrBegin, Size);
319   if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
320     auto &HT = *lr.Entry;
321     uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
322     return (void *)tp;
323   }
324 
325   return NULL;
326 }
327 
// Decrement the reference count of the map entry related to HstPtrBegin and,
// when it reaches zero, free the device allocation and remove the entry.
// ForceDelete first resets the count (presumably to 1, so the decrement
// below removes the entry on this call — TODO confirm resetRefCount
// semantics). Returns OFFLOAD_SUCCESS/OFFLOAD_FAIL.
int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete,
                            bool HasCloseModifier) {
  // Under unified shared memory (without 'close'), nothing was allocated on
  // the device, so there is nothing to free.
  if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY && !HasCloseModifier)
    return OFFLOAD_SUCCESS;
  // Check if the pointer is contained in any sub-nodes.
  int rc;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    if (ForceDelete)
      HT.resetRefCount();
    if (HT.decRefCount() == 0) {
      DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
          DPxPTR(HT.TgtPtrBegin), Size);
      deleteData((void *)HT.TgtPtrBegin);
      DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
          ", Size=%" PRId64 "\n", (ForceDelete ? " (forced)" : ""),
          DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
      HostDataToTargetMap.erase(lr.Entry);
    }
    rc = OFFLOAD_SUCCESS;
  } else {
    DP("Section to delete (hst addr " DPxMOD ") does not exist in the allocated"
       " memory\n", DPxPTR(HstPtrBegin));
    rc = OFFLOAD_FAIL;
  }

  DataMapMtx.unlock();
  return rc;
}
359 
360 /// Init device, should not be called directly.
361 void DeviceTy::init() {
362   // Make call to init_requires if it exists for this plugin.
363   if (RTL->init_requires)
364     RTL->init_requires(RTLs->RequiresFlags);
365   int32_t Ret = RTL->init_device(RTLDeviceID);
366   if (Ret != OFFLOAD_SUCCESS)
367     return;
368 
369   // The memory manager will only be disabled when users provide a threshold via
370   // the environment variable \p LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD and set
371   // it to 0.
372   if (const char *Env = std::getenv("LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD")) {
373     size_t Threshold = std::stoul(Env);
374     if (Threshold)
375       MemoryManager = std::make_unique<MemoryManagerTy>(*this, Threshold);
376   } else
377     MemoryManager = std::make_unique<MemoryManagerTy>(*this);
378 
379   IsInit = true;
380 }
381 
382 /// Thread-safe method to initialize the device only once.
383 int32_t DeviceTy::initOnce() {
384   std::call_once(InitFlag, &DeviceTy::init, this);
385 
386   // At this point, if IsInit is true, then either this thread or some other
387   // thread in the past successfully initialized the device, so we can return
388   // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
389   // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
390   // that some other thread already attempted to execute init() and if IsInit
391   // is still false, return OFFLOAD_FAIL.
392   if (IsInit)
393     return OFFLOAD_SUCCESS;
394   else
395     return OFFLOAD_FAIL;
396 }
397 
398 // Load binary to device.
399 __tgt_target_table *DeviceTy::load_binary(void *Img) {
400   RTL->Mtx.lock();
401   __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
402   RTL->Mtx.unlock();
403   return rc;
404 }
405 
406 void *DeviceTy::allocData(int64_t Size, void *HstPtr) {
407   // If memory manager is enabled, we will allocate data via memory manager.
408   if (MemoryManager)
409     return MemoryManager->allocate(Size, HstPtr);
410 
411   return RTL->data_alloc(RTLDeviceID, Size, HstPtr);
412 }
413 
414 int32_t DeviceTy::deleteData(void *TgtPtrBegin) {
415   // If memory manager is enabled, we will deallocate data via memory manager.
416   if (MemoryManager)
417     return MemoryManager->free(TgtPtrBegin);
418 
419   return RTL->data_delete(RTLDeviceID, TgtPtrBegin);
420 }
421 
422 // Submit data to device
423 int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size,
424                              __tgt_async_info *AsyncInfoPtr) {
425   if (!AsyncInfoPtr || !RTL->data_submit_async || !RTL->synchronize)
426     return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
427   else
428     return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
429                                   AsyncInfoPtr);
430 }
431 
432 // Retrieve data from device
433 int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
434                                int64_t Size, __tgt_async_info *AsyncInfoPtr) {
435   if (!AsyncInfoPtr || !RTL->data_retrieve_async || !RTL->synchronize)
436     return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
437   else
438     return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size,
439                                     AsyncInfoPtr);
440 }
441 
442 // Copy data from current device to destination device directly
443 int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
444                                int64_t Size, __tgt_async_info *AsyncInfo) {
445   if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) {
446     assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
447     return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
448                               Size);
449   } else
450     return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID,
451                                     DstPtr, Size, AsyncInfo);
452 }
453 
454 // Run region on device
455 int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr,
456                             ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
457                             __tgt_async_info *AsyncInfoPtr) {
458   if (!AsyncInfoPtr || !RTL->run_region || !RTL->synchronize)
459     return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
460                            TgtVarsSize);
461   else
462     return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
463                                  TgtOffsets, TgtVarsSize, AsyncInfoPtr);
464 }
465 
466 // Run team region on device.
467 int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr,
468                                 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
469                                 int32_t NumTeams, int32_t ThreadLimit,
470                                 uint64_t LoopTripCount,
471                                 __tgt_async_info *AsyncInfoPtr) {
472   if (!AsyncInfoPtr || !RTL->run_team_region_async || !RTL->synchronize)
473     return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
474                                 TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
475                                 LoopTripCount);
476   else
477     return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
478                                       TgtOffsets, TgtVarsSize, NumTeams,
479                                       ThreadLimit, LoopTripCount, AsyncInfoPtr);
480 }
481 
482 // Whether data can be copied to DstDevice directly
483 bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
484   if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
485     return false;
486 
487   if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
488     return (RTL->data_exchange != nullptr) ||
489            (RTL->data_exchange_async != nullptr);
490 
491   return false;
492 }
493 
494 int32_t DeviceTy::synchronize(__tgt_async_info *AsyncInfoPtr) {
495   if (RTL->synchronize)
496     return RTL->synchronize(RTLDeviceID, AsyncInfoPtr);
497   return OFFLOAD_SUCCESS;
498 }
499 
500 /// Check whether a device has an associated RTL and initialize it if it's not
501 /// already initialized.
502 bool device_is_ready(int device_num) {
503   DP("Checking whether device %d is ready.\n", device_num);
504   // Devices.size() can only change while registering a new
505   // library, so try to acquire the lock of RTLs' mutex.
506   RTLsMtx->lock();
507   size_t Devices_size = Devices.size();
508   RTLsMtx->unlock();
509   if (Devices_size <= (size_t)device_num) {
510     DP("Device ID  %d does not have a matching RTL\n", device_num);
511     return false;
512   }
513 
514   // Get device info
515   DeviceTy &Device = Devices[device_num];
516 
517   DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
518        Device.RTLDeviceID, Device.IsInit);
519 
520   // Init the device if not done before
521   if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
522     DP("Failed to init device %d\n", device_num);
523     return false;
524   }
525 
526   DP("Device %d is ready to use.\n", device_num);
527 
528   return true;
529 }
530