//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "MemoryManager.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <string>

/// Map between Device ID (i.e. openmp device id) and its DeviceTy.
DevicesTy Devices;

// Copy constructor. The mutexes and the std::once_flag are not copyable, so
// fresh (unlocked / unset) ones are default-constructed for the copy, and the
// MemoryManager is not shared -- the copy starts with none.
// NOTE(review): D's maps/lists are read without taking D's locks; this
// assumes the source device is not being mutated concurrently -- confirm.
DeviceTy::DeviceTy(const DeviceTy &D)
    : DeviceID(D.DeviceID), RTL(D.RTL), RTLDeviceID(D.RTLDeviceID),
      IsInit(D.IsInit), InitFlag(), HasPendingGlobals(D.HasPendingGlobals),
      HostDataToTargetMap(D.HostDataToTargetMap),
      PendingCtorsDtors(D.PendingCtorsDtors), ShadowPtrMap(D.ShadowPtrMap),
      DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
      LoopTripCnt(D.LoopTripCnt), MemoryManager(nullptr) {}

// Copy assignment. Only the plain data members are copied; the mutexes,
// InitFlag and MemoryManager of *this are deliberately left untouched.
DeviceTy &DeviceTy::operator=(const DeviceTy &D) {
  DeviceID = D.DeviceID;
  RTL = D.RTL;
  RTLDeviceID = D.RTLDeviceID;
  IsInit = D.IsInit;
  HasPendingGlobals = D.HasPendingGlobals;
  HostDataToTargetMap = D.HostDataToTargetMap;
  PendingCtorsDtors = D.PendingCtorsDtors;
  ShadowPtrMap = D.ShadowPtrMap;
  LoopTripCnt = D.LoopTripCnt;

  return *this;
}

// Construct an uninitialized device bound to the given RTL plugin. The
// device/RTL ids are set later during registration; init() is deferred until
// first use via initOnce().
DeviceTy::DeviceTy(RTLInfoTy *RTL)
    : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
      HasPendingGlobals(false), HostDataToTargetMap(), PendingCtorsDtors(),
      ShadowPtrMap(), DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
      MemoryManager(nullptr) {}

DeviceTy::~DeviceTy() = default;

// Associate a pre-existing device allocation with a host pointer for Size
// bytes (backs omp_target_associate_ptr). The created map entry carries an
// infinite reference count so ordinary unmapping never deletes it.
// Returns OFFLOAD_SUCCESS when the association is created, or when an
// identical association already exists; OFFLOAD_FAIL on a conflicting
// re-association.
int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  DataMapMtx.lock();

  // Check if entry exists
  auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
  if (search != HostDataToTargetMap.end()) {
    // Mapping already exists: only acceptable if it matches exactly (same
    // extent and same device pointer).
    bool isValid = search->HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                   search->TgtPtrBegin == (uintptr_t)TgtPtrBegin;
    DataMapMtx.unlock();
    if (isValid) {
      DP("Attempt to re-associate the same device ptr+offset with the same "
         "host ptr, nothing to do\n");
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Not allowed to re-associate a different device ptr+offset with "
             "the same host ptr\n");
      return OFFLOAD_FAIL;
    }
  }

  // Mapping does not exist, allocate it with refCount=INF
  HostDataToTargetTy newEntry((uintptr_t)HstPtrBegin /*HstPtrBase*/,
                              (uintptr_t)HstPtrBegin /*HstPtrBegin*/,
                              (uintptr_t)HstPtrBegin + Size /*HstPtrEnd*/,
                              (uintptr_t)TgtPtrBegin /*TgtPtrBegin*/,
                              true /*IsRefCountINF*/);

  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", HstEnd="
     DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(newEntry.HstPtrBase),
     DPxPTR(newEntry.HstPtrBegin), DPxPTR(newEntry.HstPtrEnd),
     DPxPTR(newEntry.TgtPtrBegin));
  HostDataToTargetMap.insert(newEntry);

  DataMapMtx.unlock();

  return OFFLOAD_SUCCESS;
}

// Remove an association previously created by associatePtr (backs
// omp_target_disassociate_ptr). Only entries with an infinite reference
// count -- i.e. ones created by associatePtr -- may be removed here.
// Returns OFFLOAD_SUCCESS on removal, OFFLOAD_FAIL otherwise.
int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  DataMapMtx.lock();

  auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
  if (search != HostDataToTargetMap.end()) {
    // Mapping exists
    if (search->isRefCountInf()) {
      DP("Association found, removing it\n");
      HostDataToTargetMap.erase(search);
      DataMapMtx.unlock();
      return OFFLOAD_SUCCESS;
    } else {
      // Entry exists but was created by a regular map clause, not by
      // omp_target_associate_ptr; fall through to the failure path.
      REPORT("Trying to disassociate a pointer which was not mapped via "
             "omp_target_associate_ptr\n");
    }
  }

  // Mapping not found
  DataMapMtx.unlock();
  REPORT("Association not found\n");
  return OFFLOAD_FAIL;
}
// Get ref count of map entry containing HstPtrBegin. Returns 0 when no entry
// contains the address. Takes DataMapMtx internally.
uint64_t DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  uint64_t RefCnt = 0;

  DataMapMtx.lock();
  if (!HostDataToTargetMap.empty()) {
    // Entries are ordered by host begin address; the only candidate is the
    // last entry that starts at or before hp.
    auto upper = HostDataToTargetMap.upper_bound(hp);
    if (upper != HostDataToTargetMap.begin()) {
      upper--;
      if (hp >= upper->HstPtrBegin && hp < upper->HstPtrEnd) {
        DP("DeviceTy::getMapEntry: requested entry found\n");
        RefCnt = upper->getRefCount();
      }
    }
  }
  DataMapMtx.unlock();

  if (RefCnt == 0) {
    DP("DeviceTy::getMapEntry: requested entry not found\n");
  }

  return RefCnt;
}

// Look up the map entry (if any) overlapping [HstPtrBegin, HstPtrBegin+Size)
// and classify the overlap in lr.Flags: fully contained, extending before
// the entry, and/or extending past its end.
// NOTE(review): this routine does not take DataMapMtx itself; every caller in
// this file except the lock-free getTgtPtrBegin(HstPtrBegin, Size) overload
// holds the lock around the call.
LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
     DPxPTR(hp), Size);

  if (HostDataToTargetMap.empty())
    return lr;

  auto upper = HostDataToTargetMap.upper_bound(hp);
  // check the left bin: the last entry whose HstPtrBegin <= hp.
  if (upper != HostDataToTargetMap.begin()) {
    lr.Entry = std::prev(upper);
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                           (hp+Size) <= HT.HstPtrEnd;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  // check the right bin: the first entry starting after hp.
  if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
      upper != HostDataToTargetMap.end()) {
    lr.Entry = upper;
    auto &HT = *lr.Entry;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore = hp < HT.HstPtrBegin && (hp+Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp+Size) > HT.HstPtrEnd;
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
       "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
       "region\n");
  }

  return lr;
}

// Used by targetDataBegin
// Return the target pointer begin (where the data will be moved).
// Allocate memory if this is the first occurrence of this mapping.
// Increment the reference counter.
// If NULL is returned, then either data allocation failed or the user tried
// to do an illegal mapping.
// Out-parameters: IsNew is set when a fresh entry was created; IsHostPtr is
// set when the host address itself is returned (unified shared memory path).
void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
                                 int64_t Size, bool &IsNew, bool &IsHostPtr,
                                 bool IsImplicit, bool UpdateRefCount,
                                 bool HasCloseModifier,
                                 bool HasPresentModifier) {
  void *rc = NULL;
  IsHostPtr = false;
  IsNew = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // lead to the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (lr.Flags.IsContained ||
      ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *lr.Entry;
    IsNew = false;

    if (UpdateRefCount)
      HT.incRefCount();

    // Device address corresponding to HstPtrBegin, preserving the offset
    // into the existing entry.
    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
       "Size=%" PRId64 ",%s RefCount=%s\n", (IsImplicit ? " (implicit)" : ""),
       DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
       (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64 " bytes), but device allocation maps to host at "
            DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(lr.Entry->HstPtrBegin),
            lr.Entry->HstPtrEnd - lr.Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does not "
              "exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that are
    // not privatized use host address. Any explicitly mapped variables also use
    // host address where correctness is not impeded. In all other cases maps
    // are respected.
    // In addition to the mapping rules above, the close map modifier forces the
    // mapping of the variable to the device.
    if (Size) {
      // NOTE(review): the "RefCount=%s" argument here is the " updated"/""
      // string, not a count -- the log line is misleading; confirm intent.
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size,
         (UpdateRefCount ? " updated" : ""));
      IsHostPtr = true;
      rc = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry for it.
    IsNew = true;
    // NOTE(review): if allocData fails and returns null, tp is 0 but an entry
    // is still inserted below while NULL is returned -- confirm this is the
    // intended failure behavior.
    uintptr_t tp = (uintptr_t)allocData(Size, HstPtrBegin);
    DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", "
       "HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
       DPxPTR(HstPtrBase), DPxPTR(HstPtrBegin),
       DPxPTR((uintptr_t)HstPtrBegin + Size), DPxPTR(tp));
    HostDataToTargetMap.emplace(
        HostDataToTargetTy((uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                           (uintptr_t)HstPtrBegin + Size, tp));
    rc = (void *)tp;
  }

  DataMapMtx.unlock();
  return rc;
}

// Used by targetDataBegin, targetDataEnd, target_data_update and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
// IsLast reports whether this was the last reference; note the count is NOT
// decremented when IsLast is true -- final removal happens in deallocTgtPtr.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                               bool UpdateRefCount, bool &IsHostPtr,
                               bool MustContain) {
  void *rc = NULL;
  IsHostPtr = false;
  IsLast = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    IsLast = HT.getRefCount() == 1;

    if (!IsLast && UpdateRefCount)
      HT.decRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
       "Size=%" PRId64 ",%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp),
       Size, (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size, (UpdateRefCount ? " updated" : ""));
    IsHostPtr = true;
    rc = HstPtrBegin;
  }

  DataMapMtx.unlock();
  return rc;
}

// Return the target pointer begin (where the data will be moved).
// Lock-free version called when loading global symbols from the fat binary.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

// Decrement the reference count of the entry overlapping HstPtrBegin and,
// when it drops to zero (or ForceDelete resets it first), free the device
// memory and remove the entry. Returns OFFLOAD_SUCCESS/OFFLOAD_FAIL.
int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete,
                            bool HasCloseModifier) {
  // Under unified shared memory without the close modifier nothing was
  // allocated on the device, so there is nothing to release.
  if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY && !HasCloseModifier)
    return OFFLOAD_SUCCESS;
  // Check if the pointer is contained in any sub-nodes.
  int rc;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    if (ForceDelete)
      HT.resetRefCount();
    if (HT.decRefCount() == 0) {
      DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
         DPxPTR(HT.TgtPtrBegin), Size);
      deleteData((void *)HT.TgtPtrBegin);
      DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%" PRId64 "\n", (ForceDelete ? " (forced)" : ""),
         DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
      HostDataToTargetMap.erase(lr.Entry);
    }
    rc = OFFLOAD_SUCCESS;
  } else {
    REPORT("Section to delete (hst addr " DPxMOD ") does not exist in the"
           " allocated memory\n",
           DPxPTR(HstPtrBegin));
    rc = OFFLOAD_FAIL;
  }

  DataMapMtx.unlock();
  return rc;
}

/// Init device, should not be called directly. On plugin init failure IsInit
/// stays false, which initOnce() maps to OFFLOAD_FAIL.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(RTLs->RequiresFlags);
  int32_t Ret = RTL->init_device(RTLDeviceID);
  if (Ret != OFFLOAD_SUCCESS)
    return;

  // The memory manager will only be disabled when users provide a threshold via
  // the environment variable \p LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD and set
  // it to 0.
  // NOTE(review): std::stoul throws on a non-numeric value of the environment
  // variable -- confirm that is acceptable here.
  if (const char *Env = std::getenv("LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD")) {
    size_t Threshold = std::stoul(Env);
    if (Threshold)
      MemoryManager = std::make_unique<MemoryManagerTy>(*this, Threshold);
  } else
    MemoryManager = std::make_unique<MemoryManagerTy>(*this);

  IsInit = true;
}

/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

// Load binary to device.
400 __tgt_target_table *DeviceTy::load_binary(void *Img) { 401 RTL->Mtx.lock(); 402 __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img); 403 RTL->Mtx.unlock(); 404 return rc; 405 } 406 407 void *DeviceTy::allocData(int64_t Size, void *HstPtr) { 408 // If memory manager is enabled, we will allocate data via memory manager. 409 if (MemoryManager) 410 return MemoryManager->allocate(Size, HstPtr); 411 412 return RTL->data_alloc(RTLDeviceID, Size, HstPtr); 413 } 414 415 int32_t DeviceTy::deleteData(void *TgtPtrBegin) { 416 // If memory manager is enabled, we will deallocate data via memory manager. 417 if (MemoryManager) 418 return MemoryManager->free(TgtPtrBegin); 419 420 return RTL->data_delete(RTLDeviceID, TgtPtrBegin); 421 } 422 423 // Submit data to device 424 int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size, 425 __tgt_async_info *AsyncInfoPtr) { 426 if (!AsyncInfoPtr || !RTL->data_submit_async || !RTL->synchronize) 427 return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size); 428 else 429 return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size, 430 AsyncInfoPtr); 431 } 432 433 // Retrieve data from device 434 int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin, 435 int64_t Size, __tgt_async_info *AsyncInfoPtr) { 436 if (!AsyncInfoPtr || !RTL->data_retrieve_async || !RTL->synchronize) 437 return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size); 438 else 439 return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size, 440 AsyncInfoPtr); 441 } 442 443 // Copy data from current device to destination device directly 444 int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr, 445 int64_t Size, __tgt_async_info *AsyncInfo) { 446 if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) { 447 assert(RTL->data_exchange && "RTL->data_exchange is nullptr"); 448 return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, 
DstPtr, 449 Size); 450 } else 451 return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, 452 DstPtr, Size, AsyncInfo); 453 } 454 455 // Run region on device 456 int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr, 457 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize, 458 __tgt_async_info *AsyncInfoPtr) { 459 if (!AsyncInfoPtr || !RTL->run_region || !RTL->synchronize) 460 return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets, 461 TgtVarsSize); 462 else 463 return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, 464 TgtOffsets, TgtVarsSize, AsyncInfoPtr); 465 } 466 467 // Run team region on device. 468 int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr, 469 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize, 470 int32_t NumTeams, int32_t ThreadLimit, 471 uint64_t LoopTripCount, 472 __tgt_async_info *AsyncInfoPtr) { 473 if (!AsyncInfoPtr || !RTL->run_team_region_async || !RTL->synchronize) 474 return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, 475 TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit, 476 LoopTripCount); 477 else 478 return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, 479 TgtOffsets, TgtVarsSize, NumTeams, 480 ThreadLimit, LoopTripCount, AsyncInfoPtr); 481 } 482 483 // Whether data can be copied to DstDevice directly 484 bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) { 485 if (RTL != DstDevice.RTL || !RTL->is_data_exchangable) 486 return false; 487 488 if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID)) 489 return (RTL->data_exchange != nullptr) || 490 (RTL->data_exchange_async != nullptr); 491 492 return false; 493 } 494 495 int32_t DeviceTy::synchronize(__tgt_async_info *AsyncInfoPtr) { 496 if (RTL->synchronize) 497 return RTL->synchronize(RTLDeviceID, AsyncInfoPtr); 498 return OFFLOAD_SUCCESS; 499 } 500 501 /// Check whether a device has an associated RTL and initialize it if it's not 502 /// already initialized. 
503 bool device_is_ready(int device_num) { 504 DP("Checking whether device %d is ready.\n", device_num); 505 // Devices.size() can only change while registering a new 506 // library, so try to acquire the lock of RTLs' mutex. 507 RTLsMtx->lock(); 508 size_t Devices_size = Devices.size(); 509 RTLsMtx->unlock(); 510 if (Devices_size <= (size_t)device_num) { 511 DP("Device ID %d does not have a matching RTL\n", device_num); 512 return false; 513 } 514 515 // Get device info 516 DeviceTy &Device = Devices[device_num]; 517 518 DP("Is the device %d (local ID %d) initialized? %d\n", device_num, 519 Device.RTLDeviceID, Device.IsInit); 520 521 // Init the device if not done before 522 if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) { 523 DP("Failed to init device %d\n", device_num); 524 return false; 525 } 526 527 DP("Device %d is ready to use.\n", device_num); 528 529 return true; 530 } 531