//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "MemoryManager.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <cstdio>
#include <string>

DeviceTy::DeviceTy(const DeviceTy &D)
    : DeviceID(D.DeviceID), RTL(D.RTL), RTLDeviceID(D.RTLDeviceID),
      IsInit(D.IsInit), InitFlag(), HasPendingGlobals(D.HasPendingGlobals),
      HostDataToTargetMap(D.HostDataToTargetMap),
      PendingCtorsDtors(D.PendingCtorsDtors), ShadowPtrMap(D.ShadowPtrMap),
      DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
      LoopTripCnt(D.LoopTripCnt), MemoryManager(nullptr) {}

DeviceTy &DeviceTy::operator=(const DeviceTy &D) {
  DeviceID = D.DeviceID;
  RTL = D.RTL;
  RTLDeviceID = D.RTLDeviceID;
  IsInit = D.IsInit;
  HasPendingGlobals = D.HasPendingGlobals;
  HostDataToTargetMap = D.HostDataToTargetMap;
  PendingCtorsDtors = D.PendingCtorsDtors;
  ShadowPtrMap = D.ShadowPtrMap;
  LoopTripCnt = D.LoopTripCnt;

  return *this;
}

DeviceTy::DeviceTy(RTLInfoTy *RTL)
    : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
      HasPendingGlobals(false), HostDataToTargetMap(), PendingCtorsDtors(),
      ShadowPtrMap(), DataMapMtx(), PendingGlobalsMtx(), ShadowMtx(),
      MemoryManager(nullptr) {}

DeviceTy::~DeviceTy() {
  if (DeviceID == -1 || !(getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE))
    return;

  ident_t loc = {0, 0, 0, 0, ";libomptarget;libomptarget;0;0;;"};
  dumpTargetPointerMappings(&loc, *this);
}

int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  DataMapMtx.lock();

  // Check if entry exists
  auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
  if (search != HostDataToTargetMap.end()) {
    // Mapping already exists
    bool isValid = search->HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                   search->TgtPtrBegin == (uintptr_t)TgtPtrBegin;
    DataMapMtx.unlock();
    if (isValid) {
      DP("Attempt to re-associate the same device ptr+offset with the same "
         "host ptr, nothing to do\n");
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Not allowed to re-associate a different device ptr+offset with "
             "the same host ptr\n");
      return OFFLOAD_FAIL;
    }
  }

  // Mapping does not exist, allocate it with refCount=INF
  HostDataToTargetTy newEntry((uintptr_t)HstPtrBegin /*HstPtrBase*/,
                              (uintptr_t)HstPtrBegin /*HstPtrBegin*/,
                              (uintptr_t)HstPtrBegin + Size /*HstPtrEnd*/,
                              (uintptr_t)TgtPtrBegin /*TgtPtrBegin*/, nullptr,
                              true /*IsRefCountINF*/);

  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD
     ", HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
     DPxPTR(newEntry.HstPtrBase), DPxPTR(newEntry.HstPtrBegin),
     DPxPTR(newEntry.HstPtrEnd), DPxPTR(newEntry.TgtPtrBegin));
  HostDataToTargetMap.insert(newEntry);

  DataMapMtx.unlock();

  return OFFLOAD_SUCCESS;
}

int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  DataMapMtx.lock();

  auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
  if (search != HostDataToTargetMap.end()) {
    // Mapping exists
    if (search->isRefCountInf()) {
      DP("Association found, removing it\n");
      HostDataToTargetMap.erase(search);
      DataMapMtx.unlock();
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Trying to disassociate a pointer which was not mapped via "
             "omp_target_associate_ptr\n");
    }
  }

  // Mapping not found
  DataMapMtx.unlock();
  REPORT("Association not found\n");
  return OFFLOAD_FAIL;
}
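// The two routines above back the user-level omp_target_associate_ptr and
// omp_target_disassociate_ptr entry points (see api.cpp). A minimal usage
// sketch, assuming an existing device allocation; variable names are
// illustrative only:
//
//   void *DevPtr = omp_target_alloc(Size, DeviceNum);
//   omp_target_associate_ptr(HstPtr, DevPtr, Size, /*device_offset=*/0,
//                            DeviceNum);
//   // ... target regions now reuse DevPtr for HstPtr (refCount stays INF) ...
//   omp_target_disassociate_ptr(HstPtr, DeviceNum);
//   omp_target_free(DevPtr, DeviceNum);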
// Get ref count of map entry containing HstPtrBegin
uint64_t DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  uint64_t RefCnt = 0;

  DataMapMtx.lock();
  if (!HostDataToTargetMap.empty()) {
    auto upper = HostDataToTargetMap.upper_bound(hp);
    if (upper != HostDataToTargetMap.begin()) {
      upper--;
      if (hp >= upper->HstPtrBegin && hp < upper->HstPtrEnd) {
        DP("DeviceTy::getMapEntry: requested entry found\n");
        RefCnt = upper->getRefCount();
      }
    }
  }
  DataMapMtx.unlock();

  if (RefCnt == 0) {
    DP("DeviceTy::getMapEntry: requested entry not found\n");
  }

  return RefCnt;
}

LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
     DPxPTR(hp), Size);

  if (HostDataToTargetMap.empty())
    return lr;

  auto upper = HostDataToTargetMap.upper_bound(hp);
  // check the left bin
  if (upper != HostDataToTargetMap.begin()) {
    lr.Entry = std::prev(upper);
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                           (hp + Size) <= HT.HstPtrEnd;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  // check the right bin
  if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
      upper != HostDataToTargetMap.end()) {
    lr.Entry = upper;
    auto &HT = *lr.Entry;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore =
        hp < HT.HstPtrBegin && (hp + Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
       "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
       "region\n");
  }

  return lr;
}
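// As an illustration of the flags computed above (addresses are made up):
// with a single entry mapping the host range [0x1000, 0x1100), a lookup for
// HstPtrBegin=0x1080 with Size=0x40 sets IsContained; a lookup for
// HstPtrBegin=0x1080 with Size=0x100 sets ExtendsAfter; and a lookup for
// HstPtrBegin=0x0f80 with Size=0x100 sets ExtendsBefore.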
// Used by targetDataBegin
// Return the target pointer begin (where the data will be moved).
// Allocate memory if this is the first occurrence of this mapping.
// Increment the reference counter.
// If NULL is returned, then either data allocation failed or the user tried
// to do an illegal mapping.
void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
                                 int64_t Size, map_var_info_t HstPtrName,
                                 bool &IsNew, bool &IsHostPtr, bool IsImplicit,
                                 bool UpdateRefCount, bool HasCloseModifier,
                                 bool HasPresentModifier) {
  void *rc = NULL;
  IsHostPtr = false;
  IsNew = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // cause the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (lr.Flags.IsContained ||
      ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *lr.Entry;
    IsNew = false;

    if (UpdateRefCount)
      HT.incRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    if (getDebugLevel() || getInfoLevel() & OMP_INFOTYPE_MAPPING_EXISTS)
      INFO(DeviceID,
           "Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
           ", Size=%" PRId64 ",%s RefCount=%s, Name=%s\n",
           (IsImplicit ? " (implicit)" : ""), DPxPTR(HstPtrBegin), DPxPTR(tp),
           Size, (UpdateRefCount ? " updated" : ""),
           HT.isRefCountInf() ? "INF"
                              : std::to_string(HT.getRefCount()).c_str(),
           (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    rc = (void *)tp;
  } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64 " bytes), but device allocation maps to host at "
            DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(lr.Entry->HstPtrBegin),
            lr.Entry->HstPtrEnd - lr.Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does not "
              "exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that are
    // not privatized use the host address. Any explicitly mapped variables
    // also use the host address where correctness is not impeded. In all other
    // cases maps are respected.
    // In addition to the mapping rules above, the close map modifier forces
    // the mapping of the variable to the device.
    if (Size) {
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size,
         (UpdateRefCount ? " updated" : ""));
      IsHostPtr = true;
      rc = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry for it.
    IsNew = true;
    uintptr_t tp = (uintptr_t)allocData(Size, HstPtrBegin);
    DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", "
       "HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
       DPxPTR(HstPtrBase), DPxPTR(HstPtrBegin),
       DPxPTR((uintptr_t)HstPtrBegin + Size), DPxPTR(tp));
    HostDataToTargetMap.emplace(
        HostDataToTargetTy((uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                           (uintptr_t)HstPtrBegin + Size, tp, HstPtrName));
    rc = (void *)tp;
  }

  DataMapMtx.unlock();
  return rc;
}
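// A rough sketch of how the caller (targetDataBegin in omptarget.cpp) is
// expected to consume the result above; illustrative pseudocode, not the
// exact caller:
//
//   bool IsNew, IsHostPtr;
//   void *TgtPtr = Device.getOrAllocTgtPtr(HstPtrBegin, HstPtrBase, Size,
//                                          HstPtrName, IsNew, IsHostPtr,
//                                          IsImplicit, UpdateRefCount,
//                                          HasCloseModifier,
//                                          HasPresentModifier);
//   if (!TgtPtr && Size)
//     return OFFLOAD_FAIL; // allocation failed or illegal mapping
//   if (IsNew && !IsHostPtr /* && the map type requests "to" */)
//     Device.submitData(TgtPtr, HstPtrBegin, Size, AsyncInfo);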
// Used by targetDataBegin, targetDataEnd, targetDataUpdate and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                               bool UpdateRefCount, bool &IsHostPtr,
                               bool MustContain) {
  void *rc = NULL;
  IsHostPtr = false;
  IsLast = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    IsLast = HT.getRefCount() == 1;

    if (!IsLast && UpdateRefCount)
      HT.decRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
       "Size=%" PRId64 ",%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp),
       Size, (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size,
       (UpdateRefCount ? " updated" : ""));
    IsHostPtr = true;
    rc = HstPtrBegin;
  }

  DataMapMtx.unlock();
  return rc;
}

// Return the target pointer begin (where the data will be moved).
// Lock-free version called when loading global symbols from the fat binary.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete,
                            bool HasCloseModifier) {
  if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
      !HasCloseModifier)
    return OFFLOAD_SUCCESS;
  // Check if the pointer is contained in any sub-nodes.
  int rc;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    if (ForceDelete)
      HT.resetRefCount();
    if (HT.decRefCount() == 0) {
      DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
         DPxPTR(HT.TgtPtrBegin), Size);
      deleteData((void *)HT.TgtPtrBegin);
      DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%" PRId64 "\n", (ForceDelete ? " (forced)" : ""),
         DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
      HostDataToTargetMap.erase(lr.Entry);
    }
    rc = OFFLOAD_SUCCESS;
  } else {
    REPORT("Section to delete (hst addr " DPxMOD ") does not exist in the"
           " allocated memory\n",
           DPxPTR(HstPtrBegin));
    rc = OFFLOAD_FAIL;
  }

  DataMapMtx.unlock();
  return rc;
}
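// The reference count maintained by getOrAllocTgtPtr/deallocTgtPtr follows
// the usual OpenMP present-table semantics. As an illustration (the
// directives below are user code, not anything executed by this file):
//
//   #pragma omp target enter data map(to: A[0:N])     // count 0 -> 1, alloc
//   #pragma omp target enter data map(to: A[0:N])     // count 1 -> 2
//   #pragma omp target exit data map(release: A[0:N]) // count 2 -> 1
//   #pragma omp target exit data map(delete: A[0:N])  // forced delete, freed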
/// Init device, should not be called directly.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(PM->RTLs.RequiresFlags);
  int32_t Ret = RTL->init_device(RTLDeviceID);
  if (Ret != OFFLOAD_SUCCESS)
    return;

  // The memory manager will only be disabled when users provide a threshold
  // via the environment variable \p LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD and
  // set it to 0.
  if (const char *Env = std::getenv("LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD")) {
    size_t Threshold = std::stoul(Env);
    if (Threshold)
      MemoryManager = std::make_unique<MemoryManagerTy>(*this, Threshold);
  } else
    MemoryManager = std::make_unique<MemoryManagerTy>(*this);

  IsInit = true;
}
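// For example (POSIX shell syntax), the memory manager can be disabled for a
// single run with:
//
//   LIBOMPTARGET_MEMORY_MANAGER_THRESHOLD=0 ./offload_app
//
// Any other numeric value is forwarded to MemoryManagerTy as its size
// threshold; see MemoryManager.h for how that threshold is interpreted.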
/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

// Load binary to device.
__tgt_target_table *DeviceTy::load_binary(void *Img) {
  RTL->Mtx.lock();
  __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
  RTL->Mtx.unlock();
  return rc;
}

void *DeviceTy::allocData(int64_t Size, void *HstPtr) {
  // If memory manager is enabled, we will allocate data via memory manager.
  if (MemoryManager)
    return MemoryManager->allocate(Size, HstPtr);

  return RTL->data_alloc(RTLDeviceID, Size, HstPtr);
}

int32_t DeviceTy::deleteData(void *TgtPtrBegin) {
  // If memory manager is enabled, we will deallocate data via memory manager.
  if (MemoryManager)
    return MemoryManager->free(TgtPtrBegin);

  return RTL->data_delete(RTLDeviceID, TgtPtrBegin);
}

// Submit data to device
int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin,
                             int64_t Size, __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_submit_async || !RTL->synchronize)
    return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
  else
    return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
                                  AsyncInfoPtr);
}

// Retrieve data from device
int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
                               int64_t Size, __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_retrieve_async || !RTL->synchronize)
    return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
  else
    return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin,
                                    Size, AsyncInfoPtr);
}

// Copy data from current device to destination device directly
int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
                               int64_t Size, __tgt_async_info *AsyncInfo) {
  if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) {
    assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
    return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
                              Size);
  } else
    return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID,
                                    DstPtr, Size, AsyncInfo);
}

// Run region on device
int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                            ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                            __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->run_region_async || !RTL->synchronize)
    return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
                           TgtVarsSize);
  else
    return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                 TgtOffsets, TgtVarsSize, AsyncInfoPtr);
}

// Run team region on device.
int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                                ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                                int32_t NumTeams, int32_t ThreadLimit,
                                uint64_t LoopTripCount,
                                __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->run_team_region_async || !RTL->synchronize)
    return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
                                LoopTripCount);
  else
    return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                      TgtOffsets, TgtVarsSize, NumTeams,
                                      ThreadLimit, LoopTripCount,
                                      AsyncInfoPtr);
}

// Whether data can be copied to DstDevice directly
bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
  if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
    return false;

  if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
    return (RTL->data_exchange != nullptr) ||
           (RTL->data_exchange_async != nullptr);

  return false;
}

int32_t DeviceTy::synchronize(__tgt_async_info *AsyncInfoPtr) {
  if (RTL->synchronize)
    return RTL->synchronize(RTLDeviceID, AsyncInfoPtr);
  return OFFLOAD_SUCCESS;
}
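// A minimal sketch of how the asynchronous wrappers above are meant to be
// chained by a caller (illustrative only; the real callers live in
// omptarget.cpp):
//
//   __tgt_async_info AsyncInfo;
//   Device.submitData(TgtPtr, HstPtr, Size, &AsyncInfo);
//   Device.runRegion(TgtEntry, TgtArgs, TgtOffsets, NumArgs, &AsyncInfo);
//   Device.retrieveData(HstPtr, TgtPtr, Size, &AsyncInfo);
//   Device.synchronize(&AsyncInfo); // blocks until the queued work finishes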
/// Check whether a device has an associated RTL and initialize it if it's not
/// already initialized.
bool device_is_ready(int device_num) {
  DP("Checking whether device %d is ready.\n", device_num);
  // Devices.size() can only change while registering a new
  // library, so try to acquire the lock of RTLs' mutex.
  PM->RTLsMtx.lock();
  size_t DevicesSize = PM->Devices.size();
  PM->RTLsMtx.unlock();
  if (DevicesSize <= (size_t)device_num) {
    DP("Device ID %d does not have a matching RTL\n", device_num);
    return false;
  }

  // Get device info
  DeviceTy &Device = PM->Devices[device_num];

  DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
     Device.RTLDeviceID, Device.IsInit);

  // Init the device if not done before
  if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
    DP("Failed to init device %d\n", device_num);
    return false;
  }

  DP("Device %d is ready to use.\n", device_num);

  return true;
}