//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "omptarget.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <string>
#include <thread>

int HostDataToTargetTy::addEventIfNecessary(DeviceTy &Device,
                                            AsyncInfoTy &AsyncInfo) const {
  // First, check if the user disabled atomic map transfer/malloc/dealloc.
  if (!PM->UseEventsForAtomicTransfers)
    return OFFLOAD_SUCCESS;

  void *Event = getEvent();
  bool NeedNewEvent = Event == nullptr;
  if (NeedNewEvent && Device.createEvent(&Event) != OFFLOAD_SUCCESS) {
    REPORT("Failed to create event\n");
    return OFFLOAD_FAIL;
  }

  // We cannot assume the event is non-null here because we do not know
  // whether the target supports events. If it does not, recordEvent should
  // always return success.
  if (Device.recordEvent(Event, AsyncInfo) != OFFLOAD_SUCCESS) {
    REPORT("Failed to set dependence on event " DPxMOD "\n", DPxPTR(Event));
    return OFFLOAD_FAIL;
  }

  if (NeedNewEvent)
    setEvent(Event);

  return OFFLOAD_SUCCESS;
}

DeviceTy::DeviceTy(RTLInfoTy *RTL)
    : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
      HasPendingGlobals(false), PendingCtorsDtors(), ShadowPtrMap(),
      PendingGlobalsMtx(), ShadowMtx() {}

DeviceTy::~DeviceTy() {
  if (DeviceID == -1 || !(getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE))
    return;

  ident_t loc = {0, 0, 0, 0, ";libomptarget;libomptarget;0;0;;"};
  dumpTargetPointerMappings(&loc, *this);
}

int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  // Check if entry exists
  auto It = HDTTMap->find(HstPtrBegin);
  if (It != HDTTMap->end()) {
    HostDataToTargetTy &HDTT = *It->HDTT;
    // Mapping already exists
    bool isValid = HDTT.HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                   HDTT.TgtPtrBegin == (uintptr_t)TgtPtrBegin;
    if (isValid) {
      DP("Attempt to re-associate the same device ptr+offset with the same "
         "host ptr, nothing to do\n");
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Not allowed to re-associate a different device ptr+offset with "
             "the same host ptr\n");
      return OFFLOAD_FAIL;
    }
  }

  // Mapping does not exist, allocate it with refCount=INF
  const HostDataToTargetTy &newEntry =
      *HDTTMap
           ->emplace(new HostDataToTargetTy(
               /*HstPtrBase=*/(uintptr_t)HstPtrBegin,
               /*HstPtrBegin=*/(uintptr_t)HstPtrBegin,
               /*HstPtrEnd=*/(uintptr_t)HstPtrBegin + Size,
               /*TgtPtrBegin=*/(uintptr_t)TgtPtrBegin,
               /*UseHoldRefCount=*/false, /*Name=*/nullptr,
               /*IsRefCountINF=*/true))
           .first->HDTT;
  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD
     ", HstEnd=" DPxMOD ", TgtBegin=" DPxMOD ", DynRefCount=%s, "
     "HoldRefCount=%s\n",
     DPxPTR(newEntry.HstPtrBase), DPxPTR(newEntry.HstPtrBegin),
     DPxPTR(newEntry.HstPtrEnd), DPxPTR(newEntry.TgtPtrBegin),
     newEntry.dynRefCountToStr().c_str(), newEntry.holdRefCountToStr().c_str());
  (void)newEntry;

  return OFFLOAD_SUCCESS;
}

int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  auto It = HDTTMap->find(HstPtrBegin);
  if (It != HDTTMap->end()) {
    HostDataToTargetTy &HDTT = *It->HDTT;
    // Mapping exists
    if (HDTT.getHoldRefCount()) {
      // This is based on OpenACC 3.1, sec 3.2.33 "acc_unmap_data", L3656-3657:
      // "It is an error to call acc_unmap_data if the structured reference
      // count for the pointer is not zero."
      REPORT("Trying to disassociate a pointer with a non-zero hold reference "
             "count\n");
    } else if (HDTT.isDynRefCountInf()) {
      DP("Association found, removing it\n");
      void *Event = HDTT.getEvent();
      delete &HDTT;
      if (Event)
        destroyEvent(Event);
      HDTTMap->erase(It);
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Trying to disassociate a pointer which was not mapped via "
             "omp_target_associate_ptr\n");
    }
  } else {
    REPORT("Association not found\n");
  }

  // Mapping not found
  return OFFLOAD_FAIL;
}

LookupResult DeviceTy::lookupMapping(HDTTMapAccessorTy &HDTTMap,
                                     void *HstPtrBegin, int64_t Size) {

  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
     DPxPTR(hp), Size);

  if (HDTTMap->empty())
    return lr;

  auto upper = HDTTMap->upper_bound(hp);

  if (Size == 0) {
    // OpenMP specification v5.1, "Pointer Initialization for Device Data
    // Environments": upper_bound satisfies
    //   std::prev(upper)->HDTT.HstPtrBegin <= hp < upper->HDTT.HstPtrBegin
    if (upper != HDTTMap->begin()) {
      lr.Entry = std::prev(upper)->HDTT;
      auto &HT = *lr.Entry;
      // The left side of the extended address range is satisfied:
      //   hp >= HT.HstPtrBegin || hp >= HT.HstPtrBase
      lr.Flags.IsContained = hp < HT.HstPtrEnd || hp < HT.HstPtrBase;
    }

    if (!lr.Flags.IsContained && upper != HDTTMap->end()) {
      lr.Entry = upper->HDTT;
      auto &HT = *lr.Entry;
      // The right side of the extended address range is satisfied:
      //   hp < HT.HstPtrEnd || hp < HT.HstPtrBase
      lr.Flags.IsContained = hp >= HT.HstPtrBase;
    }
  } else {
    // Check the left bin.
    if (upper != HDTTMap->begin()) {
      lr.Entry = std::prev(upper)->HDTT;
      auto &HT = *lr.Entry;
      // Is it contained?
      lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                             (hp + Size) <= HT.HstPtrEnd;
      // Does it extend beyond the mapped region?
      lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
    }

    // Check the right bin.
    if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
        upper != HDTTMap->end()) {
      lr.Entry = upper->HDTT;
      auto &HT = *lr.Entry;
      // Does it extend into an already mapped region?
      lr.Flags.ExtendsBefore =
          hp < HT.HstPtrBegin && (hp + Size) > HT.HstPtrBegin;
      // Does it extend beyond the mapped region?
      lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
    }

    if (lr.Flags.ExtendsBefore) {
      DP("WARNING: Pointer is not mapped but section extends into already "
         "mapped data\n");
    }
    if (lr.Flags.ExtendsAfter) {
      DP("WARNING: Pointer is already mapped but section extends beyond mapped "
         "region\n");
    }
  }

  return lr;
}

TargetPointerResultTy DeviceTy::getTargetPointer(
    void *HstPtrBegin, void *HstPtrBase, int64_t Size,
    map_var_info_t HstPtrName, bool HasFlagTo, bool HasFlagAlways,
    bool IsImplicit, bool UpdateRefCount, bool HasCloseModifier,
    bool HasPresentModifier, bool HasHoldModifier, AsyncInfoTy &AsyncInfo) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  void *TargetPointer = nullptr;
  bool IsHostPtr = false;
  bool IsNew = false;

  LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
  auto *Entry = LR.Entry;

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // cause the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (LR.Flags.IsContained ||
      ((LR.Flags.ExtendsBefore || LR.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *LR.Entry;
    const char *RefCountAction;
    if (UpdateRefCount) {
      // After this, reference count >= 1. If the reference count was 0 but the
      // entry was still there, we can reuse the data on the device and avoid a
      // new submission.
      HT.incRefCount(HasHoldModifier);
      RefCountAction = " (incremented)";
    } else {
      // It might have been allocated with the parent, but it's still new.
      IsNew = HT.getTotalRefCount() == 1;
      RefCountAction = " (update suppressed)";
    }
    const char *DynRefCountAction = HasHoldModifier ? "" : RefCountAction;
    const char *HoldRefCountAction = HasHoldModifier ? RefCountAction : "";
    uintptr_t Ptr = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    INFO(OMP_INFOTYPE_MAPPING_EXISTS, DeviceID,
         "Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%" PRId64 ", DynRefCount=%s%s, HoldRefCount=%s%s, Name=%s\n",
         (IsImplicit ? " (implicit)" : ""), DPxPTR(HstPtrBegin), DPxPTR(Ptr),
         Size, HT.dynRefCountToStr().c_str(), DynRefCountAction,
         HT.holdRefCountToStr().c_str(), HoldRefCountAction,
         (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    TargetPointer = (void *)Ptr;
  } else if ((LR.Flags.ExtendsBefore || LR.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64
            " bytes), but device allocation maps to host at " DPxMOD
            " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(Entry->HstPtrBegin),
            Entry->HstPtrEnd - Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does "
              "not exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that are
    // not privatized use the host address. Any explicitly mapped variables
    // also use the host address where correctness is not impeded. In all
    // other cases, maps are respected.
    // In addition to the mapping rules above, the close map modifier forces
    // the mapping of the variable to the device.
    if (Size) {
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " for unified shared "
         "memory\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size);
      IsHostPtr = true;
      TargetPointer = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry.
    IsNew = true;
    uintptr_t Ptr = (uintptr_t)allocData(Size, HstPtrBegin);
    Entry = HDTTMap
                ->emplace(new HostDataToTargetTy(
                    (uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                    (uintptr_t)HstPtrBegin + Size, Ptr, HasHoldModifier,
                    HstPtrName))
                .first->HDTT;
    INFO(OMP_INFOTYPE_MAPPING_CHANGED, DeviceID,
         "Creating new map entry with HstPtrBase=" DPxMOD
         ", HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", Size=%ld, "
         "DynRefCount=%s, HoldRefCount=%s, Name=%s\n",
         DPxPTR(HstPtrBase), DPxPTR(HstPtrBegin), DPxPTR(Ptr), Size,
         Entry->dynRefCountToStr().c_str(), Entry->holdRefCountToStr().c_str(),
         (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    TargetPointer = (void *)Ptr;
  }

  // If the target pointer is valid, and we need to transfer data, issue the
  // data transfer.
  if (TargetPointer && !IsHostPtr && HasFlagTo && (IsNew || HasFlagAlways)) {
    // Lock the entry before releasing the mapping table lock such that another
    // thread that could issue data movement will get the right result.
    std::lock_guard<decltype(*Entry)> LG(*Entry);
    // Release the mapping table lock right after the entry is locked.
    HDTTMap.destroy();

    DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n", Size,
       DPxPTR(HstPtrBegin), DPxPTR(TargetPointer));

    int Ret = submitData(TargetPointer, HstPtrBegin, Size, AsyncInfo);
    if (Ret != OFFLOAD_SUCCESS) {
      REPORT("Copying data to device failed.\n");
      // We will also return nullptr if the data movement fails because that
      // pointer points to a corrupted memory region so it doesn't make any
      // sense to continue to use it.
      TargetPointer = nullptr;
    } else if (Entry->addEventIfNecessary(*this, AsyncInfo) != OFFLOAD_SUCCESS)
      return {{false /* IsNewEntry */, false /* IsHostPointer */},
              nullptr /* Entry */,
              nullptr /* TargetPointer */};
  } else {
    // Release the mapping table lock directly.
    HDTTMap.destroy();
    // If not a host pointer and no present modifier, we need to wait for the
    // event if it exists.
    // Note: Entry might be nullptr because of zero length array section.
    if (Entry && !IsHostPtr && !HasPresentModifier) {
      std::lock_guard<decltype(*Entry)> LG(*Entry);
      void *Event = Entry->getEvent();
      if (Event) {
        int Ret = waitEvent(Event, AsyncInfo);
        if (Ret != OFFLOAD_SUCCESS) {
          // If it fails to wait for the event, we need to return nullptr in
          // case of any data race.
          REPORT("Failed to wait for event " DPxMOD ".\n", DPxPTR(Event));
          return {{false /* IsNewEntry */, false /* IsHostPointer */},
                  nullptr /* Entry */,
                  nullptr /* TargetPointer */};
        }
      }
    }
  }

  return {{IsNew, IsHostPtr}, Entry, TargetPointer};
}

// Used by targetDataBegin, targetDataEnd, targetDataUpdate and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
TargetPointerResultTy
DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                         bool UpdateRefCount, bool UseHoldRefCount,
                         bool &IsHostPtr, bool MustContain, bool ForceDelete) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  void *TargetPointer = NULL;
  bool IsNew = false;
  IsHostPtr = false;
  IsLast = false;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    IsLast = HT.decShouldRemove(UseHoldRefCount, ForceDelete);

    if (ForceDelete) {
      HT.resetRefCount(UseHoldRefCount);
      assert(IsLast == HT.decShouldRemove(UseHoldRefCount) &&
             "expected correct IsLast prediction for reset");
    }

    const char *RefCountAction;
    if (!UpdateRefCount) {
      RefCountAction = " (update suppressed)";
    } else if (IsLast) {
      // Mark the entry as to be deleted by this thread. Another thread might
      // reuse the entry and take "ownership" for the deletion while this
      // thread is waiting for data transfers. That is fine and the current
      // thread will simply skip the deletion step then.
      HT.setDeleteThreadId();
      HT.decRefCount(UseHoldRefCount);
      assert(HT.getTotalRefCount() == 0 &&
             "Expected zero reference count when deletion is scheduled");
      if (ForceDelete)
        RefCountAction = " (reset, delayed deletion)";
      else
        RefCountAction = " (decremented, delayed deletion)";
    } else {
      HT.decRefCount(UseHoldRefCount);
      RefCountAction = " (decremented)";
    }
    const char *DynRefCountAction = UseHoldRefCount ? "" : RefCountAction;
    const char *HoldRefCountAction = UseHoldRefCount ? RefCountAction : "";
    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    INFO(OMP_INFOTYPE_MAPPING_EXISTS, DeviceID,
         "Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
         "Size=%" PRId64 ", DynRefCount=%s%s, HoldRefCount=%s%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(tp), Size, HT.dynRefCountToStr().c_str(),
         DynRefCountAction, HT.holdRefCountToStr().c_str(), HoldRefCountAction);
    TargetPointer = (void *)tp;
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " for unified shared "
       "memory\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size);
    IsHostPtr = true;
    TargetPointer = HstPtrBegin;
  }

  return {{IsNew, IsHostPtr}, lr.Entry, TargetPointer};
}

// Return the target pointer begin (where the data will be moved).
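// This overload only translates the host address to its device counterpart;
// it does not modify reference counts and requires the caller to already hold
// the map accessor.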
void *DeviceTy::getTgtPtrBegin(HDTTMapAccessorTy &HDTTMap, void *HstPtrBegin,
                               int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

int DeviceTy::deallocTgtPtr(HDTTMapAccessorTy &HDTTMap, LookupResult LR,
                            int64_t Size) {
  // Check if the pointer is contained in any sub-nodes.
  if (!(LR.Flags.IsContained || LR.Flags.ExtendsBefore ||
        LR.Flags.ExtendsAfter)) {
    REPORT("Section to delete (hst addr " DPxMOD ") does not exist in the"
           " allocated memory\n",
           DPxPTR(LR.Entry->HstPtrBegin));
    return OFFLOAD_FAIL;
  }

  auto &HT = *LR.Entry;
  // Verify this thread is still in charge of deleting the entry.
  assert(HT.getTotalRefCount() == 0 &&
         HT.getDeleteThreadId() == std::this_thread::get_id() &&
         "Trying to delete entry that is in use or owned by another thread.");

  DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
     DPxPTR(HT.TgtPtrBegin), Size);
  deleteData((void *)HT.TgtPtrBegin);
  INFO(OMP_INFOTYPE_MAPPING_CHANGED, DeviceID,
       "Removing map entry with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
       ", Size=%" PRId64 ", Name=%s\n",
       DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size,
       (HT.HstPtrName) ? getNameFromMapping(HT.HstPtrName).c_str() : "unknown");
  void *Event = LR.Entry->getEvent();
  HDTTMap->erase(LR.Entry);
  delete LR.Entry;

  int Ret = OFFLOAD_SUCCESS;
  if (Event && destroyEvent(Event) != OFFLOAD_SUCCESS) {
    REPORT("Failed to destroy event " DPxMOD "\n", DPxPTR(Event));
    Ret = OFFLOAD_FAIL;
  }

  return Ret;
}

/// Init device, should not be called directly.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(PM->RTLs.RequiresFlags);
  int32_t Ret = RTL->init_device(RTLDeviceID);
  if (Ret != OFFLOAD_SUCCESS)
    return;

  IsInit = true;
}

/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

void DeviceTy::deinit() {
  if (RTL->deinit_device)
    RTL->deinit_device(RTLDeviceID);
}

// Load binary to device.
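// Loads are serialized per plugin via RTL->Mtx.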
__tgt_target_table *DeviceTy::load_binary(void *Img) {
  std::lock_guard<decltype(RTL->Mtx)> LG(RTL->Mtx);
  __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
  return rc;
}

void *DeviceTy::allocData(int64_t Size, void *HstPtr, int32_t Kind) {
  return RTL->data_alloc(RTLDeviceID, Size, HstPtr, Kind);
}

int32_t DeviceTy::deleteData(void *TgtPtrBegin) {
  return RTL->data_delete(RTLDeviceID, TgtPtrBegin);
}

// Submit data to device
int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size,
                             AsyncInfoTy &AsyncInfo) {
  if (getInfoLevel() & OMP_INFOTYPE_DATA_TRANSFER) {
    HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();
    LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
    auto *HT = &*LR.Entry;

    INFO(OMP_INFOTYPE_DATA_TRANSFER, DeviceID,
         "Copying data from host to device, HstPtr=" DPxMOD ", TgtPtr=" DPxMOD
         ", Size=%" PRId64 ", Name=%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin), Size,
         (HT && HT->HstPtrName) ? getNameFromMapping(HT->HstPtrName).c_str()
                                : "unknown");
  }

  if (!AsyncInfo || !RTL->data_submit_async || !RTL->synchronize)
    return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
  else
    return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
                                  AsyncInfo);
}

// Retrieve data from device
int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
                               int64_t Size, AsyncInfoTy &AsyncInfo) {
  if (getInfoLevel() & OMP_INFOTYPE_DATA_TRANSFER) {
    HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();
    LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
    auto *HT = &*LR.Entry;
    INFO(OMP_INFOTYPE_DATA_TRANSFER, DeviceID,
         "Copying data from device to host, TgtPtr=" DPxMOD ", HstPtr=" DPxMOD
         ", Size=%" PRId64 ", Name=%s\n",
         DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin), Size,
         (HT && HT->HstPtrName) ? getNameFromMapping(HT->HstPtrName).c_str()
                                : "unknown");
  }

  if (!RTL->data_retrieve_async || !RTL->synchronize)
    return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
  else
    return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size,
                                    AsyncInfo);
}

// Copy data from current device to destination device directly
int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
                               int64_t Size, AsyncInfoTy &AsyncInfo) {
  if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) {
    assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
    return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
                              Size);
  } else
    return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID,
                                    DstPtr, Size, AsyncInfo);
}

// Run region on device
int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                            ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                            AsyncInfoTy &AsyncInfo) {
  if (!RTL->run_region_async || !RTL->synchronize)
    return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
                           TgtVarsSize);
  else
    return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                 TgtOffsets, TgtVarsSize, AsyncInfo);
}

// Print information about the device, if the plugin supports it.
bool DeviceTy::printDeviceInfo(int32_t RTLDevId) {
  if (!RTL->print_device_info)
    return false;
  RTL->print_device_info(RTLDevId);
  return true;
}

// Run team region on device.
int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                                ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                                int32_t NumTeams, int32_t ThreadLimit,
                                uint64_t LoopTripCount,
                                AsyncInfoTy &AsyncInfo) {
  if (!RTL->run_team_region_async || !RTL->synchronize)
    return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
                                LoopTripCount);
  else
    return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                      TgtOffsets, TgtVarsSize, NumTeams,
                                      ThreadLimit, LoopTripCount, AsyncInfo);
}

// Whether data can be copied to DstDevice directly
bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
  if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
    return false;

  if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
    return (RTL->data_exchange != nullptr) ||
           (RTL->data_exchange_async != nullptr);

  return false;
}

int32_t DeviceTy::synchronize(AsyncInfoTy &AsyncInfo) {
  if (RTL->synchronize)
    return RTL->synchronize(RTLDeviceID, AsyncInfo);
  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::createEvent(void **Event) {
  if (RTL->create_event)
    return RTL->create_event(RTLDeviceID, Event);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::recordEvent(void *Event, AsyncInfoTy &AsyncInfo) {
  if (RTL->record_event)
    return RTL->record_event(RTLDeviceID, Event, AsyncInfo);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::waitEvent(void *Event, AsyncInfoTy &AsyncInfo) {
  if (RTL->wait_event)
    return RTL->wait_event(RTLDeviceID, Event, AsyncInfo);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::syncEvent(void *Event) {
  if (RTL->sync_event)
    return RTL->sync_event(RTLDeviceID, Event);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::destroyEvent(void *Event) {
  if (RTL->destroy_event)
    return RTL->destroy_event(RTLDeviceID, Event);

  return OFFLOAD_SUCCESS;
}

/// Check whether a device has an associated RTL and initialize it if it's not
/// already initialized.
bool device_is_ready(int device_num) {
  DP("Checking whether device %d is ready.\n", device_num);
  // Devices.size() can only change while registering a new library, so
  // acquire the RTLs' mutex before reading it.
  size_t DevicesSize;
  {
    std::lock_guard<decltype(PM->RTLsMtx)> LG(PM->RTLsMtx);
    DevicesSize = PM->Devices.size();
  }
  if (DevicesSize <= (size_t)device_num) {
    DP("Device ID %d does not have a matching RTL\n", device_num);
    return false;
  }

  // Get device info
  DeviceTy &Device = *PM->Devices[device_num];

  DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
     Device.RTLDeviceID, Device.IsInit);

  // Init the device if not done before
  if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
    DP("Failed to init device %d\n", device_num);
    return false;
  }

  DP("Device %d is ready to use.\n", device_num);

  return true;
}