//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "omptarget.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <string>
#include <thread>

int HostDataToTargetTy::addEventIfNecessary(DeviceTy &Device,
                                            AsyncInfoTy &AsyncInfo) const {
  // First, check if the user disabled atomic map transfer/malloc/dealloc.
  if (!PM->UseEventsForAtomicTransfers)
    return OFFLOAD_SUCCESS;

  void *Event = getEvent();
  bool NeedNewEvent = Event == nullptr;
  if (NeedNewEvent && Device.createEvent(&Event) != OFFLOAD_SUCCESS) {
    REPORT("Failed to create event\n");
    return OFFLOAD_FAIL;
  }

  // We cannot assume the event is non-null here because we don't know
  // whether the target supports events. If a target doesn't, recordEvent
  // should always return success.
  if (Device.recordEvent(Event, AsyncInfo) != OFFLOAD_SUCCESS) {
    REPORT("Failed to set dependence on event " DPxMOD "\n", DPxPTR(Event));
    return OFFLOAD_FAIL;
  }

  if (NeedNewEvent)
    setEvent(Event);

  return OFFLOAD_SUCCESS;
}

DeviceTy::DeviceTy(RTLInfoTy *RTL)
    : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
      HasPendingGlobals(false), PendingCtorsDtors(), ShadowPtrMap(),
      PendingGlobalsMtx(), ShadowMtx() {}

DeviceTy::~DeviceTy() {
  if (DeviceID == -1 || !(getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE))
    return;

  ident_t loc = {0, 0, 0, 0, ";libomptarget;libomptarget;0;0;;"};
  dumpTargetPointerMappings(&loc, *this);
}

int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  // Check if entry exists.
  auto It = HDTTMap->find(HstPtrBegin);
  if (It != HDTTMap->end()) {
    HostDataToTargetTy &HDTT = *It->HDTT;
    // Mapping already exists.
    bool isValid = HDTT.HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                   HDTT.TgtPtrBegin == (uintptr_t)TgtPtrBegin;
    if (isValid) {
      DP("Attempt to re-associate the same device ptr+offset with the same "
         "host ptr, nothing to do\n");
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Not allowed to re-associate a different device ptr+offset with "
             "the same host ptr\n");
      return OFFLOAD_FAIL;
    }
  }

  // Mapping does not exist, allocate it with refCount=INF.
  const HostDataToTargetTy &newEntry =
      *HDTTMap
           ->emplace(new HostDataToTargetTy(
               /*HstPtrBase=*/(uintptr_t)HstPtrBegin,
               /*HstPtrBegin=*/(uintptr_t)HstPtrBegin,
               /*HstPtrEnd=*/(uintptr_t)HstPtrBegin + Size,
               /*TgtPtrBegin=*/(uintptr_t)TgtPtrBegin,
               /*UseHoldRefCount=*/false, /*Name=*/nullptr,
               /*IsRefCountINF=*/true))
           .first->HDTT;
  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD
     ", HstEnd=" DPxMOD ", TgtBegin=" DPxMOD ", DynRefCount=%s, "
     "HoldRefCount=%s\n",
     DPxPTR(newEntry.HstPtrBase), DPxPTR(newEntry.HstPtrBegin),
     DPxPTR(newEntry.HstPtrEnd),
     DPxPTR(newEntry.TgtPtrBegin), newEntry.dynRefCountToStr().c_str(),
     newEntry.holdRefCountToStr().c_str());
  (void)newEntry;

  return OFFLOAD_SUCCESS;
}

int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  auto It = HDTTMap->find(HstPtrBegin);
  if (It != HDTTMap->end()) {
    HostDataToTargetTy &HDTT = *It->HDTT;
    // Mapping exists.
    if (HDTT.getHoldRefCount()) {
      // This is based on OpenACC 3.1, sec 3.2.33 "acc_unmap_data", L3656-3657:
      // "It is an error to call acc_unmap_data if the structured reference
      // count for the pointer is not zero."
      REPORT("Trying to disassociate a pointer with a non-zero hold reference "
             "count\n");
    } else if (HDTT.isDynRefCountInf()) {
      DP("Association found, removing it\n");
      void *Event = HDTT.getEvent();
      delete &HDTT;
      if (Event)
        destroyEvent(Event);
      HDTTMap->erase(It);
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Trying to disassociate a pointer which was not mapped via "
             "omp_target_associate_ptr\n");
    }
  } else {
    REPORT("Association not found\n");
  }

  // Mapping not found.
  return OFFLOAD_FAIL;
}

LookupResult DeviceTy::lookupMapping(HDTTMapAccessorTy &HDTTMap,
                                     void *HstPtrBegin, int64_t Size) {

  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
     DPxPTR(hp), Size);

  if (HDTTMap->empty())
    return lr;

  auto upper = HDTTMap->upper_bound(hp);
  // Check the left bin.
  if (upper != HDTTMap->begin()) {
    lr.Entry = std::prev(upper)->HDTT;
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                           (hp + Size) <= HT.HstPtrEnd;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  // Check the right bin.
  if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
      upper != HDTTMap->end()) {
    lr.Entry = upper->HDTT;
    auto &HT = *lr.Entry;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore =
        hp < HT.HstPtrBegin && (hp + Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
       "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
       "region\n");
  }

  return lr;
}

TargetPointerResultTy DeviceTy::getTargetPointer(
    void *HstPtrBegin, void *HstPtrBase, int64_t Size,
    map_var_info_t HstPtrName, bool HasFlagTo, bool HasFlagAlways,
    bool IsImplicit, bool UpdateRefCount, bool HasCloseModifier,
    bool HasPresentModifier, bool HasHoldModifier, AsyncInfoTy &AsyncInfo) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  void *TargetPointer = nullptr;
  bool IsHostPtr = false;
  bool IsNew = false;

  LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
  auto *Entry = LR.Entry;

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // lead to the IsContained flag being true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (LR.Flags.IsContained ||
      ((LR.Flags.ExtendsBefore || LR.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *LR.Entry;
    const char *RefCountAction;
    if (UpdateRefCount) {
      // After this, reference count >= 1. If the reference count was 0 but
      // the entry was still there we can reuse the data on the device and
      // avoid a new submission.
      HT.incRefCount(HasHoldModifier);
      RefCountAction = " (incremented)";
    } else {
      // It might have been allocated with the parent, but it's still new.
      IsNew = HT.getTotalRefCount() == 1;
      RefCountAction = " (update suppressed)";
    }
    const char *DynRefCountAction = HasHoldModifier ? "" : RefCountAction;
    const char *HoldRefCountAction = HasHoldModifier ? RefCountAction : "";
    uintptr_t Ptr = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    INFO(OMP_INFOTYPE_MAPPING_EXISTS, DeviceID,
         "Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%" PRId64 ", DynRefCount=%s%s, HoldRefCount=%s%s, Name=%s\n",
         (IsImplicit ? " (implicit)" : ""), DPxPTR(HstPtrBegin), DPxPTR(Ptr),
         Size, HT.dynRefCountToStr().c_str(), DynRefCountAction,
         HT.holdRefCountToStr().c_str(), HoldRefCountAction,
         (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    TargetPointer = (void *)Ptr;
  } else if ((LR.Flags.ExtendsBefore || LR.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64
            " bytes), but device allocation maps to host at " DPxMOD
            " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(Entry->HstPtrBegin),
            Entry->HstPtrEnd - Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does "
              "not exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that
    // are not privatized use the host address. Any explicitly mapped
    // variables also use the host address where correctness is not impeded.
    // In all other cases maps are respected.
    // In addition to the mapping rules above, the close map modifier forces
    // the mapping of the variable to the device.
    if (Size) {
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " for unified shared "
         "memory\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size);
      IsHostPtr = true;
      TargetPointer = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry.
    IsNew = true;
    uintptr_t Ptr = (uintptr_t)allocData(Size, HstPtrBegin);
    Entry = HDTTMap
                ->emplace(new HostDataToTargetTy(
                    (uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                    (uintptr_t)HstPtrBegin + Size, Ptr, HasHoldModifier,
                    HstPtrName))
                .first->HDTT;
    INFO(OMP_INFOTYPE_MAPPING_CHANGED, DeviceID,
         "Creating new map entry with "
         "HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", Size=%" PRId64 ", "
         "DynRefCount=%s, HoldRefCount=%s, Name=%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(Ptr), Size,
         Entry->dynRefCountToStr().c_str(), Entry->holdRefCountToStr().c_str(),
         (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    TargetPointer = (void *)Ptr;
  }

  // If the target pointer is valid, and we need to transfer data, issue the
  // data transfer.
  if (TargetPointer && !IsHostPtr && HasFlagTo && (IsNew || HasFlagAlways)) {
    // Lock the entry before releasing the mapping table lock such that another
    // thread that could issue data movement will get the right result.
    std::lock_guard<decltype(*Entry)> LG(*Entry);
    // Release the mapping table lock right after the entry is locked.
    HDTTMap.destroy();

    DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n", Size,
       DPxPTR(HstPtrBegin), DPxPTR(TargetPointer));

    int Ret = submitData(TargetPointer, HstPtrBegin, Size, AsyncInfo);
    if (Ret != OFFLOAD_SUCCESS) {
      REPORT("Copying data to device failed.\n");
      // We will also return nullptr if the data movement fails because that
      // pointer points to a corrupted memory region so it doesn't make any
      // sense to continue to use it.
      TargetPointer = nullptr;
    } else if (Entry->addEventIfNecessary(*this, AsyncInfo) != OFFLOAD_SUCCESS)
      return {{false /* IsNewEntry */, false /* IsHostPointer */},
              nullptr /* Entry */,
              nullptr /* TargetPointer */};
  } else {
    // Release the mapping table lock directly.
    HDTTMap.destroy();
    // If not a host pointer and no present modifier, we need to wait for the
    // event if it exists.
    // Note: Entry might be nullptr because of zero length array section.
    if (Entry && !IsHostPtr && !HasPresentModifier) {
      std::lock_guard<decltype(*Entry)> LG(*Entry);
      void *Event = Entry->getEvent();
      if (Event) {
        int Ret = waitEvent(Event, AsyncInfo);
        if (Ret != OFFLOAD_SUCCESS) {
          // If it fails to wait for the event, we need to return nullptr in
          // case of any data race.
          REPORT("Failed to wait for event " DPxMOD ".\n", DPxPTR(Event));
          return {{false /* IsNewEntry */, false /* IsHostPointer */},
                  nullptr /* Entry */,
                  nullptr /* TargetPointer */};
        }
      }
    }
  }

  return {{IsNew, IsHostPtr}, Entry, TargetPointer};
}

// Used by targetDataBegin, targetDataEnd, targetDataUpdate and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
TargetPointerResultTy
DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                         bool UpdateRefCount, bool UseHoldRefCount,
                         bool &IsHostPtr, bool MustContain, bool ForceDelete) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  void *TargetPointer = NULL;
  bool IsNew = false;
  IsHostPtr = false;
  IsLast = false;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    IsLast = HT.decShouldRemove(UseHoldRefCount, ForceDelete);

    if (ForceDelete) {
      HT.resetRefCount(UseHoldRefCount);
      assert(IsLast == HT.decShouldRemove(UseHoldRefCount) &&
             "expected correct IsLast prediction for reset");
    }

    const char *RefCountAction;
    if (!UpdateRefCount) {
      RefCountAction = " (update suppressed)";
    } else if (IsLast) {
      // Mark the entry as to be deleted by this thread. Another thread might
      // reuse the entry and take "ownership" for the deletion while this
      // thread is waiting for data transfers. That is fine and the current
      // thread will simply skip the deletion step then.
      HT.setDeleteThreadId();
      HT.decRefCount(UseHoldRefCount);
      assert(HT.getTotalRefCount() == 0 &&
             "Expected zero reference count when deletion is scheduled");
      if (ForceDelete)
        RefCountAction = " (reset, delayed deletion)";
      else
        RefCountAction = " (decremented, delayed deletion)";
    } else {
      HT.decRefCount(UseHoldRefCount);
      RefCountAction = " (decremented)";
    }
    const char *DynRefCountAction = UseHoldRefCount ? "" : RefCountAction;
    const char *HoldRefCountAction = UseHoldRefCount ? RefCountAction : "";
    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    INFO(OMP_INFOTYPE_MAPPING_EXISTS, DeviceID,
         "Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
         "Size=%" PRId64 ", DynRefCount=%s%s, HoldRefCount=%s%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(tp), Size, HT.dynRefCountToStr().c_str(),
         DynRefCountAction, HT.holdRefCountToStr().c_str(), HoldRefCountAction);
    TargetPointer = (void *)tp;
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " for unified shared "
       "memory\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size);
    IsHostPtr = true;
    TargetPointer = HstPtrBegin;
  }

  return {{IsNew, IsHostPtr}, lr.Entry, TargetPointer};
}

// Return the target pointer begin (where the data will be moved).
void *DeviceTy::getTgtPtrBegin(HDTTMapAccessorTy &HDTTMap, void *HstPtrBegin,
                               int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

int DeviceTy::deallocTgtPtr(HDTTMapAccessorTy &HDTTMap, LookupResult LR,
                            int64_t Size) {
  // Check if the pointer is contained in any sub-nodes.
  if (!(LR.Flags.IsContained || LR.Flags.ExtendsBefore ||
        LR.Flags.ExtendsAfter)) {
    REPORT("Section to delete (hst addr " DPxMOD ") does not exist in the"
           " allocated memory\n",
           DPxPTR(LR.Entry->HstPtrBegin));
    return OFFLOAD_FAIL;
  }

  auto &HT = *LR.Entry;
  // Verify this thread is still in charge of deleting the entry.
  assert(HT.getTotalRefCount() == 0 &&
         HT.getDeleteThreadId() == std::this_thread::get_id() &&
         "Trying to delete entry that is in use or owned by another thread.");

  DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
     DPxPTR(HT.TgtPtrBegin), Size);
  deleteData((void *)HT.TgtPtrBegin);
  INFO(OMP_INFOTYPE_MAPPING_CHANGED, DeviceID,
       "Removing map entry with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
       ", Size=%" PRId64 ", Name=%s\n",
       DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size,
       (HT.HstPtrName) ? getNameFromMapping(HT.HstPtrName).c_str() : "unknown");
  void *Event = LR.Entry->getEvent();
  HDTTMap->erase(LR.Entry);
  delete LR.Entry;

  int Ret = OFFLOAD_SUCCESS;
  if (Event && destroyEvent(Event) != OFFLOAD_SUCCESS) {
    REPORT("Failed to destroy event " DPxMOD "\n", DPxPTR(Event));
    Ret = OFFLOAD_FAIL;
  }

  return Ret;
}

/// Init device, should not be called directly.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(PM->RTLs.RequiresFlags);
  int32_t Ret = RTL->init_device(RTLDeviceID);
  if (Ret != OFFLOAD_SUCCESS)
    return;

  IsInit = true;
}

/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

void DeviceTy::deinit() {
  if (RTL->deinit_device)
    RTL->deinit_device(RTLDeviceID);
}

// Load binary to device.
__tgt_target_table *DeviceTy::load_binary(void *Img) {
  std::lock_guard<decltype(RTL->Mtx)> LG(RTL->Mtx);
  __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
  return rc;
}

void *DeviceTy::allocData(int64_t Size, void *HstPtr, int32_t Kind) {
  return RTL->data_alloc(RTLDeviceID, Size, HstPtr, Kind);
}

int32_t DeviceTy::deleteData(void *TgtPtrBegin) {
  return RTL->data_delete(RTLDeviceID, TgtPtrBegin);
}

// Submit data to device.
int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size,
                             AsyncInfoTy &AsyncInfo) {
  if (getInfoLevel() & OMP_INFOTYPE_DATA_TRANSFER) {
    HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();
    LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
    auto *HT = &*LR.Entry;

    INFO(OMP_INFOTYPE_DATA_TRANSFER, DeviceID,
         "Copying data from host to device, HstPtr=" DPxMOD ", TgtPtr=" DPxMOD
         ", Size=%" PRId64 ", Name=%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin), Size,
         (HT && HT->HstPtrName) ?
             getNameFromMapping(HT->HstPtrName).c_str()
             : "unknown");
  }

  if (!AsyncInfo || !RTL->data_submit_async || !RTL->synchronize)
    return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
  else
    return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
                                  AsyncInfo);
}

// Retrieve data from device.
int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
                               int64_t Size, AsyncInfoTy &AsyncInfo) {
  if (getInfoLevel() & OMP_INFOTYPE_DATA_TRANSFER) {
    HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();
    LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
    auto *HT = &*LR.Entry;
    INFO(OMP_INFOTYPE_DATA_TRANSFER, DeviceID,
         "Copying data from device to host, TgtPtr=" DPxMOD ", HstPtr=" DPxMOD
         ", Size=%" PRId64 ", Name=%s\n",
         DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin), Size,
         (HT && HT->HstPtrName) ? getNameFromMapping(HT->HstPtrName).c_str()
                                : "unknown");
  }

  if (!RTL->data_retrieve_async || !RTL->synchronize)
    return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
  else
    return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size,
                                    AsyncInfo);
}

// Copy data from current device to destination device directly.
int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr,
                               int64_t Size, AsyncInfoTy &AsyncInfo) {
  if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) {
    assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
    return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
                              Size);
  } else
    return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID,
                                    DstPtr, Size, AsyncInfo);
}

// Run region on device.
int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                            ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                            AsyncInfoTy &AsyncInfo) {
  // Fall back to the synchronous entry point if the plugin does not provide
  // the asynchronous variant or cannot synchronize afterwards.
  if (!RTL->run_region_async || !RTL->synchronize)
    return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
                           TgtVarsSize);
  else
    return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                 TgtOffsets, TgtVarsSize, AsyncInfo);
}

// Print device info, if supported by the plugin.
bool DeviceTy::printDeviceInfo(int32_t RTLDevId) {
  if (!RTL->print_device_info)
    return false;
  RTL->print_device_info(RTLDevId);
  return true;
}

// Run team region on device.
int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                                ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                                int32_t NumTeams, int32_t ThreadLimit,
                                uint64_t LoopTripCount,
                                AsyncInfoTy &AsyncInfo) {
  if (!RTL->run_team_region_async || !RTL->synchronize)
    return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
                                LoopTripCount);
  else
    return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                      TgtOffsets, TgtVarsSize, NumTeams,
                                      ThreadLimit, LoopTripCount, AsyncInfo);
}

// Whether data can be copied to DstDevice directly.
bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
  if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
    return false;

  if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
    return (RTL->data_exchange != nullptr) ||
           (RTL->data_exchange_async != nullptr);

  return false;
}

int32_t DeviceTy::synchronize(AsyncInfoTy &AsyncInfo) {
  if (RTL->synchronize)
    return RTL->synchronize(RTLDeviceID, AsyncInfo);
  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::createEvent(void **Event) {
  if (RTL->create_event)
    return RTL->create_event(RTLDeviceID, Event);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::recordEvent(void *Event, AsyncInfoTy &AsyncInfo) {
  if (RTL->record_event)
    return RTL->record_event(RTLDeviceID, Event, AsyncInfo);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::waitEvent(void *Event, AsyncInfoTy &AsyncInfo) {
  if (RTL->wait_event)
    return RTL->wait_event(RTLDeviceID, Event, AsyncInfo);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::syncEvent(void *Event) {
  if (RTL->sync_event)
    return RTL->sync_event(RTLDeviceID, Event);

  return OFFLOAD_SUCCESS;
}

int32_t DeviceTy::destroyEvent(void *Event) {
  if (RTL->destroy_event)
    return RTL->destroy_event(RTLDeviceID, Event);

  return OFFLOAD_SUCCESS;
}

/// Check whether a device has an associated RTL and initialize it if it's not
/// already initialized.
bool device_is_ready(int device_num) {
  DP("Checking whether device %d is ready.\n", device_num);
  // Devices.size() can only change while registering a new
  // library, so try to acquire the lock of RTLs' mutex.
  size_t DevicesSize;
  {
    std::lock_guard<decltype(PM->RTLsMtx)> LG(PM->RTLsMtx);
    DevicesSize = PM->Devices.size();
  }
  if (DevicesSize <= (size_t)device_num) {
    DP("Device ID %d does not have a matching RTL\n", device_num);
    return false;
  }

  // Get device info.
  DeviceTy &Device = *PM->Devices[device_num];

  DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
     Device.RTLDeviceID, Device.IsInit);

  // Init the device if not done before.
  if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
    DP("Failed to init device %d\n", device_num);
    return false;
  }

  DP("Device %d is ready to use.\n", device_num);

  return true;
}