//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "omptarget.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <string>

/// Record an event on \p AsyncInfo for this mapping entry so later operations
/// can order themselves after the in-flight data transfer. An event is created
/// lazily the first time one is needed; if the entry already owns one, that
/// event is re-recorded. Returns OFFLOAD_SUCCESS or OFFLOAD_FAIL.
int HostDataToTargetTy::addEventIfNecessary(DeviceTy &Device,
                                            AsyncInfoTy &AsyncInfo) const {
  // First, check if the user disabled atomic map transfer/malloc/dealloc.
  if (!PM->UseEventsForAtomicTransfers)
    return OFFLOAD_SUCCESS;

  void *Event = getEvent();
  bool NeedNewEvent = Event == nullptr;
  if (NeedNewEvent && Device.createEvent(&Event) != OFFLOAD_SUCCESS) {
    REPORT("Failed to create event\n");
    return OFFLOAD_FAIL;
  }

  // We cannot assume the event should not be nullptr because we don't
  // know if the target support event. But if a target doesn't,
  // recordEvent should always return success.
  if (Device.recordEvent(Event, AsyncInfo) != OFFLOAD_SUCCESS) {
    REPORT("Failed to set dependence on event " DPxMOD "\n", DPxPTR(Event));
    return OFFLOAD_FAIL;
  }

  // Only publish the event on the entry if we created it here; otherwise the
  // entry already owned it before this call.
  if (NeedNewEvent)
    setEvent(Event);

  return OFFLOAD_SUCCESS;
}

// DeviceID and RTLDeviceID start at -1; they are assigned when the device is
// registered with a plugin.
DeviceTy::DeviceTy(RTLInfoTy *RTL)
    : DeviceID(-1), RTL(RTL), RTLDeviceID(-1), IsInit(false), InitFlag(),
      HasPendingGlobals(false), PendingCtorsDtors(), ShadowPtrMap(),
      PendingGlobalsMtx(), ShadowMtx() {}

DeviceTy::~DeviceTy() {
  // Dump the remaining host<->target mappings on destruction, but only for
  // devices that were actually assigned an ID and only when the user asked
  // for it via the info level.
  if (DeviceID == -1 || !(getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE))
    return;

  ident_t loc = {0, 0, 0, 0, ";libomptarget;libomptarget;0;0;;"};
  dumpTargetPointerMappings(&loc, *this);
}

/// Associate host pointer \p HstPtrBegin with device pointer \p TgtPtrBegin
/// for \p Size bytes (backs omp_target_associate_ptr). The new entry gets an
/// infinite dynamic reference count so regular unmapping never deletes it.
/// Returns OFFLOAD_SUCCESS, or OFFLOAD_FAIL when a different association for
/// the same host pointer already exists.
int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  // Check if entry exists
  auto It = HDTTMap->find(HstPtrBegin);
  if (It != HDTTMap->end()) {
    HostDataToTargetTy &HDTT = *It->HDTT;
    // Mapping already exists; re-association is only a no-op if it describes
    // exactly the same range and device address.
    bool isValid = HDTT.HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                   HDTT.TgtPtrBegin == (uintptr_t)TgtPtrBegin;
    if (isValid) {
      DP("Attempt to re-associate the same device ptr+offset with the same "
         "host ptr, nothing to do\n");
      return OFFLOAD_SUCCESS;
    } else {
      REPORT("Not allowed to re-associate a different device ptr+offset with "
             "the same host ptr\n");
      return OFFLOAD_FAIL;
    }
  }

  // Mapping does not exist, allocate it with refCount=INF
  const HostDataToTargetTy &newEntry =
      *HDTTMap
           ->emplace(new HostDataToTargetTy(
               /*HstPtrBase=*/(uintptr_t)HstPtrBegin,
               /*HstPtrBegin=*/(uintptr_t)HstPtrBegin,
               /*HstPtrEnd=*/(uintptr_t)HstPtrBegin + Size,
               /*TgtPtrBegin=*/(uintptr_t)TgtPtrBegin,
               /*UseHoldRefCount=*/false, /*Name=*/nullptr,
               /*IsRefCountINF=*/true))
           .first->HDTT;
  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD
     ", HstEnd=" DPxMOD ", TgtBegin=" DPxMOD ", DynRefCount=%s, "
     "HoldRefCount=%s\n",
     DPxPTR(newEntry.HstPtrBase), DPxPTR(newEntry.HstPtrBegin),
     DPxPTR(newEntry.HstPtrEnd), DPxPTR(newEntry.TgtPtrBegin),
     newEntry.dynRefCountToStr().c_str(), newEntry.holdRefCountToStr().c_str());
  // Silence "unused variable" when DP compiles to nothing.
  (void)newEntry;

  return OFFLOAD_SUCCESS;
}

/// Remove the association created by associatePtr (backs
/// omp_target_disassociate_ptr). Only entries with an infinite dynamic
/// reference count and a zero hold reference count may be removed.
int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  auto It = HDTTMap->find(HstPtrBegin);
  if (It != HDTTMap->end()) {
    HostDataToTargetTy &HDTT = *It->HDTT;
    // Mapping exists
    if (HDTT.getHoldRefCount()) {
      // This is based on OpenACC 3.1, sec 3.2.33 "acc_unmap_data", L3656-3657:
      // "It is an error to call acc_unmap_data if the structured reference
      // count for the pointer is not zero."
      REPORT("Trying to disassociate a pointer with a non-zero hold reference "
             "count\n");
    } else if (HDTT.isDynRefCountInf()) {
      DP("Association found, removing it\n");
      // Read the event before deleting the entry; destroy it after the delete
      // so no other code can observe a half-torn-down entry.
      void *Event = HDTT.getEvent();
      delete &HDTT;
      if (Event)
        destroyEvent(Event);
      HDTTMap->erase(It);
      return OFFLOAD_SUCCESS;
    } else {
      // A finite dynamic refcount means this mapping came from a map clause,
      // not from omp_target_associate_ptr.
      REPORT("Trying to disassociate a pointer which was not mapped via "
             "omp_target_associate_ptr\n");
    }
  } else {
    REPORT("Association not found\n");
  }

  // Mapping not found
  return OFFLOAD_FAIL;
}

/// Look up the range [\p HstPtrBegin, \p HstPtrBegin + \p Size) in the
/// mapping table. The caller must already hold the exclusive accessor
/// \p HDTTMap. The result carries the closest candidate entry plus flags
/// telling whether the range is fully contained or only partially overlaps
/// (extends before/after) that entry.
LookupResult DeviceTy::lookupMapping(HDTTMapAccessorTy &HDTTMap,
                                     void *HstPtrBegin, int64_t Size) {

  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
     DPxPTR(hp), Size);

  if (HDTTMap->empty())
    return lr;

  // The table is ordered by HstPtrBegin, so the entry overlapping hp, if any,
  // is either the one just before upper_bound(hp) or upper_bound(hp) itself.
  auto upper = HDTTMap->upper_bound(hp);
  // check the left bin
  if (upper != HDTTMap->begin()) {
    lr.Entry = std::prev(upper)->HDTT;
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                           (hp + Size) <= HT.HstPtrEnd;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  // check the right bin
  if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
      upper != HDTTMap->end()) {
    lr.Entry = upper->HDTT;
    auto &HT = *lr.Entry;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore =
        hp < HT.HstPtrBegin && (hp + Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
       "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
       "region\n");
  }

  return lr;
}

/// Map [\p HstPtrBegin, \p HstPtrBegin + \p Size) to the device: reuse an
/// existing entry (adjusting its reference counts), return the host address
/// under unified shared memory, or allocate device memory and create a new
/// entry. When a transfer is needed (\p HasFlagTo and the entry is new or
/// \p HasFlagAlways), the copy is issued on \p AsyncInfo. Returns the entry,
/// the target pointer (nullptr on failure), and IsNew/IsHostPtr flags.
TargetPointerResultTy DeviceTy::getTargetPointer(
    void *HstPtrBegin, void *HstPtrBase, int64_t Size,
    map_var_info_t HstPtrName, bool HasFlagTo, bool HasFlagAlways,
    bool IsImplicit, bool UpdateRefCount, bool HasCloseModifier,
    bool HasPresentModifier, bool HasHoldModifier, AsyncInfoTy &AsyncInfo) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  void *TargetPointer = nullptr;
  bool IsHostPtr = false;
  bool IsNew = false;

  LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size);
  auto *Entry = LR.Entry;

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // lead to the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (LR.Flags.IsContained ||
      ((LR.Flags.ExtendsBefore || LR.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *LR.Entry;
    const char *RefCountAction;
    assert(HT.getTotalRefCount() > 0 && "expected existing RefCount > 0");
    if (UpdateRefCount) {
      // After this, RefCount > 1.
      HT.incRefCount(HasHoldModifier);
      RefCountAction = " (incremented)";
    } else {
      // It might have been allocated with the parent, but it's still new.
      IsNew = HT.getTotalRefCount() == 1;
      RefCountAction = " (update suppressed)";
    }
    const char *DynRefCountAction = HasHoldModifier ? "" : RefCountAction;
    const char *HoldRefCountAction = HasHoldModifier ? RefCountAction : "";
    // Translate the host address to the device address via the entry's base.
    uintptr_t Ptr = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    INFO(OMP_INFOTYPE_MAPPING_EXISTS, DeviceID,
         "Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%" PRId64 ", DynRefCount=%s%s, HoldRefCount=%s%s, Name=%s\n",
         (IsImplicit ? " (implicit)" : ""), DPxPTR(HstPtrBegin), DPxPTR(Ptr),
         Size, HT.dynRefCountToStr().c_str(), DynRefCountAction,
         HT.holdRefCountToStr().c_str(), HoldRefCountAction,
         (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    TargetPointer = (void *)Ptr;
  } else if ((LR.Flags.ExtendsBefore || LR.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64
            " bytes), but device allocation maps to host at " DPxMOD
            " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(Entry->HstPtrBegin),
            Entry->HstPtrEnd - Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does not "
              "exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that are
    // not privatized use host address. Any explicitly mapped variables also use
    // host address where correctness is not impeded. In all other cases maps
    // are respected.
    // In addition to the mapping rules above, the close map modifier forces the
    // mapping of the variable to the device.
    if (Size) {
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " for unified shared "
         "memory\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size);
      IsHostPtr = true;
      TargetPointer = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    // 'present' requires the mapping to already exist; report and fall
    // through with TargetPointer == nullptr.
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry for it.
    IsNew = true;
    uintptr_t Ptr = (uintptr_t)allocData(Size, HstPtrBegin);
    Entry = HDTTMap
                ->emplace(new HostDataToTargetTy(
                    (uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                    (uintptr_t)HstPtrBegin + Size, Ptr, HasHoldModifier,
                    HstPtrName))
                .first->HDTT;
    INFO(OMP_INFOTYPE_MAPPING_CHANGED, DeviceID,
         "Creating new map entry with "
         "HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", Size=%ld, "
         "DynRefCount=%s, HoldRefCount=%s, Name=%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(Ptr), Size,
         Entry->dynRefCountToStr().c_str(), Entry->holdRefCountToStr().c_str(),
         (HstPtrName) ? getNameFromMapping(HstPtrName).c_str() : "unknown");
    TargetPointer = (void *)Ptr;
  }

  // If the target pointer is valid, and we need to transfer data, issue the
  // data transfer.
  if (TargetPointer && !IsHostPtr && HasFlagTo && (IsNew || HasFlagAlways)) {
    // Lock the entry before releasing the mapping table lock such that another
    // thread that could issue data movement will get the right result.
    std::lock_guard<decltype(*Entry)> LG(*Entry);
    // Release the mapping table lock right after the entry is locked.
    HDTTMap.destroy();

    DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n", Size,
       DPxPTR(HstPtrBegin), DPxPTR(TargetPointer));

    int Ret = submitData(TargetPointer, HstPtrBegin, Size, AsyncInfo);
    if (Ret != OFFLOAD_SUCCESS) {
      REPORT("Copying data to device failed.\n");
      // We will also return nullptr if the data movement fails because that
      // pointer points to a corrupted memory region so it doesn't make any
      // sense to continue to use it.
      TargetPointer = nullptr;
    } else if (Entry->addEventIfNecessary(*this, AsyncInfo) != OFFLOAD_SUCCESS)
      return {{false /* IsNewEntry */, false /* IsHostPointer */},
              nullptr /* Entry */,
              nullptr /* TargetPointer */};
  } else {
    // Release the mapping table lock directly.
    HDTTMap.destroy();
    // If not a host pointer and no present modifier, we need to wait for the
    // event if it exists.
    // Note: Entry might be nullptr because of zero length array section.
    if (Entry && !IsHostPtr && !HasPresentModifier) {
      // The entry's own lock serializes event access against a concurrent
      // transfer issued by another thread (see the branch above).
      std::lock_guard<decltype(*Entry)> LG(*Entry);
      void *Event = Entry->getEvent();
      if (Event) {
        int Ret = waitEvent(Event, AsyncInfo);
        if (Ret != OFFLOAD_SUCCESS) {
          // If it fails to wait for the event, we need to return nullptr in
          // case of any data race.
          REPORT("Failed to wait for event " DPxMOD ".\n", DPxPTR(Event));
          return {{false /* IsNewEntry */, false /* IsHostPointer */},
                  nullptr /* Entry */,
                  nullptr /* TargetPointer */};
        }
      }
    }
  }

  return {{IsNew, IsHostPtr}, Entry, TargetPointer};
}

// Used by targetDataBegin, targetDataEnd, targetDataUpdate and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
// \p IsLast is set when this decrement is (or would be) the final one;
// the actual final decrement is deferred to deallocTgtPtr (see below).
TargetPointerResultTy
DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                         bool UpdateRefCount, bool UseHoldRefCount,
                         bool &IsHostPtr, bool MustContain, bool ForceDelete) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  void *TargetPointer = NULL;
  bool IsNew = false;
  IsHostPtr = false;
  IsLast = false;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    // We do not zero the total reference count here. deallocTgtPtr does that
    // atomically with removing the mapping. Otherwise, before this thread
    // removed the mapping in deallocTgtPtr, another thread could retrieve the
    // mapping, increment and decrement back to zero, and then both threads
    // would try to remove the mapping, resulting in a double free.
    IsLast = HT.decShouldRemove(UseHoldRefCount, ForceDelete);
    const char *RefCountAction;
    if (!UpdateRefCount) {
      RefCountAction = " (update suppressed)";
    } else if (ForceDelete) {
      // 'delete' map-type modifier: collapse the refcount to one so the
      // deferred final decrement in deallocTgtPtr removes the mapping.
      HT.resetRefCount(UseHoldRefCount);
      assert(IsLast == HT.decShouldRemove(UseHoldRefCount) &&
             "expected correct IsLast prediction for reset");
      if (IsLast)
        RefCountAction = " (reset, deferred final decrement)";
      else {
        HT.decRefCount(UseHoldRefCount);
        RefCountAction = " (reset)";
      }
    } else if (IsLast) {
      RefCountAction = " (deferred final decrement)";
    } else {
      HT.decRefCount(UseHoldRefCount);
      RefCountAction = " (decremented)";
    }
    const char *DynRefCountAction = UseHoldRefCount ? "" : RefCountAction;
    const char *HoldRefCountAction = UseHoldRefCount ? RefCountAction : "";
    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    INFO(OMP_INFOTYPE_MAPPING_EXISTS, DeviceID,
         "Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
         "Size=%" PRId64 ", DynRefCount=%s%s, HoldRefCount=%s%s\n",
         DPxPTR(HstPtrBegin), DPxPTR(tp), Size, HT.dynRefCountToStr().c_str(),
         DynRefCountAction, HT.holdRefCountToStr().c_str(), HoldRefCountAction);
    TargetPointer = (void *)tp;
  } else if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " for unified shared "
       "memory\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size);
    IsHostPtr = true;
    TargetPointer = HstPtrBegin;
  }

  return {{IsNew, IsHostPtr}, lr.Entry, TargetPointer};
}

// Return the target pointer begin (where the data will be moved).
// Lookup-only variant: never touches reference counts. The caller must hold
// the exclusive accessor \p HDTTMap. Returns NULL if no overlapping mapping
// exists.
void *DeviceTy::getTgtPtrBegin(HDTTMapAccessorTy &HDTTMap, void *HstPtrBegin,
                               int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

/// Apply the deferred final reference-count decrement for the mapping of
/// \p HstPtrBegin (see getTgtPtrBegin) and, when the count reaches zero,
/// free the device memory, destroy the entry's event, and remove the entry —
/// all under the exclusive map accessor so the removal is atomic.
int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size,
                            bool HasHoldModifier) {
  HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor();

  // Check if the pointer is contained in any sub-nodes.
  int Ret = OFFLOAD_SUCCESS;
  LookupResult lr = lookupMapping(HDTTMap, HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    if (HT.decRefCount(HasHoldModifier) == 0) {
      DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
         DPxPTR(HT.TgtPtrBegin), Size);
      deleteData((void *)HT.TgtPtrBegin);
      INFO(OMP_INFOTYPE_MAPPING_CHANGED, DeviceID,
           "Removing map entry with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
           ", Size=%" PRId64 ", Name=%s\n",
           DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size,
           (HT.HstPtrName) ? getNameFromMapping(HT.HstPtrName).c_str()
                           : "unknown");
      void *Event = lr.Entry->getEvent();
      HDTTMap->erase(lr.Entry);
      delete lr.Entry;
      if (Event && destroyEvent(Event) != OFFLOAD_SUCCESS) {
        REPORT("Failed to destroy event " DPxMOD "\n", DPxPTR(Event));
        Ret = OFFLOAD_FAIL;
      }
    }
  } else {
    REPORT("Section to delete (hst addr " DPxMOD ") does not exist in the"
           " allocated memory\n",
           DPxPTR(HstPtrBegin));
    Ret = OFFLOAD_FAIL;
  }

  return Ret;
}

/// Init device, should not be called directly.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(PM->RTLs.RequiresFlags);
  int32_t Ret = RTL->init_device(RTLDeviceID);
  if (Ret != OFFLOAD_SUCCESS)
    return;

  // IsInit stays false on failure; initOnce translates that into OFFLOAD_FAIL.
  IsInit = true;
}

/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

/// Tear the device down via the plugin, if it provides a deinit hook.
void DeviceTy::deinit() {
  if (RTL->deinit_device)
    RTL->deinit_device(RTLDeviceID);
}

// Load binary to device.
485 __tgt_target_table *DeviceTy::load_binary(void *Img) { 486 std::lock_guard<decltype(RTL->Mtx)> LG(RTL->Mtx); 487 __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img); 488 return rc; 489 } 490 491 void *DeviceTy::allocData(int64_t Size, void *HstPtr, int32_t Kind) { 492 return RTL->data_alloc(RTLDeviceID, Size, HstPtr, Kind); 493 } 494 495 int32_t DeviceTy::deleteData(void *TgtPtrBegin) { 496 return RTL->data_delete(RTLDeviceID, TgtPtrBegin); 497 } 498 499 // Submit data to device 500 int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size, 501 AsyncInfoTy &AsyncInfo) { 502 if (getInfoLevel() & OMP_INFOTYPE_DATA_TRANSFER) { 503 HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor(); 504 LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size); 505 auto *HT = &*LR.Entry; 506 507 INFO(OMP_INFOTYPE_DATA_TRANSFER, DeviceID, 508 "Copying data from host to device, HstPtr=" DPxMOD ", TgtPtr=" DPxMOD 509 ", Size=%" PRId64 ", Name=%s\n", 510 DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin), Size, 511 (HT && HT->HstPtrName) ? 
getNameFromMapping(HT->HstPtrName).c_str() 512 : "unknown"); 513 } 514 515 if (!AsyncInfo || !RTL->data_submit_async || !RTL->synchronize) 516 return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size); 517 else 518 return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size, 519 AsyncInfo); 520 } 521 522 // Retrieve data from device 523 int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin, 524 int64_t Size, AsyncInfoTy &AsyncInfo) { 525 if (getInfoLevel() & OMP_INFOTYPE_DATA_TRANSFER) { 526 HDTTMapAccessorTy HDTTMap = HostDataToTargetMap.getExclusiveAccessor(); 527 LookupResult LR = lookupMapping(HDTTMap, HstPtrBegin, Size); 528 auto *HT = &*LR.Entry; 529 INFO(OMP_INFOTYPE_DATA_TRANSFER, DeviceID, 530 "Copying data from device to host, TgtPtr=" DPxMOD ", HstPtr=" DPxMOD 531 ", Size=%" PRId64 ", Name=%s\n", 532 DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin), Size, 533 (HT && HT->HstPtrName) ? getNameFromMapping(HT->HstPtrName).c_str() 534 : "unknown"); 535 } 536 537 if (!RTL->data_retrieve_async || !RTL->synchronize) 538 return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size); 539 else 540 return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size, 541 AsyncInfo); 542 } 543 544 // Copy data from current device to destination device directly 545 int32_t DeviceTy::dataExchange(void *SrcPtr, DeviceTy &DstDev, void *DstPtr, 546 int64_t Size, AsyncInfoTy &AsyncInfo) { 547 if (!AsyncInfo || !RTL->data_exchange_async || !RTL->synchronize) { 548 assert(RTL->data_exchange && "RTL->data_exchange is nullptr"); 549 return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr, 550 Size); 551 } else 552 return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, 553 DstPtr, Size, AsyncInfo); 554 } 555 556 // Run region on device 557 int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr, 558 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize, 559 AsyncInfoTy &AsyncInfo) { 560 if 
(!RTL->run_region || !RTL->synchronize) 561 return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets, 562 TgtVarsSize); 563 else 564 return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, 565 TgtOffsets, TgtVarsSize, AsyncInfo); 566 } 567 568 // Run region on device 569 bool DeviceTy::printDeviceInfo(int32_t RTLDevId) { 570 if (!RTL->print_device_info) 571 return false; 572 RTL->print_device_info(RTLDevId); 573 return true; 574 } 575 576 // Run team region on device. 577 int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr, 578 ptrdiff_t *TgtOffsets, int32_t TgtVarsSize, 579 int32_t NumTeams, int32_t ThreadLimit, 580 uint64_t LoopTripCount, 581 AsyncInfoTy &AsyncInfo) { 582 if (!RTL->run_team_region_async || !RTL->synchronize) 583 return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, 584 TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit, 585 LoopTripCount); 586 else 587 return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, 588 TgtOffsets, TgtVarsSize, NumTeams, 589 ThreadLimit, LoopTripCount, AsyncInfo); 590 } 591 592 // Whether data can be copied to DstDevice directly 593 bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) { 594 if (RTL != DstDevice.RTL || !RTL->is_data_exchangable) 595 return false; 596 597 if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID)) 598 return (RTL->data_exchange != nullptr) || 599 (RTL->data_exchange_async != nullptr); 600 601 return false; 602 } 603 604 int32_t DeviceTy::synchronize(AsyncInfoTy &AsyncInfo) { 605 if (RTL->synchronize) 606 return RTL->synchronize(RTLDeviceID, AsyncInfo); 607 return OFFLOAD_SUCCESS; 608 } 609 610 int32_t DeviceTy::createEvent(void **Event) { 611 if (RTL->create_event) 612 return RTL->create_event(RTLDeviceID, Event); 613 614 return OFFLOAD_SUCCESS; 615 } 616 617 int32_t DeviceTy::recordEvent(void *Event, AsyncInfoTy &AsyncInfo) { 618 if (RTL->record_event) 619 return RTL->record_event(RTLDeviceID, Event, 
AsyncInfo); 620 621 return OFFLOAD_SUCCESS; 622 } 623 624 int32_t DeviceTy::waitEvent(void *Event, AsyncInfoTy &AsyncInfo) { 625 if (RTL->wait_event) 626 return RTL->wait_event(RTLDeviceID, Event, AsyncInfo); 627 628 return OFFLOAD_SUCCESS; 629 } 630 631 int32_t DeviceTy::syncEvent(void *Event) { 632 if (RTL->sync_event) 633 return RTL->sync_event(RTLDeviceID, Event); 634 635 return OFFLOAD_SUCCESS; 636 } 637 638 int32_t DeviceTy::destroyEvent(void *Event) { 639 if (RTL->create_event) 640 return RTL->destroy_event(RTLDeviceID, Event); 641 642 return OFFLOAD_SUCCESS; 643 } 644 645 /// Check whether a device has an associated RTL and initialize it if it's not 646 /// already initialized. 647 bool device_is_ready(int device_num) { 648 DP("Checking whether device %d is ready.\n", device_num); 649 // Devices.size() can only change while registering a new 650 // library, so try to acquire the lock of RTLs' mutex. 651 size_t DevicesSize; 652 { 653 std::lock_guard<decltype(PM->RTLsMtx)> LG(PM->RTLsMtx); 654 DevicesSize = PM->Devices.size(); 655 } 656 if (DevicesSize <= (size_t)device_num) { 657 DP("Device ID %d does not have a matching RTL\n", device_num); 658 return false; 659 } 660 661 // Get device info 662 DeviceTy &Device = *PM->Devices[device_num]; 663 664 DP("Is the device %d (local ID %d) initialized? %d\n", device_num, 665 Device.RTLDeviceID, Device.IsInit); 666 667 // Init the device if not done before 668 if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) { 669 DP("Failed to init device %d\n", device_num); 670 return false; 671 } 672 673 DP("Device %d is ready to use.\n", device_num); 674 675 return true; 676 } 677