//===--------- device.cpp - Target independent OpenMP target RTL ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Functionality for managing devices that are handled by RTL plugins.
//
//===----------------------------------------------------------------------===//

#include "device.h"
#include "private.h"
#include "rtl.h"

#include <cassert>
#include <climits>
#include <string>

/// Map between Device ID (i.e. openmp device id) and its DeviceTy.
DevicesTy Devices;

int DeviceTy::associatePtr(void *HstPtrBegin, void *TgtPtrBegin, int64_t Size) {
  DataMapMtx.lock();

  // Check if entry exists
  auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
  if (search != HostDataToTargetMap.end()) {
    // Mapping already exists
    bool isValid = search->HstPtrEnd == (uintptr_t)HstPtrBegin + Size &&
                   search->TgtPtrBegin == (uintptr_t)TgtPtrBegin;
    DataMapMtx.unlock();
    if (isValid) {
      DP("Attempt to re-associate the same device ptr+offset with the same "
         "host ptr, nothing to do\n");
      return OFFLOAD_SUCCESS;
    } else {
      DP("Not allowed to re-associate a different device ptr+offset with the "
         "same host ptr\n");
      return OFFLOAD_FAIL;
    }
  }

  // Mapping does not exist, allocate it with refCount=INF
  HostDataToTargetTy newEntry((uintptr_t)HstPtrBegin /*HstPtrBase*/,
                              (uintptr_t)HstPtrBegin /*HstPtrBegin*/,
                              (uintptr_t)HstPtrBegin + Size /*HstPtrEnd*/,
                              (uintptr_t)TgtPtrBegin /*TgtPtrBegin*/,
                              true /*IsRefCountINF*/);

  DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", HstEnd="
     DPxMOD ", TgtBegin=" DPxMOD "\n", DPxPTR(newEntry.HstPtrBase),
     DPxPTR(newEntry.HstPtrBegin), DPxPTR(newEntry.HstPtrEnd),
     DPxPTR(newEntry.TgtPtrBegin));
  HostDataToTargetMap.insert(newEntry);

  DataMapMtx.unlock();

  return OFFLOAD_SUCCESS;
}

int DeviceTy::disassociatePtr(void *HstPtrBegin) {
  DataMapMtx.lock();

  auto search = HostDataToTargetMap.find(HstPtrBeginTy{(uintptr_t)HstPtrBegin});
  if (search != HostDataToTargetMap.end()) {
    // Mapping exists
    if (search->isRefCountInf()) {
      DP("Association found, removing it\n");
      HostDataToTargetMap.erase(search);
      DataMapMtx.unlock();
      return OFFLOAD_SUCCESS;
    } else {
      DP("Trying to disassociate a pointer which was not mapped via "
         "omp_target_associate_ptr\n");
    }
  }

  // Mapping not found
  DataMapMtx.unlock();
  DP("Association not found\n");
  return OFFLOAD_FAIL;
}

// Get ref count of map entry containing HstPtrBegin
uint64_t DeviceTy::getMapEntryRefCnt(void *HstPtrBegin) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  uint64_t RefCnt = 0;

  DataMapMtx.lock();
  if (!HostDataToTargetMap.empty()) {
    auto upper = HostDataToTargetMap.upper_bound(hp);
    if (upper != HostDataToTargetMap.begin()) {
      upper--;
      if (hp >= upper->HstPtrBegin && hp < upper->HstPtrEnd) {
        DP("DeviceTy::getMapEntry: requested entry found\n");
        RefCnt = upper->getRefCount();
      }
    }
  }
  DataMapMtx.unlock();

  if (RefCnt == 0) {
    DP("DeviceTy::getMapEntry: requested entry not found\n");
  }

  return RefCnt;
}
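
// Illustrative sketch (hypothetical addresses, not taken from the runtime):
// given a single existing entry that covers host addresses [0x1000, 0x1100),
// the lookup below reports
//   lookupMapping((void *)0x1040, 0x40)  -> IsContained
//   lookupMapping((void *)0x10c0, 0x80)  -> ExtendsAfter
//   lookupMapping((void *)0x0f80, 0x100) -> ExtendsBefore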

LookupResult DeviceTy::lookupMapping(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr;

  DP("Looking up mapping(HstPtrBegin=" DPxMOD ", Size=%" PRId64 ")...\n",
     DPxPTR(hp), Size);

  if (HostDataToTargetMap.empty())
    return lr;

  auto upper = HostDataToTargetMap.upper_bound(hp);
  // check the left bin
  if (upper != HostDataToTargetMap.begin()) {
    lr.Entry = std::prev(upper);
    auto &HT = *lr.Entry;
    // Is it contained?
    lr.Flags.IsContained = hp >= HT.HstPtrBegin && hp < HT.HstPtrEnd &&
                           (hp + Size) <= HT.HstPtrEnd;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  // check the right bin
  if (!(lr.Flags.IsContained || lr.Flags.ExtendsAfter) &&
      upper != HostDataToTargetMap.end()) {
    lr.Entry = upper;
    auto &HT = *lr.Entry;
    // Does it extend into an already mapped region?
    lr.Flags.ExtendsBefore = hp < HT.HstPtrBegin && (hp + Size) > HT.HstPtrBegin;
    // Does it extend beyond the mapped region?
    lr.Flags.ExtendsAfter = hp < HT.HstPtrEnd && (hp + Size) > HT.HstPtrEnd;
  }

  if (lr.Flags.ExtendsBefore) {
    DP("WARNING: Pointer is not mapped but section extends into already "
       "mapped data\n");
  }
  if (lr.Flags.ExtendsAfter) {
    DP("WARNING: Pointer is already mapped but section extends beyond mapped "
       "region\n");
  }

  return lr;
}

// Used by targetDataBegin
// Return the target pointer begin (where the data will be moved).
// Allocate memory if this is the first occurrence of this mapping.
// Increment the reference counter.
// If NULL is returned, then either data allocation failed or the user tried
// to do an illegal mapping.
void *DeviceTy::getOrAllocTgtPtr(void *HstPtrBegin, void *HstPtrBase,
                                 int64_t Size, bool &IsNew, bool &IsHostPtr,
                                 bool IsImplicit, bool UpdateRefCount,
                                 bool HasCloseModifier,
                                 bool HasPresentModifier) {
  void *rc = NULL;
  IsHostPtr = false;
  IsNew = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  // Check if the pointer is contained.
  // If a variable is mapped to the device manually by the user - which would
  // cause the IsContained flag to be true - then we must ensure that the
  // device address is returned even under unified memory conditions.
  if (lr.Flags.IsContained ||
      ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && IsImplicit)) {
    auto &HT = *lr.Entry;
    IsNew = false;

    if (UpdateRefCount)
      HT.incRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists%s with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
       "Size=%" PRId64 ",%s RefCount=%s\n", (IsImplicit ? " (implicit)" : ""),
       DPxPTR(HstPtrBegin), DPxPTR(tp), Size,
       (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if ((lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) && !IsImplicit) {
    // Explicit extension of mapped data - not allowed.
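    // Hypothetical example of how this case is reached: with a[0:50] already
    // mapped, an explicit map of a[0:100] lands here because the requested
    // section extends past the existing device allocation.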
    MESSAGE("explicit extension not allowed: host address specified is " DPxMOD
            " (%" PRId64 " bytes), but device allocation maps to host at "
            DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size, DPxPTR(lr.Entry->HstPtrBegin),
            lr.Entry->HstPtrEnd - lr.Entry->HstPtrBegin);
    if (HasPresentModifier)
      MESSAGE("device mapping required by 'present' map type modifier does "
              "not exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), Size);
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
             !HasCloseModifier) {
    // If unified shared memory is active, implicitly mapped variables that are
    // not privatized use host address. Any explicitly mapped variables also use
    // host address where correctness is not impeded. In all other cases maps
    // are respected.
    // In addition to the mapping rules above, the close map modifier forces the
    // mapping of the variable to the device.
    if (Size) {
      DP("Return HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
         DPxPTR((uintptr_t)HstPtrBegin), Size,
         (UpdateRefCount ? " updated" : ""));
      IsHostPtr = true;
      rc = HstPtrBegin;
    }
  } else if (HasPresentModifier) {
    DP("Mapping required by 'present' map type modifier does not exist for "
       "HstPtrBegin=" DPxMOD ", Size=%" PRId64 "\n",
       DPxPTR(HstPtrBegin), Size);
    MESSAGE("device mapping required by 'present' map type modifier does not "
            "exist for host address " DPxMOD " (%" PRId64 " bytes)",
            DPxPTR(HstPtrBegin), Size);
  } else if (Size) {
    // If it is not contained and Size > 0, we should create a new entry for it.
    IsNew = true;
    uintptr_t tp = (uintptr_t)allocData(Size, HstPtrBegin);
    DP("Creating new map entry: HstBase=" DPxMOD ", HstBegin=" DPxMOD ", "
       "HstEnd=" DPxMOD ", TgtBegin=" DPxMOD "\n",
       DPxPTR(HstPtrBase), DPxPTR(HstPtrBegin),
       DPxPTR((uintptr_t)HstPtrBegin + Size), DPxPTR(tp));
    HostDataToTargetMap.emplace(
        HostDataToTargetTy((uintptr_t)HstPtrBase, (uintptr_t)HstPtrBegin,
                           (uintptr_t)HstPtrBegin + Size, tp));
    rc = (void *)tp;
  }

  DataMapMtx.unlock();
  return rc;
}

// Used by targetDataBegin, targetDataEnd, target_data_update and target.
// Return the target pointer begin (where the data will be moved).
// Decrement the reference counter if called from targetDataEnd.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size, bool &IsLast,
                               bool UpdateRefCount, bool &IsHostPtr,
                               bool MustContain) {
  void *rc = NULL;
  IsHostPtr = false;
  IsLast = false;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);

  if (lr.Flags.IsContained ||
      (!MustContain && (lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter))) {
    auto &HT = *lr.Entry;
    IsLast = HT.getRefCount() == 1;

    if (!IsLast && UpdateRefCount)
      HT.decRefCount();

    uintptr_t tp = HT.TgtPtrBegin + ((uintptr_t)HstPtrBegin - HT.HstPtrBegin);
    DP("Mapping exists with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD ", "
       "Size=%" PRId64 ",%s RefCount=%s\n", DPxPTR(HstPtrBegin), DPxPTR(tp),
       Size, (UpdateRefCount ? " updated" : ""),
       HT.isRefCountInf() ? "INF" : std::to_string(HT.getRefCount()).c_str());
    rc = (void *)tp;
  } else if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) {
    // If the value isn't found in the mapping and unified shared memory
    // is on then it means we have stumbled upon a value which we need to
    // use directly from the host.
    DP("Get HstPtrBegin " DPxMOD " Size=%" PRId64 " RefCount=%s\n",
       DPxPTR((uintptr_t)HstPtrBegin), Size,
       (UpdateRefCount ? " updated" : ""));
    IsHostPtr = true;
    rc = HstPtrBegin;
  }

  DataMapMtx.unlock();
  return rc;
}

// Return the target pointer begin (where the data will be moved).
// Lock-free version called when loading global symbols from the fat binary.
void *DeviceTy::getTgtPtrBegin(void *HstPtrBegin, int64_t Size) {
  uintptr_t hp = (uintptr_t)HstPtrBegin;
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    uintptr_t tp = HT.TgtPtrBegin + (hp - HT.HstPtrBegin);
    return (void *)tp;
  }

  return NULL;
}

int DeviceTy::deallocTgtPtr(void *HstPtrBegin, int64_t Size, bool ForceDelete,
                            bool HasCloseModifier) {
  if (RTLs->RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY && !HasCloseModifier)
    return OFFLOAD_SUCCESS;
  // Check if the pointer is contained in any sub-nodes.
  int rc;
  DataMapMtx.lock();
  LookupResult lr = lookupMapping(HstPtrBegin, Size);
  if (lr.Flags.IsContained || lr.Flags.ExtendsBefore || lr.Flags.ExtendsAfter) {
    auto &HT = *lr.Entry;
    if (ForceDelete)
      HT.resetRefCount();
    if (HT.decRefCount() == 0) {
      DP("Deleting tgt data " DPxMOD " of size %" PRId64 "\n",
         DPxPTR(HT.TgtPtrBegin), Size);
      deleteData((void *)HT.TgtPtrBegin);
      DP("Removing%s mapping with HstPtrBegin=" DPxMOD ", TgtPtrBegin=" DPxMOD
         ", Size=%" PRId64 "\n", (ForceDelete ? " (forced)" : ""),
         DPxPTR(HT.HstPtrBegin), DPxPTR(HT.TgtPtrBegin), Size);
      HostDataToTargetMap.erase(lr.Entry);
    }
    rc = OFFLOAD_SUCCESS;
  } else {
    DP("Section to delete (hst addr " DPxMOD ") does not exist in the allocated"
       " memory\n", DPxPTR(HstPtrBegin));
    rc = OFFLOAD_FAIL;
  }

  DataMapMtx.unlock();
  return rc;
}

/// Init device, should not be called directly.
void DeviceTy::init() {
  // Make call to init_requires if it exists for this plugin.
  if (RTL->init_requires)
    RTL->init_requires(RTLs->RequiresFlags);
  int32_t rc = RTL->init_device(RTLDeviceID);
  if (rc == OFFLOAD_SUCCESS) {
    IsInit = true;
  }
}

/// Thread-safe method to initialize the device only once.
int32_t DeviceTy::initOnce() {
  std::call_once(InitFlag, &DeviceTy::init, this);

  // At this point, if IsInit is true, then either this thread or some other
  // thread in the past successfully initialized the device, so we can return
  // OFFLOAD_SUCCESS. If this thread executed init() via call_once() and it
  // failed, return OFFLOAD_FAIL. If call_once did not invoke init(), it means
  // that some other thread already attempted to execute init() and if IsInit
  // is still false, return OFFLOAD_FAIL.
  if (IsInit)
    return OFFLOAD_SUCCESS;
  else
    return OFFLOAD_FAIL;
}

// Load binary to device.
__tgt_target_table *DeviceTy::load_binary(void *Img) {
  RTL->Mtx.lock();
  __tgt_target_table *rc = RTL->load_binary(RTLDeviceID, Img);
  RTL->Mtx.unlock();
  return rc;
}

void *DeviceTy::allocData(int64_t Size, void *HstPtr) {
  return RTL->data_alloc(RTLDeviceID, Size, HstPtr);
}

int32_t DeviceTy::deleteData(void *TgtPtrBegin) {
  return RTL->data_delete(RTLDeviceID, TgtPtrBegin);
}

// Submit data to device
int32_t DeviceTy::submitData(void *TgtPtrBegin, void *HstPtrBegin, int64_t Size,
                             __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_submit_async || !RTL->synchronize)
    return RTL->data_submit(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size);
  else
    return RTL->data_submit_async(RTLDeviceID, TgtPtrBegin, HstPtrBegin, Size,
                                  AsyncInfoPtr);
}

// Retrieve data from device
int32_t DeviceTy::retrieveData(void *HstPtrBegin, void *TgtPtrBegin,
                               int64_t Size, __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_retrieve_async || !RTL->synchronize)
    return RTL->data_retrieve(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size);
  else
    return RTL->data_retrieve_async(RTLDeviceID, HstPtrBegin, TgtPtrBegin, Size,
                                    AsyncInfoPtr);
}

// Copy data from current device to destination device directly
int32_t DeviceTy::data_exchange(void *SrcPtr, DeviceTy DstDev, void *DstPtr,
                                int64_t Size, __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->data_exchange_async || !RTL->synchronize) {
    assert(RTL->data_exchange && "RTL->data_exchange is nullptr");
    return RTL->data_exchange(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID, DstPtr,
                              Size);
  } else
    return RTL->data_exchange_async(RTLDeviceID, SrcPtr, DstDev.RTLDeviceID,
                                    DstPtr, Size, AsyncInfoPtr);
}

// Run region on device
int32_t DeviceTy::runRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                            ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                            __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->run_region_async || !RTL->synchronize)
    return RTL->run_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr, TgtOffsets,
                           TgtVarsSize);
  else
    return RTL->run_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                 TgtOffsets, TgtVarsSize, AsyncInfoPtr);
}

// Run team region on device.
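// Illustrative sketch (hypothetical source): a construct such as
//   #pragma omp target teams num_teams(4) thread_limit(64)
// is launched through this entry point with NumTeams = 4 and ThreadLimit = 64,
// whereas a plain "#pragma omp target" goes through runRegion() above.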
int32_t DeviceTy::runTeamRegion(void *TgtEntryPtr, void **TgtVarsPtr,
                                ptrdiff_t *TgtOffsets, int32_t TgtVarsSize,
                                int32_t NumTeams, int32_t ThreadLimit,
                                uint64_t LoopTripCount,
                                __tgt_async_info *AsyncInfoPtr) {
  if (!AsyncInfoPtr || !RTL->run_team_region_async || !RTL->synchronize)
    return RTL->run_team_region(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                TgtOffsets, TgtVarsSize, NumTeams, ThreadLimit,
                                LoopTripCount);
  else
    return RTL->run_team_region_async(RTLDeviceID, TgtEntryPtr, TgtVarsPtr,
                                      TgtOffsets, TgtVarsSize, NumTeams,
                                      ThreadLimit, LoopTripCount, AsyncInfoPtr);
}

// Whether data can be copied to DstDevice directly
bool DeviceTy::isDataExchangable(const DeviceTy &DstDevice) {
  if (RTL != DstDevice.RTL || !RTL->is_data_exchangable)
    return false;

  if (RTL->is_data_exchangable(RTLDeviceID, DstDevice.RTLDeviceID))
    return (RTL->data_exchange != nullptr) ||
           (RTL->data_exchange_async != nullptr);

  return false;
}

int32_t DeviceTy::synchronize(__tgt_async_info *AsyncInfoPtr) {
  if (RTL->synchronize)
    return RTL->synchronize(RTLDeviceID, AsyncInfoPtr);
  return OFFLOAD_SUCCESS;
}

/// Check whether a device has an associated RTL and initialize it if it's not
/// already initialized.
bool device_is_ready(int device_num) {
  DP("Checking whether device %d is ready.\n", device_num);
  // Devices.size() can only change while registering a new
  // library, so try to acquire the lock of RTLs' mutex.
  RTLsMtx->lock();
  size_t Devices_size = Devices.size();
  RTLsMtx->unlock();
  if (Devices_size <= (size_t)device_num) {
    DP("Device ID %d does not have a matching RTL\n", device_num);
    return false;
  }

  // Get device info
  DeviceTy &Device = Devices[device_num];

  DP("Is the device %d (local ID %d) initialized? %d\n", device_num,
     Device.RTLDeviceID, Device.IsInit);

  // Init the device if not done before
  if (!Device.IsInit && Device.initOnce() != OFFLOAD_SUCCESS) {
    DP("Failed to init device %d\n", device_num);
    return false;
  }

  DP("Device %d is ready to use.\n", device_num);

  return true;
}