1 //===------ omptarget.cpp - Target independent OpenMP target RTL -- C++ -*-===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 // 9 // Implementation of the interface to be used by Clang during the codegen of a 10 // target region. 11 // 12 //===----------------------------------------------------------------------===// 13 14 #include "omptarget.h" 15 #include "device.h" 16 #include "private.h" 17 #include "rtl.h" 18 19 #include <cassert> 20 #include <vector> 21 22 int AsyncInfoTy::synchronize() { 23 int Result = OFFLOAD_SUCCESS; 24 if (AsyncInfo.Queue) { 25 // If we have a queue we need to synchronize it now. 26 Result = Device.synchronize(*this); 27 assert(AsyncInfo.Queue == nullptr && 28 "The device plugin should have nulled the queue to indicate there " 29 "are no outstanding actions!"); 30 } 31 return Result; 32 } 33 34 void *&AsyncInfoTy::getVoidPtrLocation() { 35 BufferLocations.push_back(nullptr); 36 return BufferLocations.back(); 37 } 38 39 /* All begin addresses for partially mapped structs must be 8-aligned in order 40 * to ensure proper alignment of members. E.g. 41 * 42 * struct S { 43 * int a; // 4-aligned 44 * int b; // 4-aligned 45 * int *p; // 8-aligned 46 * } s1; 47 * ... 48 * #pragma omp target map(tofrom: s1.b, s1.p[0:N]) 49 * { 50 * s1.b = 5; 51 * for (int i...) s1.p[i] = ...; 52 * } 53 * 54 * Here we are mapping s1 starting from member b, so BaseAddress=&s1=&s1.a and 55 * BeginAddress=&s1.b. Let's assume that the struct begins at address 0x100, 56 * then &s1.a=0x100, &s1.b=0x104, &s1.p=0x108. Each member obeys the alignment 57 * requirements for its type. Now, when we allocate memory on the device, in 58 * CUDA's case cuMemAlloc() returns an address which is at least 256-aligned. 
59 * This means that the chunk of the struct on the device will start at a 60 * 256-aligned address, let's say 0x200. Then the address of b will be 0x200 and 61 * address of p will be a misaligned 0x204 (on the host there was no need to add 62 * padding between b and p, so p comes exactly 4 bytes after b). If the device 63 * kernel tries to access s1.p, a misaligned address error occurs (as reported 64 * by the CUDA plugin). By padding the begin address down to a multiple of 8 and 65 * extending the size of the allocated chuck accordingly, the chuck on the 66 * device will start at 0x200 with the padding (4 bytes), then &s1.b=0x204 and 67 * &s1.p=0x208, as they should be to satisfy the alignment requirements. 68 */ 69 static const int64_t Alignment = 8; 70 71 /// Map global data and execute pending ctors 72 static int InitLibrary(DeviceTy &Device) { 73 /* 74 * Map global data 75 */ 76 int32_t device_id = Device.DeviceID; 77 int rc = OFFLOAD_SUCCESS; 78 bool supportsEmptyImages = Device.RTL->supports_empty_images && 79 Device.RTL->supports_empty_images() > 0; 80 81 Device.PendingGlobalsMtx.lock(); 82 PM->TrlTblMtx.lock(); 83 for (auto *HostEntriesBegin : PM->HostEntriesBeginRegistrationOrder) { 84 TranslationTable *TransTable = 85 &PM->HostEntriesBeginToTransTable[HostEntriesBegin]; 86 if (TransTable->HostTable.EntriesBegin == 87 TransTable->HostTable.EntriesEnd && 88 !supportsEmptyImages) { 89 // No host entry so no need to proceed 90 continue; 91 } 92 93 if (TransTable->TargetsTable[device_id] != 0) { 94 // Library entries have already been processed 95 continue; 96 } 97 98 // 1) get image. 99 assert(TransTable->TargetsImages.size() > (size_t)device_id && 100 "Not expecting a device ID outside the table's bounds!"); 101 __tgt_device_image *img = TransTable->TargetsImages[device_id]; 102 if (!img) { 103 REPORT("No image loaded for device id %d.\n", device_id); 104 rc = OFFLOAD_FAIL; 105 break; 106 } 107 // 2) load image into the target table. 
108 __tgt_target_table *TargetTable = TransTable->TargetsTable[device_id] = 109 Device.load_binary(img); 110 // Unable to get table for this image: invalidate image and fail. 111 if (!TargetTable) { 112 REPORT("Unable to generate entries table for device id %d.\n", device_id); 113 TransTable->TargetsImages[device_id] = 0; 114 rc = OFFLOAD_FAIL; 115 break; 116 } 117 118 // Verify whether the two table sizes match. 119 size_t hsize = 120 TransTable->HostTable.EntriesEnd - TransTable->HostTable.EntriesBegin; 121 size_t tsize = TargetTable->EntriesEnd - TargetTable->EntriesBegin; 122 123 // Invalid image for these host entries! 124 if (hsize != tsize) { 125 REPORT("Host and Target tables mismatch for device id %d [%zx != %zx].\n", 126 device_id, hsize, tsize); 127 TransTable->TargetsImages[device_id] = 0; 128 TransTable->TargetsTable[device_id] = 0; 129 rc = OFFLOAD_FAIL; 130 break; 131 } 132 133 // process global data that needs to be mapped. 134 Device.DataMapMtx.lock(); 135 __tgt_target_table *HostTable = &TransTable->HostTable; 136 for (__tgt_offload_entry *CurrDeviceEntry = TargetTable->EntriesBegin, 137 *CurrHostEntry = HostTable->EntriesBegin, 138 *EntryDeviceEnd = TargetTable->EntriesEnd; 139 CurrDeviceEntry != EntryDeviceEnd; 140 CurrDeviceEntry++, CurrHostEntry++) { 141 if (CurrDeviceEntry->size != 0) { 142 // has data. 143 assert(CurrDeviceEntry->size == CurrHostEntry->size && 144 "data size mismatch"); 145 146 // Fortran may use multiple weak declarations for the same symbol, 147 // therefore we must allow for multiple weak symbols to be loaded from 148 // the fat binary. Treat these mappings as any other "regular" mapping. 149 // Add entry to map. 
150 if (Device.getTgtPtrBegin(CurrHostEntry->addr, CurrHostEntry->size)) 151 continue; 152 DP("Add mapping from host " DPxMOD " to device " DPxMOD " with size %zu" 153 "\n", 154 DPxPTR(CurrHostEntry->addr), DPxPTR(CurrDeviceEntry->addr), 155 CurrDeviceEntry->size); 156 Device.HostDataToTargetMap.emplace( 157 (uintptr_t)CurrHostEntry->addr /*HstPtrBase*/, 158 (uintptr_t)CurrHostEntry->addr /*HstPtrBegin*/, 159 (uintptr_t)CurrHostEntry->addr + CurrHostEntry->size /*HstPtrEnd*/, 160 (uintptr_t)CurrDeviceEntry->addr /*TgtPtrBegin*/, nullptr, 161 true /*IsRefCountINF*/); 162 } 163 } 164 Device.DataMapMtx.unlock(); 165 } 166 PM->TrlTblMtx.unlock(); 167 168 if (rc != OFFLOAD_SUCCESS) { 169 Device.PendingGlobalsMtx.unlock(); 170 return rc; 171 } 172 173 /* 174 * Run ctors for static objects 175 */ 176 if (!Device.PendingCtorsDtors.empty()) { 177 AsyncInfoTy AsyncInfo(Device); 178 // Call all ctors for all libraries registered so far 179 for (auto &lib : Device.PendingCtorsDtors) { 180 if (!lib.second.PendingCtors.empty()) { 181 DP("Has pending ctors... call now\n"); 182 for (auto &entry : lib.second.PendingCtors) { 183 void *ctor = entry; 184 int rc = 185 target(nullptr, Device, ctor, 0, nullptr, nullptr, nullptr, 186 nullptr, nullptr, nullptr, 1, 1, true /*team*/, AsyncInfo); 187 if (rc != OFFLOAD_SUCCESS) { 188 REPORT("Running ctor " DPxMOD " failed.\n", DPxPTR(ctor)); 189 Device.PendingGlobalsMtx.unlock(); 190 return OFFLOAD_FAIL; 191 } 192 } 193 // Clear the list to indicate that this device has been used 194 lib.second.PendingCtors.clear(); 195 DP("Done with pending ctors for lib " DPxMOD "\n", DPxPTR(lib.first)); 196 } 197 } 198 // All constructors have been issued, wait for them now. 
199 if (AsyncInfo.synchronize() != OFFLOAD_SUCCESS) 200 return OFFLOAD_FAIL; 201 } 202 Device.HasPendingGlobals = false; 203 Device.PendingGlobalsMtx.unlock(); 204 205 return OFFLOAD_SUCCESS; 206 } 207 208 void handleTargetOutcome(bool Success, ident_t *Loc) { 209 switch (PM->TargetOffloadPolicy) { 210 case tgt_disabled: 211 if (Success) { 212 FATAL_MESSAGE0(1, "expected no offloading while offloading is disabled"); 213 } 214 break; 215 case tgt_default: 216 FATAL_MESSAGE0(1, "default offloading policy must be switched to " 217 "mandatory or disabled"); 218 break; 219 case tgt_mandatory: 220 if (!Success) { 221 if (getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE) 222 for (auto &Device : PM->Devices) 223 dumpTargetPointerMappings(Loc, Device); 224 else 225 FAILURE_MESSAGE("Run with LIBOMPTARGET_DEBUG=%d to dump host-target " 226 "pointer mappings.\n", 227 OMP_INFOTYPE_DUMP_TABLE); 228 229 SourceInfo info(Loc); 230 if (info.isAvailible()) 231 fprintf(stderr, "%s:%d:%d: ", info.getFilename(), info.getLine(), 232 info.getColumn()); 233 else 234 FAILURE_MESSAGE("Source location information not present. 
Compile with " 235 "-g or -gline-tables-only.\n"); 236 FATAL_MESSAGE0( 237 1, "failure of target construct while offloading is mandatory"); 238 } else { 239 if (getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE) 240 for (auto &Device : PM->Devices) 241 dumpTargetPointerMappings(Loc, Device); 242 } 243 break; 244 } 245 } 246 247 static void handleDefaultTargetOffload() { 248 PM->TargetOffloadMtx.lock(); 249 if (PM->TargetOffloadPolicy == tgt_default) { 250 if (omp_get_num_devices() > 0) { 251 DP("Default TARGET OFFLOAD policy is now mandatory " 252 "(devices were found)\n"); 253 PM->TargetOffloadPolicy = tgt_mandatory; 254 } else { 255 DP("Default TARGET OFFLOAD policy is now disabled " 256 "(no devices were found)\n"); 257 PM->TargetOffloadPolicy = tgt_disabled; 258 } 259 } 260 PM->TargetOffloadMtx.unlock(); 261 } 262 263 static bool isOffloadDisabled() { 264 if (PM->TargetOffloadPolicy == tgt_default) 265 handleDefaultTargetOffload(); 266 return PM->TargetOffloadPolicy == tgt_disabled; 267 } 268 269 // If offload is enabled, ensure that device DeviceID has been initialized, 270 // global ctors have been executed, and global data has been mapped. 271 // 272 // There are three possible results: 273 // - Return OFFLOAD_SUCCESS if the device is ready for offload. 274 // - Return OFFLOAD_FAIL without reporting a runtime error if offload is 275 // disabled, perhaps because the initial device was specified. 276 // - Report a runtime error and return OFFLOAD_FAIL. 277 // 278 // If DeviceID == OFFLOAD_DEVICE_DEFAULT, set DeviceID to the default device. 279 // This step might be skipped if offload is disabled. 
280 int checkDeviceAndCtors(int64_t &DeviceID, ident_t *Loc) { 281 if (isOffloadDisabled()) { 282 DP("Offload is disabled\n"); 283 return OFFLOAD_FAIL; 284 } 285 286 if (DeviceID == OFFLOAD_DEVICE_DEFAULT) { 287 DeviceID = omp_get_default_device(); 288 DP("Use default device id %" PRId64 "\n", DeviceID); 289 } 290 291 // Proposed behavior for OpenMP 5.2 in OpenMP spec github issue 2669. 292 if (omp_get_num_devices() == 0) { 293 DP("omp_get_num_devices() == 0 but offload is manadatory\n"); 294 handleTargetOutcome(false, Loc); 295 return OFFLOAD_FAIL; 296 } 297 298 if (DeviceID == omp_get_initial_device()) { 299 DP("Device is host (%" PRId64 "), returning as if offload is disabled\n", 300 DeviceID); 301 return OFFLOAD_FAIL; 302 } 303 304 // Is device ready? 305 if (!device_is_ready(DeviceID)) { 306 REPORT("Device %" PRId64 " is not ready.\n", DeviceID); 307 handleTargetOutcome(false, Loc); 308 return OFFLOAD_FAIL; 309 } 310 311 // Get device info. 312 DeviceTy &Device = PM->Devices[DeviceID]; 313 314 // Check whether global data has been mapped for this device 315 Device.PendingGlobalsMtx.lock(); 316 bool hasPendingGlobals = Device.HasPendingGlobals; 317 Device.PendingGlobalsMtx.unlock(); 318 if (hasPendingGlobals && InitLibrary(Device) != OFFLOAD_SUCCESS) { 319 REPORT("Failed to init globals on device %" PRId64 "\n", DeviceID); 320 handleTargetOutcome(false, Loc); 321 return OFFLOAD_FAIL; 322 } 323 324 return OFFLOAD_SUCCESS; 325 } 326 327 static int32_t getParentIndex(int64_t type) { 328 return ((type & OMP_TGT_MAPTYPE_MEMBER_OF) >> 48) - 1; 329 } 330 331 /// Call the user-defined mapper function followed by the appropriate 332 // targetData* function (targetData{Begin,End,Update}). 
int targetDataMapper(ident_t *loc, DeviceTy &Device, void *arg_base, void *arg,
                     int64_t arg_size, int64_t arg_type,
                     map_var_info_t arg_names, void *arg_mapper,
                     AsyncInfoTy &AsyncInfo,
                     TargetDataFuncPtrTy target_data_function) {
  TIMESCOPE_WITH_IDENT(loc);
  DP("Calling the mapper function " DPxMOD "\n", DPxPTR(arg_mapper));

  // The mapper function fills up Components.
  MapperComponentsTy MapperComponents;
  MapperFuncPtrTy MapperFuncPtr = (MapperFuncPtrTy)(arg_mapper);
  (*MapperFuncPtr)((void *)&MapperComponents, arg_base, arg, arg_size,
                   arg_type, arg_names);

  // Construct new arrays for args_base, args, arg_sizes and arg_types
  // using the information in MapperComponents and call the corresponding
  // targetData* function using these new arrays.
  std::vector<void *> MapperArgsBase(MapperComponents.Components.size());
  std::vector<void *> MapperArgs(MapperComponents.Components.size());
  std::vector<int64_t> MapperArgSizes(MapperComponents.Components.size());
  std::vector<int64_t> MapperArgTypes(MapperComponents.Components.size());
  std::vector<void *> MapperArgNames(MapperComponents.Components.size());

  for (unsigned I = 0, E = MapperComponents.Components.size(); I < E; ++I) {
    // For targetDataEnd the components are visited in reverse order so that
    // members are unmapped before their parent struct (mirroring the reverse
    // argument walk in targetDataEnd itself).
    auto &C =
        MapperComponents
            .Components[target_data_function == targetDataEnd ? E - I - 1 : I];
    MapperArgsBase[I] = C.Base;
    MapperArgs[I] = C.Begin;
    MapperArgSizes[I] = C.Size;
    MapperArgTypes[I] = C.Type;
    MapperArgNames[I] = C.Name;
  }

  // /*arg_mappers*/ nullptr: components produced by a mapper do not carry
  // nested mappers, which prevents unbounded recursion here.
  int rc = target_data_function(loc, Device,
                                MapperComponents.Components.size(),
                                MapperArgsBase.data(), MapperArgs.data(),
                                MapperArgSizes.data(), MapperArgTypes.data(),
                                MapperArgNames.data(), /*arg_mappers*/ nullptr,
                                AsyncInfo, /*FromMapper=*/true);

  return rc;
}

/// Internal function to do the mapping and transfer the data to the device
int targetDataBegin(ident_t *loc, DeviceTy &Device, int32_t arg_num,
                    void **args_base, void **args, int64_t *arg_sizes,
                    int64_t *arg_types, map_var_info_t *arg_names,
                    void **arg_mappers, AsyncInfoTy &AsyncInfo,
                    bool FromMapper) {
  // process each input.
  for (int32_t i = 0; i < arg_num; ++i) {
    // Ignore private variables and arrays - there is no mapping for them.
    if ((arg_types[i] & OMP_TGT_MAPTYPE_LITERAL) ||
        (arg_types[i] & OMP_TGT_MAPTYPE_PRIVATE))
      continue;

    if (arg_mappers && arg_mappers[i]) {
      // Instead of executing the regular path of targetDataBegin, call the
      // targetDataMapper variant which will call targetDataBegin again
      // with new arguments.
      DP("Calling targetDataMapper for the %dth argument\n", i);

      map_var_info_t arg_name = (!arg_names) ? nullptr : arg_names[i];
      int rc = targetDataMapper(loc, Device, args_base[i], args[i],
                                arg_sizes[i], arg_types[i], arg_name,
                                arg_mappers[i], AsyncInfo, targetDataBegin);

      if (rc != OFFLOAD_SUCCESS) {
        REPORT("Call to targetDataBegin via targetDataMapper for custom mapper"
               " failed.\n");
        return OFFLOAD_FAIL;
      }

      // Skip the rest of this function, continue to the next argument.
      continue;
    }

    void *HstPtrBegin = args[i];
    void *HstPtrBase = args_base[i];
    int64_t data_size = arg_sizes[i];
    map_var_info_t HstPtrName = (!arg_names) ? nullptr : arg_names[i];

    // Adjust for proper alignment if this is a combined entry (for structs).
    // Look at the next argument - if that is MEMBER_OF this one, then this one
    // is a combined entry. See the file-level comment on `Alignment` for why
    // the begin address is padded down to an 8-byte boundary.
    int64_t padding = 0;
    const int next_i = i + 1;
    if (getParentIndex(arg_types[i]) < 0 && next_i < arg_num &&
        getParentIndex(arg_types[next_i]) == i) {
      padding = (int64_t)HstPtrBegin % Alignment;
      if (padding) {
        DP("Using a padding of %" PRId64 " bytes for begin address " DPxMOD
           "\n",
           padding, DPxPTR(HstPtrBegin));
        HstPtrBegin = (char *)HstPtrBegin - padding;
        data_size += padding;
      }
    }

    // Address of pointer on the host and device, respectively.
    void *Pointer_HstPtrBegin, *PointerTgtPtrBegin;
    bool IsNew, Pointer_IsNew;
    bool IsHostPtr = false;
    bool IsImplicit = arg_types[i] & OMP_TGT_MAPTYPE_IMPLICIT;
    // Force the creation of a device side copy of the data when:
    // a close map modifier was associated with a map that contained a to.
    bool HasCloseModifier = arg_types[i] & OMP_TGT_MAPTYPE_CLOSE;
    bool HasPresentModifier = arg_types[i] & OMP_TGT_MAPTYPE_PRESENT;
    // UpdateRef is based on MEMBER_OF instead of TARGET_PARAM because if we
    // have reached this point via __tgt_target_data_begin and not __tgt_target
    // then no argument is marked as TARGET_PARAM ("omp target data map" is not
    // associated with a target region, so there are no target parameters).
    // This may be considered a hack, we could revise the scheme in the future.
    bool UpdateRef = !(arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF);
    if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ) {
      DP("Has a pointer entry: \n");
      // Base is address of pointer.
      //
      // Usually, the pointer is already allocated by this time. For example:
      //
      //   #pragma omp target map(s.p[0:N])
      //
      // The map entry for s comes first, and the PTR_AND_OBJ entry comes
      // afterward, so the pointer is already allocated by the time the
      // PTR_AND_OBJ entry is handled below, and PointerTgtPtrBegin is thus
      // non-null. However, "declare target link" can produce a PTR_AND_OBJ
      // entry for a global that might not already be allocated by the time the
      // PTR_AND_OBJ entry is handled below, and so the allocation might fail
      // when HasPresentModifier.
      PointerTgtPtrBegin = Device.getOrAllocTgtPtr(
          HstPtrBase, HstPtrBase, sizeof(void *), nullptr, Pointer_IsNew,
          IsHostPtr, IsImplicit, UpdateRef, HasCloseModifier,
          HasPresentModifier);
      if (!PointerTgtPtrBegin) {
        REPORT("Call to getOrAllocTgtPtr returned null pointer (%s).\n",
               HasPresentModifier ? "'present' map type modifier"
                                  : "device failure or illegal mapping");
        return OFFLOAD_FAIL;
      }
      DP("There are %zu bytes allocated at target address " DPxMOD
         " - is%s new"
         "\n",
         sizeof(void *), DPxPTR(PointerTgtPtrBegin),
         (Pointer_IsNew ? "" : " not"));
      Pointer_HstPtrBegin = HstPtrBase;
      // modify current entry.
      HstPtrBase = *(void **)HstPtrBase;
      // No need to update pointee ref count for the first element of the
      // subelement that comes from mapper.
      UpdateRef =
          (!FromMapper || i != 0); // subsequently update ref count of pointee
    }

    void *TgtPtrBegin = Device.getOrAllocTgtPtr(
        HstPtrBegin, HstPtrBase, data_size, HstPtrName, IsNew, IsHostPtr,
        IsImplicit, UpdateRef, HasCloseModifier, HasPresentModifier);
    // If data_size==0, then the argument could be a zero-length pointer to
    // NULL, so getOrAlloc() returning NULL is not an error.
    if (!TgtPtrBegin && (data_size || HasPresentModifier)) {
      REPORT("Call to getOrAllocTgtPtr returned null pointer (%s).\n",
             HasPresentModifier ? "'present' map type modifier"
                                : "device failure or illegal mapping");
      return OFFLOAD_FAIL;
    }
    DP("There are %" PRId64 " bytes allocated at target address " DPxMOD
       " - is%s new\n",
       data_size, DPxPTR(TgtPtrBegin), (IsNew ? "" : " not"));

    if (arg_types[i] & OMP_TGT_MAPTYPE_RETURN_PARAM) {
      // Hand the (base-adjusted) device address back to the caller via
      // args_base, preserving the host begin/base offset on the device side.
      uintptr_t Delta = (uintptr_t)HstPtrBegin - (uintptr_t)HstPtrBase;
      void *TgtPtrBase = (void *)((uintptr_t)TgtPtrBegin - Delta);
      DP("Returning device pointer " DPxMOD "\n", DPxPTR(TgtPtrBase));
      args_base[i] = TgtPtrBase;
    }

    if (arg_types[i] & OMP_TGT_MAPTYPE_TO) {
      bool copy = false;
      if (!(PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) ||
          HasCloseModifier) {
        if (IsNew || (arg_types[i] & OMP_TGT_MAPTYPE_ALWAYS)) {
          copy = true;
        } else if ((arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
                   !(arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) {
          // Copy data only if the "parent" struct has RefCount==1.
          // If this is a PTR_AND_OBJ entry, the OBJ is not part of the struct,
          // so exclude it from this check.
          int32_t parent_idx = getParentIndex(arg_types[i]);
          uint64_t parent_rc = Device.getMapEntryRefCnt(args[parent_idx]);
          assert(parent_rc > 0 && "parent struct not found");
          if (parent_rc == 1) {
            copy = true;
          }
        }
      }

      if (copy && !IsHostPtr) {
        DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n",
           data_size, DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin));
        int rt =
            Device.submitData(TgtPtrBegin, HstPtrBegin, data_size, AsyncInfo);
        if (rt != OFFLOAD_SUCCESS) {
          REPORT("Copying data to device failed.\n");
          return OFFLOAD_FAIL;
        }
      }
    }

    if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ && !IsHostPtr) {
      DP("Update pointer (" DPxMOD ") -> [" DPxMOD "]\n",
         DPxPTR(PointerTgtPtrBegin), DPxPTR(TgtPtrBegin));
      uint64_t Delta = (uint64_t)HstPtrBegin - (uint64_t)HstPtrBase;
      // The host-side source of this submit must outlive the (possibly
      // asynchronous) transfer, hence the AsyncInfo-owned storage slot.
      void *&TgtPtrBase = AsyncInfo.getVoidPtrLocation();
      TgtPtrBase = (void *)((uint64_t)TgtPtrBegin - Delta);
      int rt = Device.submitData(PointerTgtPtrBegin, &TgtPtrBase,
                                 sizeof(void *), AsyncInfo);
      if (rt != OFFLOAD_SUCCESS) {
        REPORT("Copying data to device failed.\n");
        return OFFLOAD_FAIL;
      }
      // create shadow pointers for this entry
      Device.ShadowMtx.lock();
      Device.ShadowPtrMap[Pointer_HstPtrBegin] = {
          HstPtrBase, PointerTgtPtrBegin, TgtPtrBase};
      Device.ShadowMtx.unlock();
    }
  }

  return OFFLOAD_SUCCESS;
}

namespace {
/// This structure contains information to deallocate a target pointer, aka.
/// used to call the function \p DeviceTy::deallocTgtPtr.
struct DeallocTgtPtrInfo {
  /// Host pointer used to look up into the map table
  void *HstPtrBegin;
  /// Size of the data
  int64_t DataSize;
  /// Whether it is forced to be removed from the map table
  bool ForceDelete;
  /// Whether it has \p close modifier
  bool HasCloseModifier;

  DeallocTgtPtrInfo(void *HstPtr, int64_t Size, bool ForceDelete,
                    bool HasCloseModifier)
      : HstPtrBegin(HstPtr), DataSize(Size), ForceDelete(ForceDelete),
        HasCloseModifier(HasCloseModifier) {}
};
} // namespace

/// Internal function to undo the mapping and retrieve the data from the
/// device.
int targetDataEnd(ident_t *loc, DeviceTy &Device, int32_t ArgNum,
                  void **ArgBases, void **Args, int64_t *ArgSizes,
                  int64_t *ArgTypes, map_var_info_t *ArgNames,
                  void **ArgMappers, AsyncInfoTy &AsyncInfo, bool FromMapper) {
  int Ret;
  std::vector<DeallocTgtPtrInfo> DeallocTgtPtrs;
  // process each input. Note: arguments are walked in REVERSE order so that
  // MEMBER_OF entries are released before their parent combined entry.
  for (int32_t I = ArgNum - 1; I >= 0; --I) {
    // Ignore private variables and arrays - there is no mapping for them.
    // Also, ignore the use_device_ptr directive, it has no effect here.
    if ((ArgTypes[I] & OMP_TGT_MAPTYPE_LITERAL) ||
        (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE))
      continue;

    if (ArgMappers && ArgMappers[I]) {
      // Instead of executing the regular path of targetDataEnd, call the
      // targetDataMapper variant which will call targetDataEnd again
      // with new arguments.
      DP("Calling targetDataMapper for the %dth argument\n", I);

      map_var_info_t ArgName = (!ArgNames) ? nullptr : ArgNames[I];
      Ret = targetDataMapper(loc, Device, ArgBases[I], Args[I], ArgSizes[I],
                             ArgTypes[I], ArgName, ArgMappers[I], AsyncInfo,
                             targetDataEnd);

      if (Ret != OFFLOAD_SUCCESS) {
        REPORT("Call to targetDataEnd via targetDataMapper for custom mapper"
               " failed.\n");
        return OFFLOAD_FAIL;
      }

      // Skip the rest of this function, continue to the next argument.
      continue;
    }

    void *HstPtrBegin = Args[I];
    int64_t DataSize = ArgSizes[I];
    // Adjust for proper alignment if this is a combined entry (for structs).
    // Look at the next argument - if that is MEMBER_OF this one, then this one
    // is a combined entry. Must mirror the padding applied in targetDataBegin
    // so the same map-table key is looked up.
    const int NextI = I + 1;
    if (getParentIndex(ArgTypes[I]) < 0 && NextI < ArgNum &&
        getParentIndex(ArgTypes[NextI]) == I) {
      int64_t Padding = (int64_t)HstPtrBegin % Alignment;
      if (Padding) {
        DP("Using a Padding of %" PRId64 " bytes for begin address " DPxMOD
           "\n",
           Padding, DPxPTR(HstPtrBegin));
        HstPtrBegin = (char *)HstPtrBegin - Padding;
        DataSize += Padding;
      }
    }

    bool IsLast, IsHostPtr;
    bool IsImplicit = ArgTypes[I] & OMP_TGT_MAPTYPE_IMPLICIT;
    bool UpdateRef = !(ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) ||
                     (ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ &&
                      (!FromMapper || I != ArgNum - 1));
    bool ForceDelete = ArgTypes[I] & OMP_TGT_MAPTYPE_DELETE;
    bool HasCloseModifier = ArgTypes[I] & OMP_TGT_MAPTYPE_CLOSE;
    bool HasPresentModifier = ArgTypes[I] & OMP_TGT_MAPTYPE_PRESENT;

    // If PTR_AND_OBJ, HstPtrBegin is address of pointee
    void *TgtPtrBegin = Device.getTgtPtrBegin(
        HstPtrBegin, DataSize, IsLast, UpdateRef, IsHostPtr, !IsImplicit);
    if (!TgtPtrBegin && (DataSize || HasPresentModifier)) {
      DP("Mapping does not exist (%s)\n",
         (HasPresentModifier ? "'present' map type modifier" : "ignored"));
      if (HasPresentModifier) {
        // OpenMP 5.1, sec. 2.21.7.1 "map Clause", p. 350 L10-13:
        // "If a map clause appears on a target, target data, target enter data
        // or target exit data construct with a present map-type-modifier then
        // on entry to the region if the corresponding list item does not
        // appear in the device data environment then an error occurs and the
        // program terminates."
        //
        // This should be an error upon entering an "omp target exit data". It
        // should not be an error upon exiting an "omp target data" or "omp
        // target". For "omp target data", Clang thus doesn't include present
        // modifiers for end calls. For "omp target", we have not found a valid
        // OpenMP program for which the error matters: it appears that, if a
        // program can guarantee that data is present at the beginning of an
        // "omp target" region so that there's no error there, that data is
        // also guaranteed to be present at the end.
        MESSAGE("device mapping required by 'present' map type modifier does "
                "not exist for host address " DPxMOD " (%" PRId64 " bytes)",
                DPxPTR(HstPtrBegin), DataSize);
        return OFFLOAD_FAIL;
      }
    } else {
      DP("There are %" PRId64 " bytes allocated at target address " DPxMOD
         " - is%s last\n",
         DataSize, DPxPTR(TgtPtrBegin), (IsLast ? "" : " not"));
    }

    // OpenMP 5.1, sec. 2.21.7.1 "map Clause", p. 351 L14-16:
    // "If the map clause appears on a target, target data, or target exit data
    // construct and a corresponding list item of the original list item is not
    // present in the device data environment on exit from the region then the
    // list item is ignored."
    if (!TgtPtrBegin)
      continue;

    bool DelEntry = IsLast || ForceDelete;

    // If the last element from the mapper (for end transfer args comes in
    // reverse order), do not remove the partial entry, the parent struct still
    // exists.
    if (((ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
         !(ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) ||
        (ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ && FromMapper &&
         I == ArgNum - 1)) {
      DelEntry = false; // protect parent struct from being deallocated
    }

    if ((ArgTypes[I] & OMP_TGT_MAPTYPE_FROM) || DelEntry) {
      // Move data back to the host
      if (ArgTypes[I] & OMP_TGT_MAPTYPE_FROM) {
        bool Always = ArgTypes[I] & OMP_TGT_MAPTYPE_ALWAYS;
        bool CopyMember = false;
        if (!(PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY) ||
            HasCloseModifier) {
          if ((ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
              !(ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) {
            // Copy data only if the "parent" struct has RefCount==1.
            int32_t ParentIdx = getParentIndex(ArgTypes[I]);
            uint64_t ParentRC = Device.getMapEntryRefCnt(Args[ParentIdx]);
            assert(ParentRC > 0 && "parent struct not found");
            if (ParentRC == 1)
              CopyMember = true;
          }
        }

        if ((DelEntry || Always || CopyMember) &&
            !(PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
              TgtPtrBegin == HstPtrBegin)) {
          DP("Moving %" PRId64 " bytes (tgt:" DPxMOD ") -> (hst:" DPxMOD ")\n",
             DataSize, DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin));
          Ret = Device.retrieveData(HstPtrBegin, TgtPtrBegin, DataSize,
                                    AsyncInfo);
          if (Ret != OFFLOAD_SUCCESS) {
            REPORT("Copying data from device failed.\n");
            return OFFLOAD_FAIL;
          }
        }
      }

      // If we copied back to the host a struct/array containing pointers, we
      // need to restore the original host pointer values from their shadow
      // copies. If the struct is going to be deallocated, remove any remaining
      // shadow pointer entries for this struct.
      uintptr_t LB = (uintptr_t)HstPtrBegin;
      uintptr_t UB = (uintptr_t)HstPtrBegin + DataSize;
      Device.ShadowMtx.lock();
      for (ShadowPtrListTy::iterator Itr = Device.ShadowPtrMap.begin();
           Itr != Device.ShadowPtrMap.end();) {
        void **ShadowHstPtrAddr = (void **)Itr->first;

        // An STL map is sorted on its keys; use this property
        // to quickly determine when to break out of the loop.
        if ((uintptr_t)ShadowHstPtrAddr < LB) {
          ++Itr;
          continue;
        }
        if ((uintptr_t)ShadowHstPtrAddr >= UB)
          break;

        // If we copied the struct to the host, we need to restore the pointer.
        if (ArgTypes[I] & OMP_TGT_MAPTYPE_FROM) {
          DP("Restoring original host pointer value " DPxMOD " for host "
             "pointer " DPxMOD "\n",
             DPxPTR(Itr->second.HstPtrVal), DPxPTR(ShadowHstPtrAddr));
          *ShadowHstPtrAddr = Itr->second.HstPtrVal;
        }
        // If the struct is to be deallocated, remove the shadow entry.
        if (DelEntry) {
          DP("Removing shadow pointer " DPxMOD "\n", DPxPTR(ShadowHstPtrAddr));
          Itr = Device.ShadowPtrMap.erase(Itr);
        } else {
          ++Itr;
        }
      }
      Device.ShadowMtx.unlock();

      // Add pointer to the buffer for later deallocation
      if (DelEntry)
        DeallocTgtPtrs.emplace_back(HstPtrBegin, DataSize, ForceDelete,
                                    HasCloseModifier);
    }
  }

  // TODO: We should not synchronize here but pass the AsyncInfo object to the
  //       allocate/deallocate device APIs.
  //
  // We need to synchronize before deallocating data.
  Ret = AsyncInfo.synchronize();
  if (Ret != OFFLOAD_SUCCESS)
    return OFFLOAD_FAIL;

  // Deallocate target pointer
  for (DeallocTgtPtrInfo &Info : DeallocTgtPtrs) {
    Ret = Device.deallocTgtPtr(Info.HstPtrBegin, Info.DataSize,
                               Info.ForceDelete, Info.HasCloseModifier);
    if (Ret != OFFLOAD_SUCCESS) {
      REPORT("Deallocating data from device failed.\n");
      return OFFLOAD_FAIL;
    }
  }

  return OFFLOAD_SUCCESS;
}

/// Handle a "target update" motion for one contiguous host range: move data
/// to and/or from the device and keep shadow pointers consistent.
static int targetDataContiguous(ident_t *loc, DeviceTy &Device, void *ArgsBase,
                                void *HstPtrBegin, int64_t ArgSize,
                                int64_t ArgType, AsyncInfoTy &AsyncInfo) {
  TIMESCOPE_WITH_IDENT(loc);
  bool IsLast, IsHostPtr;
  void *TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBegin, ArgSize, IsLast,
                                            false, IsHostPtr,
                                            /*MustContain=*/true);
  if (!TgtPtrBegin) {
    DP("hst data:" DPxMOD " not found, becomes a noop\n", DPxPTR(HstPtrBegin));
    if (ArgType & OMP_TGT_MAPTYPE_PRESENT) {
      MESSAGE("device mapping required by 'present' motion modifier does not "
              "exist for host address " DPxMOD " (%" PRId64 " bytes)",
              DPxPTR(HstPtrBegin), ArgSize);
      return OFFLOAD_FAIL;
    }
    return OFFLOAD_SUCCESS;
  }

  if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
      TgtPtrBegin == HstPtrBegin) {
    DP("hst data:" DPxMOD " unified and shared, becomes a noop\n",
       DPxPTR(HstPtrBegin));
    return OFFLOAD_SUCCESS;
  }

  if (ArgType & OMP_TGT_MAPTYPE_FROM) {
    DP("Moving %" PRId64 " bytes (tgt:" DPxMOD ") -> (hst:" DPxMOD ")\n",
       ArgSize, DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin));
    int Ret = Device.retrieveData(HstPtrBegin, TgtPtrBegin, ArgSize,
                                  AsyncInfo);
    if (Ret != OFFLOAD_SUCCESS) {
      REPORT("Copying data from device failed.\n");
      return OFFLOAD_FAIL;
    }

    // After copying device memory to the host, any embedded device pointers
    // inside [LB, UB) must be rewritten back to their original host values.
    uintptr_t LB = (uintptr_t)HstPtrBegin;
    uintptr_t UB = (uintptr_t)HstPtrBegin + ArgSize;
    Device.ShadowMtx.lock();
    for (ShadowPtrListTy::iterator IT = Device.ShadowPtrMap.begin();
         IT != Device.ShadowPtrMap.end(); ++IT) {
      void **ShadowHstPtrAddr = (void **)IT->first;
      if ((uintptr_t)ShadowHstPtrAddr < LB)
        continue;
      if ((uintptr_t)ShadowHstPtrAddr >= UB)
        break;
      DP("Restoring original host pointer value " DPxMOD
         " for host pointer " DPxMOD "\n",
         DPxPTR(IT->second.HstPtrVal), DPxPTR(ShadowHstPtrAddr));
      *ShadowHstPtrAddr = IT->second.HstPtrVal;
    }
    Device.ShadowMtx.unlock();
  }

  if (ArgType & OMP_TGT_MAPTYPE_TO) {
    DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n",
       ArgSize, DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin));
    int Ret = Device.submitData(TgtPtrBegin, HstPtrBegin, ArgSize, AsyncInfo);
    if (Ret != OFFLOAD_SUCCESS) {
      REPORT("Copying data to device failed.\n");
      return OFFLOAD_FAIL;
    }

    // The host-to-device copy just overwrote any device-side pointer fields
    // with host pointer values; re-submit the correct device pointer values
    // recorded in the shadow map.
    uintptr_t LB = (uintptr_t)HstPtrBegin;
    uintptr_t UB = (uintptr_t)HstPtrBegin + ArgSize;
    Device.ShadowMtx.lock();
    for (ShadowPtrListTy::iterator IT = Device.ShadowPtrMap.begin();
         IT != Device.ShadowPtrMap.end(); ++IT) {
      void **ShadowHstPtrAddr = (void **)IT->first;
      if ((uintptr_t)ShadowHstPtrAddr < LB)
        continue;
      if ((uintptr_t)ShadowHstPtrAddr >= UB)
        break;
      DP("Restoring original target pointer value " DPxMOD " for target "
         "pointer " DPxMOD "\n",
         DPxPTR(IT->second.TgtPtrVal), DPxPTR(IT->second.TgtPtrAddr));
      Ret = Device.submitData(IT->second.TgtPtrAddr, &IT->second.TgtPtrVal,
                              sizeof(void *), AsyncInfo);
      if (Ret != OFFLOAD_SUCCESS) {
        REPORT("Copying data to device failed.\n");
        Device.ShadowMtx.unlock();
        return OFFLOAD_FAIL;
      }
    }
    Device.ShadowMtx.unlock();
  }
  return OFFLOAD_SUCCESS;
}

static int targetDataNonContiguous(ident_t *loc, DeviceTy &Device,
                                   void *ArgsBase,
                                   __tgt_target_non_contig *NonContig,
                                   uint64_t Size, int64_t ArgType,
                                   int CurrentDim, int DimSize,
                                   uint64_t Offset, AsyncInfoTy &AsyncInfo) {
  TIMESCOPE_WITH_IDENT(loc);
  int
Ret = OFFLOAD_SUCCESS; 890 if (CurrentDim < DimSize) { 891 for (unsigned int I = 0; I < NonContig[CurrentDim].Count; ++I) { 892 uint64_t CurOffset = 893 (NonContig[CurrentDim].Offset + I) * NonContig[CurrentDim].Stride; 894 // we only need to transfer the first element for the last dimension 895 // since we've already got a contiguous piece. 896 if (CurrentDim != DimSize - 1 || I == 0) { 897 Ret = targetDataNonContiguous(loc, Device, ArgsBase, NonContig, Size, 898 ArgType, CurrentDim + 1, DimSize, 899 Offset + CurOffset, AsyncInfo); 900 // Stop the whole process if any contiguous piece returns anything 901 // other than OFFLOAD_SUCCESS. 902 if (Ret != OFFLOAD_SUCCESS) 903 return Ret; 904 } 905 } 906 } else { 907 char *Ptr = (char *)ArgsBase + Offset; 908 DP("Transfer of non-contiguous : host ptr " DPxMOD " offset %" PRIu64 909 " len %" PRIu64 "\n", 910 DPxPTR(Ptr), Offset, Size); 911 Ret = targetDataContiguous(loc, Device, ArgsBase, Ptr, Size, ArgType, 912 AsyncInfo); 913 } 914 return Ret; 915 } 916 917 static int getNonContigMergedDimension(__tgt_target_non_contig *NonContig, 918 int32_t DimSize) { 919 int RemovedDim = 0; 920 for (int I = DimSize - 1; I > 0; --I) { 921 if (NonContig[I].Count * NonContig[I].Stride == NonContig[I - 1].Stride) 922 RemovedDim++; 923 } 924 return RemovedDim; 925 } 926 927 /// Internal function to pass data to/from the target. 928 int targetDataUpdate(ident_t *loc, DeviceTy &Device, int32_t ArgNum, 929 void **ArgsBase, void **Args, int64_t *ArgSizes, 930 int64_t *ArgTypes, map_var_info_t *ArgNames, 931 void **ArgMappers, AsyncInfoTy &AsyncInfo, bool) { 932 // process each input. 
933 for (int32_t I = 0; I < ArgNum; ++I) { 934 if ((ArgTypes[I] & OMP_TGT_MAPTYPE_LITERAL) || 935 (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE)) 936 continue; 937 938 if (ArgMappers && ArgMappers[I]) { 939 // Instead of executing the regular path of targetDataUpdate, call the 940 // targetDataMapper variant which will call targetDataUpdate again 941 // with new arguments. 942 DP("Calling targetDataMapper for the %dth argument\n", I); 943 944 map_var_info_t ArgName = (!ArgNames) ? nullptr : ArgNames[I]; 945 int Ret = targetDataMapper(loc, Device, ArgsBase[I], Args[I], ArgSizes[I], 946 ArgTypes[I], ArgName, ArgMappers[I], AsyncInfo, 947 targetDataUpdate); 948 949 if (Ret != OFFLOAD_SUCCESS) { 950 REPORT("Call to targetDataUpdate via targetDataMapper for custom mapper" 951 " failed.\n"); 952 return OFFLOAD_FAIL; 953 } 954 955 // Skip the rest of this function, continue to the next argument. 956 continue; 957 } 958 959 int Ret = OFFLOAD_SUCCESS; 960 961 if (ArgTypes[I] & OMP_TGT_MAPTYPE_NON_CONTIG) { 962 __tgt_target_non_contig *NonContig = (__tgt_target_non_contig *)Args[I]; 963 int32_t DimSize = ArgSizes[I]; 964 uint64_t Size = 965 NonContig[DimSize - 1].Count * NonContig[DimSize - 1].Stride; 966 int32_t MergedDim = getNonContigMergedDimension(NonContig, DimSize); 967 Ret = targetDataNonContiguous( 968 loc, Device, ArgsBase[I], NonContig, Size, ArgTypes[I], 969 /*current_dim=*/0, DimSize - MergedDim, /*offset=*/0, AsyncInfo); 970 } else { 971 Ret = targetDataContiguous(loc, Device, ArgsBase[I], Args[I], ArgSizes[I], 972 ArgTypes[I], AsyncInfo); 973 } 974 if (Ret == OFFLOAD_FAIL) 975 return OFFLOAD_FAIL; 976 } 977 return OFFLOAD_SUCCESS; 978 } 979 980 static const unsigned LambdaMapping = OMP_TGT_MAPTYPE_PTR_AND_OBJ | 981 OMP_TGT_MAPTYPE_LITERAL | 982 OMP_TGT_MAPTYPE_IMPLICIT; 983 static bool isLambdaMapping(int64_t Mapping) { 984 return (Mapping & LambdaMapping) == LambdaMapping; 985 } 986 987 namespace { 988 /// Find the table information in the map or look it up in the 
translation 989 /// tables. 990 TableMap *getTableMap(void *HostPtr) { 991 std::lock_guard<std::mutex> TblMapLock(PM->TblMapMtx); 992 HostPtrToTableMapTy::iterator TableMapIt = 993 PM->HostPtrToTableMap.find(HostPtr); 994 995 if (TableMapIt != PM->HostPtrToTableMap.end()) 996 return &TableMapIt->second; 997 998 // We don't have a map. So search all the registered libraries. 999 TableMap *TM = nullptr; 1000 std::lock_guard<std::mutex> TrlTblLock(PM->TrlTblMtx); 1001 for (HostEntriesBeginToTransTableTy::iterator Itr = 1002 PM->HostEntriesBeginToTransTable.begin(); 1003 Itr != PM->HostEntriesBeginToTransTable.end(); ++Itr) { 1004 // get the translation table (which contains all the good info). 1005 TranslationTable *TransTable = &Itr->second; 1006 // iterate over all the host table entries to see if we can locate the 1007 // host_ptr. 1008 __tgt_offload_entry *Cur = TransTable->HostTable.EntriesBegin; 1009 for (uint32_t I = 0; Cur < TransTable->HostTable.EntriesEnd; ++Cur, ++I) { 1010 if (Cur->addr != HostPtr) 1011 continue; 1012 // we got a match, now fill the HostPtrToTableMap so that we 1013 // may avoid this search next time. 1014 TM = &(PM->HostPtrToTableMap)[HostPtr]; 1015 TM->Table = TransTable; 1016 TM->Index = I; 1017 return TM; 1018 } 1019 } 1020 1021 return nullptr; 1022 } 1023 1024 /// Get loop trip count 1025 /// FIXME: This function will not work right if calling 1026 /// __kmpc_push_target_tripcount in one thread but doing offloading in another 1027 /// thread, which might occur when we call task yield. 
1028 uint64_t getLoopTripCount(int64_t DeviceId) { 1029 DeviceTy &Device = PM->Devices[DeviceId]; 1030 uint64_t LoopTripCount = 0; 1031 1032 { 1033 std::lock_guard<std::mutex> TblMapLock(PM->TblMapMtx); 1034 auto I = Device.LoopTripCnt.find(__kmpc_global_thread_num(NULL)); 1035 if (I != Device.LoopTripCnt.end()) { 1036 LoopTripCount = I->second; 1037 Device.LoopTripCnt.erase(I); 1038 DP("loop trip count is %" PRIu64 ".\n", LoopTripCount); 1039 } 1040 } 1041 1042 return LoopTripCount; 1043 } 1044 1045 /// A class manages private arguments in a target region. 1046 class PrivateArgumentManagerTy { 1047 /// A data structure for the information of first-private arguments. We can 1048 /// use this information to optimize data transfer by packing all 1049 /// first-private arguments and transfer them all at once. 1050 struct FirstPrivateArgInfoTy { 1051 /// The index of the element in \p TgtArgs corresponding to the argument 1052 const int Index; 1053 /// Host pointer begin 1054 const char *HstPtrBegin; 1055 /// Host pointer end 1056 const char *HstPtrEnd; 1057 /// Aligned size 1058 const int64_t AlignedSize; 1059 /// Host pointer name 1060 const map_var_info_t HstPtrName = nullptr; 1061 1062 FirstPrivateArgInfoTy(int Index, const void *HstPtr, int64_t Size, 1063 const map_var_info_t HstPtrName = nullptr) 1064 : Index(Index), HstPtrBegin(reinterpret_cast<const char *>(HstPtr)), 1065 HstPtrEnd(HstPtrBegin + Size), AlignedSize(Size + Size % Alignment), 1066 HstPtrName(HstPtrName) {} 1067 }; 1068 1069 /// A vector of target pointers for all private arguments 1070 std::vector<void *> TgtPtrs; 1071 1072 /// A vector of information of all first-private arguments to be packed 1073 std::vector<FirstPrivateArgInfoTy> FirstPrivateArgInfo; 1074 /// Host buffer for all arguments to be packed 1075 std::vector<char> FirstPrivateArgBuffer; 1076 /// The total size of all arguments to be packed 1077 int64_t FirstPrivateArgSize = 0; 1078 1079 /// A reference to the \p DeviceTy object 1080 
DeviceTy &Device; 1081 /// A pointer to a \p AsyncInfoTy object 1082 AsyncInfoTy &AsyncInfo; 1083 1084 // TODO: What would be the best value here? Should we make it configurable? 1085 // If the size is larger than this threshold, we will allocate and transfer it 1086 // immediately instead of packing it. 1087 static constexpr const int64_t FirstPrivateArgSizeThreshold = 1024; 1088 1089 public: 1090 /// Constructor 1091 PrivateArgumentManagerTy(DeviceTy &Dev, AsyncInfoTy &AsyncInfo) 1092 : Device(Dev), AsyncInfo(AsyncInfo) {} 1093 1094 /// Add a private argument 1095 int addArg(void *HstPtr, int64_t ArgSize, int64_t ArgOffset, 1096 bool IsFirstPrivate, void *&TgtPtr, int TgtArgsIndex, 1097 const map_var_info_t HstPtrName = nullptr) { 1098 // If the argument is not first-private, or its size is greater than a 1099 // predefined threshold, we will allocate memory and issue the transfer 1100 // immediately. 1101 if (ArgSize > FirstPrivateArgSizeThreshold || !IsFirstPrivate) { 1102 TgtPtr = Device.allocData(ArgSize, HstPtr); 1103 if (!TgtPtr) { 1104 DP("Data allocation for %sprivate array " DPxMOD " failed.\n", 1105 (IsFirstPrivate ? "first-" : ""), DPxPTR(HstPtr)); 1106 return OFFLOAD_FAIL; 1107 } 1108 #ifdef OMPTARGET_DEBUG 1109 void *TgtPtrBase = (void *)((intptr_t)TgtPtr + ArgOffset); 1110 DP("Allocated %" PRId64 " bytes of target memory at " DPxMOD 1111 " for %sprivate array " DPxMOD " - pushing target argument " DPxMOD 1112 "\n", 1113 ArgSize, DPxPTR(TgtPtr), (IsFirstPrivate ? 
"first-" : ""), 1114 DPxPTR(HstPtr), DPxPTR(TgtPtrBase)); 1115 #endif 1116 // If first-private, copy data from host 1117 if (IsFirstPrivate) { 1118 int Ret = Device.submitData(TgtPtr, HstPtr, ArgSize, AsyncInfo); 1119 if (Ret != OFFLOAD_SUCCESS) { 1120 DP("Copying data to device failed, failed.\n"); 1121 return OFFLOAD_FAIL; 1122 } 1123 } 1124 TgtPtrs.push_back(TgtPtr); 1125 } else { 1126 DP("Firstprivate array " DPxMOD " of size %" PRId64 " will be packed\n", 1127 DPxPTR(HstPtr), ArgSize); 1128 // When reach this point, the argument must meet all following 1129 // requirements: 1130 // 1. Its size does not exceed the threshold (see the comment for 1131 // FirstPrivateArgSizeThreshold); 1132 // 2. It must be first-private (needs to be mapped to target device). 1133 // We will pack all this kind of arguments to transfer them all at once 1134 // to reduce the number of data transfer. We will not take 1135 // non-first-private arguments, aka. private arguments that doesn't need 1136 // to be mapped to target device, into account because data allocation 1137 // can be very efficient with memory manager. 1138 1139 // Placeholder value 1140 TgtPtr = nullptr; 1141 FirstPrivateArgInfo.emplace_back(TgtArgsIndex, HstPtr, ArgSize, 1142 HstPtrName); 1143 FirstPrivateArgSize += FirstPrivateArgInfo.back().AlignedSize; 1144 } 1145 1146 return OFFLOAD_SUCCESS; 1147 } 1148 1149 /// Pack first-private arguments, replace place holder pointers in \p TgtArgs, 1150 /// and start the transfer. 
1151 int packAndTransfer(std::vector<void *> &TgtArgs) { 1152 if (!FirstPrivateArgInfo.empty()) { 1153 assert(FirstPrivateArgSize != 0 && 1154 "FirstPrivateArgSize is 0 but FirstPrivateArgInfo is empty"); 1155 FirstPrivateArgBuffer.resize(FirstPrivateArgSize, 0); 1156 auto Itr = FirstPrivateArgBuffer.begin(); 1157 // Copy all host data to this buffer 1158 for (FirstPrivateArgInfoTy &Info : FirstPrivateArgInfo) { 1159 std::copy(Info.HstPtrBegin, Info.HstPtrEnd, Itr); 1160 Itr = std::next(Itr, Info.AlignedSize); 1161 } 1162 // Allocate target memory 1163 void *TgtPtr = 1164 Device.allocData(FirstPrivateArgSize, FirstPrivateArgBuffer.data()); 1165 if (TgtPtr == nullptr) { 1166 DP("Failed to allocate target memory for private arguments.\n"); 1167 return OFFLOAD_FAIL; 1168 } 1169 TgtPtrs.push_back(TgtPtr); 1170 DP("Allocated %" PRId64 " bytes of target memory at " DPxMOD "\n", 1171 FirstPrivateArgSize, DPxPTR(TgtPtr)); 1172 // Transfer data to target device 1173 int Ret = Device.submitData(TgtPtr, FirstPrivateArgBuffer.data(), 1174 FirstPrivateArgSize, AsyncInfo); 1175 if (Ret != OFFLOAD_SUCCESS) { 1176 DP("Failed to submit data of private arguments.\n"); 1177 return OFFLOAD_FAIL; 1178 } 1179 // Fill in all placeholder pointers 1180 auto TP = reinterpret_cast<uintptr_t>(TgtPtr); 1181 for (FirstPrivateArgInfoTy &Info : FirstPrivateArgInfo) { 1182 void *&Ptr = TgtArgs[Info.Index]; 1183 assert(Ptr == nullptr && "Target pointer is already set by mistaken"); 1184 Ptr = reinterpret_cast<void *>(TP); 1185 TP += Info.AlignedSize; 1186 DP("Firstprivate array " DPxMOD " of size %" PRId64 " mapped to " DPxMOD 1187 "\n", 1188 DPxPTR(Info.HstPtrBegin), Info.HstPtrEnd - Info.HstPtrBegin, 1189 DPxPTR(Ptr)); 1190 } 1191 } 1192 1193 return OFFLOAD_SUCCESS; 1194 } 1195 1196 /// Free all target memory allocated for private arguments 1197 int free() { 1198 for (void *P : TgtPtrs) { 1199 int Ret = Device.deleteData(P); 1200 if (Ret != OFFLOAD_SUCCESS) { 1201 DP("Deallocation of 
(first-)private arrays failed.\n"); 1202 return OFFLOAD_FAIL; 1203 } 1204 } 1205 1206 TgtPtrs.clear(); 1207 1208 return OFFLOAD_SUCCESS; 1209 } 1210 }; 1211 1212 /// Process data before launching the kernel, including calling targetDataBegin 1213 /// to map and transfer data to target device, transferring (first-)private 1214 /// variables. 1215 static int processDataBefore(ident_t *loc, int64_t DeviceId, void *HostPtr, 1216 int32_t ArgNum, void **ArgBases, void **Args, 1217 int64_t *ArgSizes, int64_t *ArgTypes, 1218 map_var_info_t *ArgNames, void **ArgMappers, 1219 std::vector<void *> &TgtArgs, 1220 std::vector<ptrdiff_t> &TgtOffsets, 1221 PrivateArgumentManagerTy &PrivateArgumentManager, 1222 AsyncInfoTy &AsyncInfo) { 1223 TIMESCOPE_WITH_NAME_AND_IDENT("mappingBeforeTargetRegion", loc); 1224 DeviceTy &Device = PM->Devices[DeviceId]; 1225 int Ret = targetDataBegin(loc, Device, ArgNum, ArgBases, Args, ArgSizes, 1226 ArgTypes, ArgNames, ArgMappers, AsyncInfo); 1227 if (Ret != OFFLOAD_SUCCESS) { 1228 REPORT("Call to targetDataBegin failed, abort target.\n"); 1229 return OFFLOAD_FAIL; 1230 } 1231 1232 // List of (first-)private arrays allocated for this target region 1233 std::vector<int> TgtArgsPositions(ArgNum, -1); 1234 1235 for (int32_t I = 0; I < ArgNum; ++I) { 1236 if (!(ArgTypes[I] & OMP_TGT_MAPTYPE_TARGET_PARAM)) { 1237 // This is not a target parameter, do not push it into TgtArgs. 1238 // Check for lambda mapping. 1239 if (isLambdaMapping(ArgTypes[I])) { 1240 assert((ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) && 1241 "PTR_AND_OBJ must be also MEMBER_OF."); 1242 unsigned Idx = getParentIndex(ArgTypes[I]); 1243 int TgtIdx = TgtArgsPositions[Idx]; 1244 assert(TgtIdx != -1 && "Base address must be translated already."); 1245 // The parent lambda must be processed already and it must be the last 1246 // in TgtArgs and TgtOffsets arrays. 
1247 void *HstPtrVal = Args[I]; 1248 void *HstPtrBegin = ArgBases[I]; 1249 void *HstPtrBase = Args[Idx]; 1250 bool IsLast, IsHostPtr; // unused. 1251 void *TgtPtrBase = 1252 (void *)((intptr_t)TgtArgs[TgtIdx] + TgtOffsets[TgtIdx]); 1253 DP("Parent lambda base " DPxMOD "\n", DPxPTR(TgtPtrBase)); 1254 uint64_t Delta = (uint64_t)HstPtrBegin - (uint64_t)HstPtrBase; 1255 void *TgtPtrBegin = (void *)((uintptr_t)TgtPtrBase + Delta); 1256 void *&PointerTgtPtrBegin = AsyncInfo.getVoidPtrLocation(); 1257 PointerTgtPtrBegin = Device.getTgtPtrBegin(HstPtrVal, ArgSizes[I], 1258 IsLast, false, IsHostPtr); 1259 if (!PointerTgtPtrBegin) { 1260 DP("No lambda captured variable mapped (" DPxMOD ") - ignored\n", 1261 DPxPTR(HstPtrVal)); 1262 continue; 1263 } 1264 if (PM->RTLs.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY && 1265 TgtPtrBegin == HstPtrBegin) { 1266 DP("Unified memory is active, no need to map lambda captured" 1267 "variable (" DPxMOD ")\n", 1268 DPxPTR(HstPtrVal)); 1269 continue; 1270 } 1271 DP("Update lambda reference (" DPxMOD ") -> [" DPxMOD "]\n", 1272 DPxPTR(PointerTgtPtrBegin), DPxPTR(TgtPtrBegin)); 1273 Ret = Device.submitData(TgtPtrBegin, &PointerTgtPtrBegin, 1274 sizeof(void *), AsyncInfo); 1275 if (Ret != OFFLOAD_SUCCESS) { 1276 REPORT("Copying data to device failed.\n"); 1277 return OFFLOAD_FAIL; 1278 } 1279 } 1280 continue; 1281 } 1282 void *HstPtrBegin = Args[I]; 1283 void *HstPtrBase = ArgBases[I]; 1284 void *TgtPtrBegin; 1285 map_var_info_t HstPtrName = (!ArgNames) ? nullptr : ArgNames[I]; 1286 ptrdiff_t TgtBaseOffset; 1287 bool IsLast, IsHostPtr; // unused. 
1288 if (ArgTypes[I] & OMP_TGT_MAPTYPE_LITERAL) { 1289 DP("Forwarding first-private value " DPxMOD " to the target construct\n", 1290 DPxPTR(HstPtrBase)); 1291 TgtPtrBegin = HstPtrBase; 1292 TgtBaseOffset = 0; 1293 } else if (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE) { 1294 TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin; 1295 // Can be marked for optimization if the next argument(s) do(es) not 1296 // depend on this one. 1297 const bool IsFirstPrivate = 1298 (I >= ArgNum - 1 || !(ArgTypes[I + 1] & OMP_TGT_MAPTYPE_MEMBER_OF)); 1299 Ret = PrivateArgumentManager.addArg( 1300 HstPtrBegin, ArgSizes[I], TgtBaseOffset, IsFirstPrivate, TgtPtrBegin, 1301 TgtArgs.size(), HstPtrName); 1302 if (Ret != OFFLOAD_SUCCESS) { 1303 REPORT("Failed to process %sprivate argument " DPxMOD "\n", 1304 (IsFirstPrivate ? "first-" : ""), DPxPTR(HstPtrBegin)); 1305 return OFFLOAD_FAIL; 1306 } 1307 } else { 1308 if (ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ) 1309 HstPtrBase = *reinterpret_cast<void **>(HstPtrBase); 1310 TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBegin, ArgSizes[I], IsLast, 1311 false, IsHostPtr); 1312 TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin; 1313 #ifdef OMPTARGET_DEBUG 1314 void *TgtPtrBase = (void *)((intptr_t)TgtPtrBegin + TgtBaseOffset); 1315 DP("Obtained target argument " DPxMOD " from host pointer " DPxMOD "\n", 1316 DPxPTR(TgtPtrBase), DPxPTR(HstPtrBegin)); 1317 #endif 1318 } 1319 TgtArgsPositions[I] = TgtArgs.size(); 1320 TgtArgs.push_back(TgtPtrBegin); 1321 TgtOffsets.push_back(TgtBaseOffset); 1322 } 1323 1324 assert(TgtArgs.size() == TgtOffsets.size() && 1325 "Size mismatch in arguments and offsets"); 1326 1327 // Pack and transfer first-private arguments 1328 Ret = PrivateArgumentManager.packAndTransfer(TgtArgs); 1329 if (Ret != OFFLOAD_SUCCESS) { 1330 DP("Failed to pack and transfer first private arguments\n"); 1331 return OFFLOAD_FAIL; 1332 } 1333 1334 return OFFLOAD_SUCCESS; 1335 } 1336 1337 /// Process data after launching the 
kernel, including transferring data back to 1338 /// host if needed and deallocating target memory of (first-)private variables. 1339 static int processDataAfter(ident_t *loc, int64_t DeviceId, void *HostPtr, 1340 int32_t ArgNum, void **ArgBases, void **Args, 1341 int64_t *ArgSizes, int64_t *ArgTypes, 1342 map_var_info_t *ArgNames, void **ArgMappers, 1343 PrivateArgumentManagerTy &PrivateArgumentManager, 1344 AsyncInfoTy &AsyncInfo) { 1345 TIMESCOPE_WITH_NAME_AND_IDENT("mappingAfterTargetRegion", loc); 1346 DeviceTy &Device = PM->Devices[DeviceId]; 1347 1348 // Move data from device. 1349 int Ret = targetDataEnd(loc, Device, ArgNum, ArgBases, Args, ArgSizes, 1350 ArgTypes, ArgNames, ArgMappers, AsyncInfo); 1351 if (Ret != OFFLOAD_SUCCESS) { 1352 REPORT("Call to targetDataEnd failed, abort target.\n"); 1353 return OFFLOAD_FAIL; 1354 } 1355 1356 // Free target memory for private arguments 1357 Ret = PrivateArgumentManager.free(); 1358 if (Ret != OFFLOAD_SUCCESS) { 1359 REPORT("Failed to deallocate target memory for private args\n"); 1360 return OFFLOAD_FAIL; 1361 } 1362 1363 return OFFLOAD_SUCCESS; 1364 } 1365 } // namespace 1366 1367 /// performs the same actions as data_begin in case arg_num is 1368 /// non-zero and initiates run of the offloaded region on the target platform; 1369 /// if arg_num is non-zero after the region execution is done it also 1370 /// performs the same action as data_update and data_end above. This function 1371 /// returns 0 if it was able to transfer the execution to a target and an 1372 /// integer different from zero otherwise. 
1373 int target(ident_t *loc, DeviceTy &Device, void *HostPtr, int32_t ArgNum, 1374 void **ArgBases, void **Args, int64_t *ArgSizes, int64_t *ArgTypes, 1375 map_var_info_t *ArgNames, void **ArgMappers, int32_t TeamNum, 1376 int32_t ThreadLimit, int IsTeamConstruct, AsyncInfoTy &AsyncInfo) { 1377 int32_t DeviceId = Device.DeviceID; 1378 1379 TableMap *TM = getTableMap(HostPtr); 1380 // No map for this host pointer found! 1381 if (!TM) { 1382 REPORT("Host ptr " DPxMOD " does not have a matching target pointer.\n", 1383 DPxPTR(HostPtr)); 1384 return OFFLOAD_FAIL; 1385 } 1386 1387 // get target table. 1388 __tgt_target_table *TargetTable = nullptr; 1389 { 1390 std::lock_guard<std::mutex> TrlTblLock(PM->TrlTblMtx); 1391 assert(TM->Table->TargetsTable.size() > (size_t)DeviceId && 1392 "Not expecting a device ID outside the table's bounds!"); 1393 TargetTable = TM->Table->TargetsTable[DeviceId]; 1394 } 1395 assert(TargetTable && "Global data has not been mapped\n"); 1396 1397 std::vector<void *> TgtArgs; 1398 std::vector<ptrdiff_t> TgtOffsets; 1399 1400 PrivateArgumentManagerTy PrivateArgumentManager(Device, AsyncInfo); 1401 1402 int Ret; 1403 if (ArgNum) { 1404 // Process data, such as data mapping, before launching the kernel 1405 Ret = processDataBefore(loc, DeviceId, HostPtr, ArgNum, ArgBases, Args, 1406 ArgSizes, ArgTypes, ArgNames, ArgMappers, TgtArgs, 1407 TgtOffsets, PrivateArgumentManager, AsyncInfo); 1408 if (Ret != OFFLOAD_SUCCESS) { 1409 REPORT("Failed to process data before launching the kernel.\n"); 1410 return OFFLOAD_FAIL; 1411 } 1412 } 1413 1414 // Get loop trip count 1415 uint64_t LoopTripCount = getLoopTripCount(DeviceId); 1416 1417 // Launch device execution. 1418 void *TgtEntryPtr = TargetTable->EntriesBegin[TM->Index].addr; 1419 DP("Launching target execution %s with pointer " DPxMOD " (index=%d).\n", 1420 TargetTable->EntriesBegin[TM->Index].name, DPxPTR(TgtEntryPtr), TM->Index); 1421 1422 { 1423 TIMESCOPE_WITH_NAME_AND_IDENT( 1424 IsTeamConstruct ? 
"runTargetTeamRegion" : "runTargetRegion", loc); 1425 if (IsTeamConstruct) 1426 Ret = Device.runTeamRegion(TgtEntryPtr, &TgtArgs[0], &TgtOffsets[0], 1427 TgtArgs.size(), TeamNum, ThreadLimit, 1428 LoopTripCount, AsyncInfo); 1429 else 1430 Ret = Device.runRegion(TgtEntryPtr, &TgtArgs[0], &TgtOffsets[0], 1431 TgtArgs.size(), AsyncInfo); 1432 } 1433 1434 if (Ret != OFFLOAD_SUCCESS) { 1435 REPORT("Executing target region abort target.\n"); 1436 return OFFLOAD_FAIL; 1437 } 1438 1439 if (ArgNum) { 1440 // Transfer data back and deallocate target memory for (first-)private 1441 // variables 1442 Ret = processDataAfter(loc, DeviceId, HostPtr, ArgNum, ArgBases, Args, 1443 ArgSizes, ArgTypes, ArgNames, ArgMappers, 1444 PrivateArgumentManager, AsyncInfo); 1445 if (Ret != OFFLOAD_SUCCESS) { 1446 REPORT("Failed to process data after launching the kernel.\n"); 1447 return OFFLOAD_FAIL; 1448 } 1449 } 1450 1451 return OFFLOAD_SUCCESS; 1452 } 1453