1 //===------ omptarget.cpp - Target independent OpenMP target RTL -- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Implementation of the interface to be used by Clang during the codegen of a
10 // target region.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "omptarget.h"
15 #include "device.h"
16 #include "private.h"
17 #include "rtl.h"
18 
19 #include <cassert>
20 #include <vector>
21 
22 int AsyncInfoTy::synchronize() {
23   int Result = OFFLOAD_SUCCESS;
24   if (AsyncInfo.Queue) {
25     // If we have a queue we need to synchronize it now.
26     Result = Device.synchronize(*this);
27     assert(AsyncInfo.Queue == nullptr &&
28            "The device plugin should have nulled the queue to indicate there "
29            "are no outstanding actions!");
30   }
31   return Result;
32 }
33 
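/// Return a reference to a freshly added void* slot owned by this AsyncInfoTy
/// (see BufferLocations). The slot's address stays valid until the pending
/// asynchronous operations have been synchronized, so it can be handed to
/// deferred data transfers without the caller keeping its own storage alive.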
34 void *&AsyncInfoTy::getVoidPtrLocation() {
35   BufferLocations.push_back(nullptr);
36   return BufferLocations.back();
37 }
38 
39 /* All begin addresses for partially mapped structs must be 8-aligned in order
40  * to ensure proper alignment of members. E.g.
41  *
42  * struct S {
43  *   int a;   // 4-aligned
44  *   int b;   // 4-aligned
45  *   int *p;  // 8-aligned
46  * } s1;
47  * ...
48  * #pragma omp target map(tofrom: s1.b, s1.p[0:N])
49  * {
50  *   s1.b = 5;
51  *   for (int i...) s1.p[i] = ...;
52  * }
53  *
54  * Here we are mapping s1 starting from member b, so BaseAddress=&s1=&s1.a and
55  * BeginAddress=&s1.b. Let's assume that the struct begins at address 0x100,
56  * then &s1.a=0x100, &s1.b=0x104, &s1.p=0x108. Each member obeys the alignment
57  * requirements for its type. Now, when we allocate memory on the device, in
58  * CUDA's case cuMemAlloc() returns an address which is at least 256-aligned.
59  * This means that the chunk of the struct on the device will start at a
60  * 256-aligned address, let's say 0x200. Then the address of b will be 0x200 and
61  * address of p will be a misaligned 0x204 (on the host there was no need to add
62  * padding between b and p, so p comes exactly 4 bytes after b). If the device
63  * kernel tries to access s1.p, a misaligned address error occurs (as reported
 * by the CUDA plugin). By aligning the begin address down to a multiple of 8
 * and extending the size of the allocated chunk accordingly, the chunk on the
66  * device will start at 0x200 with the padding (4 bytes), then &s1.b=0x204 and
67  * &s1.p=0x208, as they should be to satisfy the alignment requirements.
68  */
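/* As a concrete illustration of the adjustment (reusing the addresses above):
 * with HstPtrBegin = &s1.b = 0x104, the padding is 0x104 % 8 = 4 bytes, so the
 * begin address is moved down to 0x100 and the mapped size grows by 4 bytes.
 * This is the computation performed in targetDataBegin and targetDataEnd
 * below.
 */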
69 static const int64_t Alignment = 8;
70 
71 /// Map global data and execute pending ctors
72 static int InitLibrary(DeviceTy &Device) {
73   /*
74    * Map global data
75    */
76   int32_t device_id = Device.DeviceID;
77   int rc = OFFLOAD_SUCCESS;
78   bool supportsEmptyImages = Device.RTL->supports_empty_images &&
79                              Device.RTL->supports_empty_images() > 0;
80 
81   Device.PendingGlobalsMtx.lock();
82   PM->TrlTblMtx.lock();
83   for (auto *HostEntriesBegin : PM->HostEntriesBeginRegistrationOrder) {
84     TranslationTable *TransTable =
85         &PM->HostEntriesBeginToTransTable[HostEntriesBegin];
86     if (TransTable->HostTable.EntriesBegin ==
87             TransTable->HostTable.EntriesEnd &&
88         !supportsEmptyImages) {
89       // No host entry so no need to proceed
90       continue;
91     }
92 
93     if (TransTable->TargetsTable[device_id] != 0) {
94       // Library entries have already been processed
95       continue;
96     }
97 
98     // 1) get image.
99     assert(TransTable->TargetsImages.size() > (size_t)device_id &&
100            "Not expecting a device ID outside the table's bounds!");
101     __tgt_device_image *img = TransTable->TargetsImages[device_id];
102     if (!img) {
103       REPORT("No image loaded for device id %d.\n", device_id);
104       rc = OFFLOAD_FAIL;
105       break;
106     }
107     // 2) load image into the target table.
108     __tgt_target_table *TargetTable = TransTable->TargetsTable[device_id] =
109         Device.load_binary(img);
110     // Unable to get table for this image: invalidate image and fail.
111     if (!TargetTable) {
112       REPORT("Unable to generate entries table for device id %d.\n", device_id);
113       TransTable->TargetsImages[device_id] = 0;
114       rc = OFFLOAD_FAIL;
115       break;
116     }
117 
118     // Verify whether the two table sizes match.
119     size_t hsize =
120         TransTable->HostTable.EntriesEnd - TransTable->HostTable.EntriesBegin;
121     size_t tsize = TargetTable->EntriesEnd - TargetTable->EntriesBegin;
122 
123     // Invalid image for these host entries!
124     if (hsize != tsize) {
125       REPORT("Host and Target tables mismatch for device id %d [%zx != %zx].\n",
126              device_id, hsize, tsize);
127       TransTable->TargetsImages[device_id] = 0;
128       TransTable->TargetsTable[device_id] = 0;
129       rc = OFFLOAD_FAIL;
130       break;
131     }
132 
133     // process global data that needs to be mapped.
134     Device.DataMapMtx.lock();
135     __tgt_target_table *HostTable = &TransTable->HostTable;
136     for (__tgt_offload_entry *CurrDeviceEntry = TargetTable->EntriesBegin,
137                              *CurrHostEntry = HostTable->EntriesBegin,
138                              *EntryDeviceEnd = TargetTable->EntriesEnd;
139          CurrDeviceEntry != EntryDeviceEnd;
140          CurrDeviceEntry++, CurrHostEntry++) {
141       if (CurrDeviceEntry->size != 0) {
142         // has data.
143         assert(CurrDeviceEntry->size == CurrHostEntry->size &&
144                "data size mismatch");
145 
        // Fortran may use multiple weak declarations for the same symbol;
        // therefore, we must allow for multiple weak symbols to be loaded from
148         // the fat binary. Treat these mappings as any other "regular" mapping.
149         // Add entry to map.
150         if (Device.getTgtPtrBegin(CurrHostEntry->addr, CurrHostEntry->size))
151           continue;
152         DP("Add mapping from host " DPxMOD " to device " DPxMOD " with size %zu"
153            "\n",
154            DPxPTR(CurrHostEntry->addr), DPxPTR(CurrDeviceEntry->addr),
155            CurrDeviceEntry->size);
156         Device.HostDataToTargetMap.emplace(
157             (uintptr_t)CurrHostEntry->addr /*HstPtrBase*/,
158             (uintptr_t)CurrHostEntry->addr /*HstPtrBegin*/,
159             (uintptr_t)CurrHostEntry->addr + CurrHostEntry->size /*HstPtrEnd*/,
160             (uintptr_t)CurrDeviceEntry->addr /*TgtPtrBegin*/,
161             false /*UseHoldRefCount*/, nullptr /*Name*/,
162             true /*IsRefCountINF*/);
163       }
164     }
165     Device.DataMapMtx.unlock();
166   }
167   PM->TrlTblMtx.unlock();
168 
169   if (rc != OFFLOAD_SUCCESS) {
170     Device.PendingGlobalsMtx.unlock();
171     return rc;
172   }
173 
174   /*
175    * Run ctors for static objects
176    */
177   if (!Device.PendingCtorsDtors.empty()) {
178     AsyncInfoTy AsyncInfo(Device);
179     // Call all ctors for all libraries registered so far
180     for (auto &lib : Device.PendingCtorsDtors) {
181       if (!lib.second.PendingCtors.empty()) {
182         DP("Has pending ctors... call now\n");
183         for (auto &entry : lib.second.PendingCtors) {
184           void *ctor = entry;
185           int rc =
186               target(nullptr, Device, ctor, 0, nullptr, nullptr, nullptr,
187                      nullptr, nullptr, nullptr, 1, 1, true /*team*/, AsyncInfo);
188           if (rc != OFFLOAD_SUCCESS) {
189             REPORT("Running ctor " DPxMOD " failed.\n", DPxPTR(ctor));
190             Device.PendingGlobalsMtx.unlock();
191             return OFFLOAD_FAIL;
192           }
193         }
194         // Clear the list to indicate that this device has been used
195         lib.second.PendingCtors.clear();
196         DP("Done with pending ctors for lib " DPxMOD "\n", DPxPTR(lib.first));
197       }
198     }
199     // All constructors have been issued, wait for them now.
200     if (AsyncInfo.synchronize() != OFFLOAD_SUCCESS)
201       return OFFLOAD_FAIL;
202   }
203   Device.HasPendingGlobals = false;
204   Device.PendingGlobalsMtx.unlock();
205 
206   return OFFLOAD_SUCCESS;
207 }
208 
209 void handleTargetOutcome(bool Success, ident_t *Loc) {
210   switch (PM->TargetOffloadPolicy) {
211   case tgt_disabled:
212     if (Success) {
213       FATAL_MESSAGE0(1, "expected no offloading while offloading is disabled");
214     }
215     break;
216   case tgt_default:
217     FATAL_MESSAGE0(1, "default offloading policy must be switched to "
218                       "mandatory or disabled");
219     break;
220   case tgt_mandatory:
221     if (!Success) {
222       if (getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE)
223         for (auto &Device : PM->Devices)
224           dumpTargetPointerMappings(Loc, *Device);
225       else
226         FAILURE_MESSAGE("Run with LIBOMPTARGET_INFO=%d to dump host-target "
227                         "pointer mappings.\n",
228                         OMP_INFOTYPE_DUMP_TABLE);
229 
230       SourceInfo info(Loc);
231       if (info.isAvailible())
232         fprintf(stderr, "%s:%d:%d: ", info.getFilename(), info.getLine(),
233                 info.getColumn());
234       else
235         FAILURE_MESSAGE("Source location information not present. Compile with "
236                         "-g or -gline-tables-only.\n");
237       FATAL_MESSAGE0(
238           1, "failure of target construct while offloading is mandatory");
239     } else {
240       if (getInfoLevel() & OMP_INFOTYPE_DUMP_TABLE)
241         for (auto &Device : PM->Devices)
242           dumpTargetPointerMappings(Loc, *Device);
243     }
244     break;
245   }
246 }
247 
248 static void handleDefaultTargetOffload() {
249   PM->TargetOffloadMtx.lock();
250   if (PM->TargetOffloadPolicy == tgt_default) {
251     if (omp_get_num_devices() > 0) {
252       DP("Default TARGET OFFLOAD policy is now mandatory "
253          "(devices were found)\n");
254       PM->TargetOffloadPolicy = tgt_mandatory;
255     } else {
256       DP("Default TARGET OFFLOAD policy is now disabled "
257          "(no devices were found)\n");
258       PM->TargetOffloadPolicy = tgt_disabled;
259     }
260   }
261   PM->TargetOffloadMtx.unlock();
262 }
263 
264 static bool isOffloadDisabled() {
265   if (PM->TargetOffloadPolicy == tgt_default)
266     handleDefaultTargetOffload();
267   return PM->TargetOffloadPolicy == tgt_disabled;
268 }
269 
270 // If offload is enabled, ensure that device DeviceID has been initialized,
271 // global ctors have been executed, and global data has been mapped.
272 //
// The returned bool indicates whether the offload is to the host device.
// There are three possible results:
// - Return false if the target device is ready for offload.
276 // - Return true without reporting a runtime error if offload is
277 //   disabled, perhaps because the initial device was specified.
278 // - Report a runtime error and return true.
279 //
280 // If DeviceID == OFFLOAD_DEVICE_DEFAULT, set DeviceID to the default device.
281 // This step might be skipped if offload is disabled.
282 bool checkDeviceAndCtors(int64_t &DeviceID, ident_t *Loc) {
283   if (isOffloadDisabled()) {
284     DP("Offload is disabled\n");
285     return true;
286   }
287 
288   if (DeviceID == OFFLOAD_DEVICE_DEFAULT) {
289     DeviceID = omp_get_default_device();
290     DP("Use default device id %" PRId64 "\n", DeviceID);
291   }
292 
293   // Proposed behavior for OpenMP 5.2 in OpenMP spec github issue 2669.
294   if (omp_get_num_devices() == 0) {
    DP("omp_get_num_devices() == 0 but offload is mandatory\n");
296     handleTargetOutcome(false, Loc);
297     return true;
298   }
299 
300   if (DeviceID == omp_get_initial_device()) {
301     DP("Device is host (%" PRId64 "), returning as if offload is disabled\n",
302        DeviceID);
303     return true;
304   }
305 
306   // Is device ready?
307   if (!device_is_ready(DeviceID)) {
308     REPORT("Device %" PRId64 " is not ready.\n", DeviceID);
309     handleTargetOutcome(false, Loc);
310     return true;
311   }
312 
313   // Get device info.
314   DeviceTy &Device = *PM->Devices[DeviceID];
315 
316   // Check whether global data has been mapped for this device
317   Device.PendingGlobalsMtx.lock();
318   bool hasPendingGlobals = Device.HasPendingGlobals;
319   Device.PendingGlobalsMtx.unlock();
320   if (hasPendingGlobals && InitLibrary(Device) != OFFLOAD_SUCCESS) {
321     REPORT("Failed to init globals on device %" PRId64 "\n", DeviceID);
322     handleTargetOutcome(false, Loc);
323     return true;
324   }
325 
326   return false;
327 }
328 
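/// Return the index of the parent map entry of a map type, as encoded in its
/// MEMBER_OF bits: the field stores the 1-based position of the parent
/// argument, so a MEMBER_OF value of 1 yields index 0 and a cleared field (no
/// parent) yields -1.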
329 static int32_t getParentIndex(int64_t type) {
330   return ((type & OMP_TGT_MAPTYPE_MEMBER_OF) >> 48) - 1;
331 }
332 
333 void *targetAllocExplicit(size_t size, int device_num, int kind,
334                           const char *name) {
335   TIMESCOPE();
336   DP("Call to %s for device %d requesting %zu bytes\n", name, device_num, size);
337 
338   if (size <= 0) {
339     DP("Call to %s with non-positive length\n", name);
340     return NULL;
341   }
342 
343   void *rc = NULL;
344 
345   if (device_num == omp_get_initial_device()) {
346     rc = malloc(size);
347     DP("%s returns host ptr " DPxMOD "\n", name, DPxPTR(rc));
348     return rc;
349   }
350 
351   if (!device_is_ready(device_num)) {
352     DP("%s returns NULL ptr\n", name);
353     return NULL;
354   }
355 
356   DeviceTy &Device = *PM->Devices[device_num];
357   rc = Device.allocData(size, nullptr, kind);
358   DP("%s returns device ptr " DPxMOD "\n", name, DPxPTR(rc));
359   return rc;
360 }
361 
/// Call the user-defined mapper function followed by the appropriate
/// targetData* function (targetData{Begin,End,Update}).
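/// For illustration (a sketch, not code from this runtime), a user-defined
/// mapper such as
///   #pragma omp declare mapper(struct S s) map(to : s.len)
///                                          map(tofrom : s.data[0 : s.len])
/// is lowered by the compiler into a mapper function; calling that function
/// below fills MapperComponents with the components corresponding to the map
/// clauses.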
364 int targetDataMapper(ident_t *loc, DeviceTy &Device, void *arg_base, void *arg,
365                      int64_t arg_size, int64_t arg_type,
366                      map_var_info_t arg_names, void *arg_mapper,
367                      AsyncInfoTy &AsyncInfo,
368                      TargetDataFuncPtrTy target_data_function) {
369   TIMESCOPE_WITH_IDENT(loc);
370   DP("Calling the mapper function " DPxMOD "\n", DPxPTR(arg_mapper));
371 
372   // The mapper function fills up Components.
373   MapperComponentsTy MapperComponents;
374   MapperFuncPtrTy MapperFuncPtr = (MapperFuncPtrTy)(arg_mapper);
375   (*MapperFuncPtr)((void *)&MapperComponents, arg_base, arg, arg_size, arg_type,
376                    arg_names);
377 
378   // Construct new arrays for args_base, args, arg_sizes and arg_types
379   // using the information in MapperComponents and call the corresponding
380   // targetData* function using these new arrays.
381   std::vector<void *> MapperArgsBase(MapperComponents.Components.size());
382   std::vector<void *> MapperArgs(MapperComponents.Components.size());
383   std::vector<int64_t> MapperArgSizes(MapperComponents.Components.size());
384   std::vector<int64_t> MapperArgTypes(MapperComponents.Components.size());
385   std::vector<void *> MapperArgNames(MapperComponents.Components.size());
386 
387   for (unsigned I = 0, E = MapperComponents.Components.size(); I < E; ++I) {
388     auto &C = MapperComponents.Components[I];
389     MapperArgsBase[I] = C.Base;
390     MapperArgs[I] = C.Begin;
391     MapperArgSizes[I] = C.Size;
392     MapperArgTypes[I] = C.Type;
393     MapperArgNames[I] = C.Name;
394   }
395 
396   int rc = target_data_function(loc, Device, MapperComponents.Components.size(),
397                                 MapperArgsBase.data(), MapperArgs.data(),
398                                 MapperArgSizes.data(), MapperArgTypes.data(),
399                                 MapperArgNames.data(), /*arg_mappers*/ nullptr,
400                                 AsyncInfo, /*FromMapper=*/true);
401 
402   return rc;
403 }
404 
405 /// Internal function to do the mapping and transfer the data to the device
406 int targetDataBegin(ident_t *loc, DeviceTy &Device, int32_t arg_num,
407                     void **args_base, void **args, int64_t *arg_sizes,
408                     int64_t *arg_types, map_var_info_t *arg_names,
409                     void **arg_mappers, AsyncInfoTy &AsyncInfo,
410                     bool FromMapper) {
411   // process each input.
412   for (int32_t i = 0; i < arg_num; ++i) {
413     // Ignore private variables and arrays - there is no mapping for them.
414     if ((arg_types[i] & OMP_TGT_MAPTYPE_LITERAL) ||
415         (arg_types[i] & OMP_TGT_MAPTYPE_PRIVATE))
416       continue;
417 
418     if (arg_mappers && arg_mappers[i]) {
419       // Instead of executing the regular path of targetDataBegin, call the
420       // targetDataMapper variant which will call targetDataBegin again
421       // with new arguments.
422       DP("Calling targetDataMapper for the %dth argument\n", i);
423 
424       map_var_info_t arg_name = (!arg_names) ? nullptr : arg_names[i];
425       int rc = targetDataMapper(loc, Device, args_base[i], args[i],
426                                 arg_sizes[i], arg_types[i], arg_name,
427                                 arg_mappers[i], AsyncInfo, targetDataBegin);
428 
429       if (rc != OFFLOAD_SUCCESS) {
430         REPORT("Call to targetDataBegin via targetDataMapper for custom mapper"
431                " failed.\n");
432         return OFFLOAD_FAIL;
433       }
434 
435       // Skip the rest of this function, continue to the next argument.
436       continue;
437     }
438 
439     void *HstPtrBegin = args[i];
440     void *HstPtrBase = args_base[i];
441     int64_t data_size = arg_sizes[i];
442     map_var_info_t HstPtrName = (!arg_names) ? nullptr : arg_names[i];
443 
444     // Adjust for proper alignment if this is a combined entry (for structs).
445     // Look at the next argument - if that is MEMBER_OF this one, then this one
446     // is a combined entry.
447     int64_t padding = 0;
448     const int next_i = i + 1;
449     if (getParentIndex(arg_types[i]) < 0 && next_i < arg_num &&
450         getParentIndex(arg_types[next_i]) == i) {
451       padding = (int64_t)HstPtrBegin % Alignment;
452       if (padding) {
453         DP("Using a padding of %" PRId64 " bytes for begin address " DPxMOD
454            "\n",
455            padding, DPxPTR(HstPtrBegin));
456         HstPtrBegin = (char *)HstPtrBegin - padding;
457         data_size += padding;
458       }
459     }
460 
461     // Address of pointer on the host and device, respectively.
462     void *Pointer_HstPtrBegin, *PointerTgtPtrBegin;
463     TargetPointerResultTy Pointer_TPR;
464     bool IsHostPtr = false;
465     bool IsImplicit = arg_types[i] & OMP_TGT_MAPTYPE_IMPLICIT;
466     // Force the creation of a device side copy of the data when:
467     // a close map modifier was associated with a map that contained a to.
468     bool HasCloseModifier = arg_types[i] & OMP_TGT_MAPTYPE_CLOSE;
469     bool HasPresentModifier = arg_types[i] & OMP_TGT_MAPTYPE_PRESENT;
470     bool HasHoldModifier = arg_types[i] & OMP_TGT_MAPTYPE_OMPX_HOLD;
471     // UpdateRef is based on MEMBER_OF instead of TARGET_PARAM because if we
472     // have reached this point via __tgt_target_data_begin and not __tgt_target
473     // then no argument is marked as TARGET_PARAM ("omp target data map" is not
474     // associated with a target region, so there are no target parameters). This
    // may be considered a hack; we could revise the scheme in the future.
476     bool UpdateRef =
477         !(arg_types[i] & OMP_TGT_MAPTYPE_MEMBER_OF) && !(FromMapper && i == 0);
478     if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ) {
479       DP("Has a pointer entry: \n");
480       // Base is address of pointer.
481       //
482       // Usually, the pointer is already allocated by this time.  For example:
483       //
484       //   #pragma omp target map(s.p[0:N])
485       //
486       // The map entry for s comes first, and the PTR_AND_OBJ entry comes
487       // afterward, so the pointer is already allocated by the time the
488       // PTR_AND_OBJ entry is handled below, and PointerTgtPtrBegin is thus
489       // non-null.  However, "declare target link" can produce a PTR_AND_OBJ
490       // entry for a global that might not already be allocated by the time the
491       // PTR_AND_OBJ entry is handled below, and so the allocation might fail
492       // when HasPresentModifier.
493       Pointer_TPR = Device.getTargetPointer(
494           HstPtrBase, HstPtrBase, sizeof(void *), /*HstPtrName=*/nullptr,
495           /*HasFlagTo=*/false, /*HasFlagAlways=*/false, IsImplicit, UpdateRef,
496           HasCloseModifier, HasPresentModifier, HasHoldModifier, AsyncInfo);
497       PointerTgtPtrBegin = Pointer_TPR.TargetPointer;
498       IsHostPtr = Pointer_TPR.Flags.IsHostPointer;
499       if (!PointerTgtPtrBegin) {
500         REPORT("Call to getTargetPointer returned null pointer (%s).\n",
501                HasPresentModifier ? "'present' map type modifier"
502                                   : "device failure or illegal mapping");
503         return OFFLOAD_FAIL;
504       }
505       DP("There are %zu bytes allocated at target address " DPxMOD " - is%s new"
506          "\n",
507          sizeof(void *), DPxPTR(PointerTgtPtrBegin),
508          (Pointer_TPR.Flags.IsNewEntry ? "" : " not"));
509       Pointer_HstPtrBegin = HstPtrBase;
510       // modify current entry.
511       HstPtrBase = *(void **)HstPtrBase;
512       // No need to update pointee ref count for the first element of the
513       // subelement that comes from mapper.
514       UpdateRef =
515           (!FromMapper || i != 0); // subsequently update ref count of pointee
516     }
517 
518     const bool HasFlagTo = arg_types[i] & OMP_TGT_MAPTYPE_TO;
519     const bool HasFlagAlways = arg_types[i] & OMP_TGT_MAPTYPE_ALWAYS;
520     auto TPR = Device.getTargetPointer(HstPtrBegin, HstPtrBase, data_size,
521                                        HstPtrName, HasFlagTo, HasFlagAlways,
522                                        IsImplicit, UpdateRef, HasCloseModifier,
523                                        HasPresentModifier, HasHoldModifier,
524                                        AsyncInfo);
525     void *TgtPtrBegin = TPR.TargetPointer;
526     IsHostPtr = TPR.Flags.IsHostPointer;
    // If data_size==0, then the argument could be a zero-length pointer to
    // NULL, so getTargetPointer() returning NULL is not an error.
529     if (!TgtPtrBegin && (data_size || HasPresentModifier)) {
530       REPORT("Call to getTargetPointer returned null pointer (%s).\n",
531              HasPresentModifier ? "'present' map type modifier"
532                                 : "device failure or illegal mapping");
533       return OFFLOAD_FAIL;
534     }
535     DP("There are %" PRId64 " bytes allocated at target address " DPxMOD
536        " - is%s new\n",
537        data_size, DPxPTR(TgtPtrBegin), (TPR.Flags.IsNewEntry ? "" : " not"));
538 
539     if (arg_types[i] & OMP_TGT_MAPTYPE_RETURN_PARAM) {
540       uintptr_t Delta = (uintptr_t)HstPtrBegin - (uintptr_t)HstPtrBase;
541       void *TgtPtrBase = (void *)((uintptr_t)TgtPtrBegin - Delta);
542       DP("Returning device pointer " DPxMOD "\n", DPxPTR(TgtPtrBase));
543       args_base[i] = TgtPtrBase;
544     }
545 
546     if (arg_types[i] & OMP_TGT_MAPTYPE_PTR_AND_OBJ && !IsHostPtr) {
547       // Check whether we need to update the pointer on the device
548       bool UpdateDevPtr = false;
549 
550       uint64_t Delta = (uint64_t)HstPtrBegin - (uint64_t)HstPtrBase;
551       void *ExpectedTgtPtrBase = (void *)((uint64_t)TgtPtrBegin - Delta);
552 
553       Device.ShadowMtx.lock();
554       auto Entry = Device.ShadowPtrMap.find(Pointer_HstPtrBegin);
555       // If this pointer is not in the map we need to insert it. If the map
556       // contains a stale entry, we need to update it (e.g. if the pointee was
557       // deallocated and later on is reallocated at another device address). The
558       // latter scenario is the subject of LIT test env/base_ptr_ref_count.c. An
559       // entry is removed from ShadowPtrMap only when the PTR of a PTR_AND_OBJ
560       // pair is deallocated, not when the OBJ is deallocated. In
561       // env/base_ptr_ref_count.c the PTR is a global "declare target" pointer,
562       // so it stays in the map for the lifetime of the application. When the
563       // OBJ is deallocated and later on allocated again (at a different device
564       // address), ShadowPtrMap still contains an entry for Pointer_HstPtrBegin
565       // which is stale, pointing to the old ExpectedTgtPtrBase of the OBJ.
566       if (Entry == Device.ShadowPtrMap.end() ||
567           Entry->second.TgtPtrVal != ExpectedTgtPtrBase) {
568         // create or update shadow pointers for this entry
569         Device.ShadowPtrMap[Pointer_HstPtrBegin] = {
570             HstPtrBase, PointerTgtPtrBegin, ExpectedTgtPtrBase};
571         UpdateDevPtr = true;
572       }
573 
574       if (UpdateDevPtr) {
575         Pointer_TPR.MapTableEntry->lock();
576         Device.ShadowMtx.unlock();
577 
578         DP("Update pointer (" DPxMOD ") -> [" DPxMOD "]\n",
579            DPxPTR(PointerTgtPtrBegin), DPxPTR(TgtPtrBegin));
580 
581         void *&TgtPtrBase = AsyncInfo.getVoidPtrLocation();
582         TgtPtrBase = ExpectedTgtPtrBase;
583 
584         int rt = Device.submitData(PointerTgtPtrBegin, &TgtPtrBase,
585                                    sizeof(void *), AsyncInfo);
586         Pointer_TPR.MapTableEntry->unlock();
587 
588         if (rt != OFFLOAD_SUCCESS) {
589           REPORT("Copying data to device failed.\n");
590           return OFFLOAD_FAIL;
591         }
592       } else
593         Device.ShadowMtx.unlock();
594     }
595   }
596 
597   return OFFLOAD_SUCCESS;
598 }
599 
600 namespace {
/// This structure contains the information needed to deallocate a target
/// pointer, i.e., to call the function \p DeviceTy::deallocTgtPtr.
603 struct DeallocTgtPtrInfo {
604   /// Host pointer used to look up into the map table
605   void *HstPtrBegin;
606   /// Size of the data
607   int64_t DataSize;
608   /// Whether it has \p ompx_hold modifier
609   bool HasHoldModifier;
610 
611   DeallocTgtPtrInfo(void *HstPtr, int64_t Size, bool HasHoldModifier)
612       : HstPtrBegin(HstPtr), DataSize(Size), HasHoldModifier(HasHoldModifier) {}
613 };
614 } // namespace
615 
616 /// Internal function to undo the mapping and retrieve the data from the device.
617 int targetDataEnd(ident_t *loc, DeviceTy &Device, int32_t ArgNum,
618                   void **ArgBases, void **Args, int64_t *ArgSizes,
619                   int64_t *ArgTypes, map_var_info_t *ArgNames,
620                   void **ArgMappers, AsyncInfoTy &AsyncInfo, bool FromMapper) {
621   int Ret;
622   std::vector<DeallocTgtPtrInfo> DeallocTgtPtrs;
623   void *FromMapperBase = nullptr;
624   // process each input.
625   for (int32_t I = ArgNum - 1; I >= 0; --I) {
626     // Ignore private variables and arrays - there is no mapping for them.
    // Also, ignore the use_device_ptr clause; it has no effect here.
628     if ((ArgTypes[I] & OMP_TGT_MAPTYPE_LITERAL) ||
629         (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE))
630       continue;
631 
632     if (ArgMappers && ArgMappers[I]) {
633       // Instead of executing the regular path of targetDataEnd, call the
634       // targetDataMapper variant which will call targetDataEnd again
635       // with new arguments.
636       DP("Calling targetDataMapper for the %dth argument\n", I);
637 
638       map_var_info_t ArgName = (!ArgNames) ? nullptr : ArgNames[I];
639       Ret = targetDataMapper(loc, Device, ArgBases[I], Args[I], ArgSizes[I],
640                              ArgTypes[I], ArgName, ArgMappers[I], AsyncInfo,
641                              targetDataEnd);
642 
643       if (Ret != OFFLOAD_SUCCESS) {
644         REPORT("Call to targetDataEnd via targetDataMapper for custom mapper"
645                " failed.\n");
646         return OFFLOAD_FAIL;
647       }
648 
649       // Skip the rest of this function, continue to the next argument.
650       continue;
651     }
652 
653     void *HstPtrBegin = Args[I];
654     int64_t DataSize = ArgSizes[I];
655     // Adjust for proper alignment if this is a combined entry (for structs).
656     // Look at the next argument - if that is MEMBER_OF this one, then this one
657     // is a combined entry.
658     const int NextI = I + 1;
659     if (getParentIndex(ArgTypes[I]) < 0 && NextI < ArgNum &&
660         getParentIndex(ArgTypes[NextI]) == I) {
661       int64_t Padding = (int64_t)HstPtrBegin % Alignment;
662       if (Padding) {
663         DP("Using a Padding of %" PRId64 " bytes for begin address " DPxMOD
664            "\n",
665            Padding, DPxPTR(HstPtrBegin));
666         HstPtrBegin = (char *)HstPtrBegin - Padding;
667         DataSize += Padding;
668       }
669     }
670 
671     bool IsLast, IsHostPtr;
672     bool IsImplicit = ArgTypes[I] & OMP_TGT_MAPTYPE_IMPLICIT;
673     bool UpdateRef = (!(ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) ||
674                       (ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) &&
675                      !(FromMapper && I == 0);
676     bool ForceDelete = ArgTypes[I] & OMP_TGT_MAPTYPE_DELETE;
677     bool HasPresentModifier = ArgTypes[I] & OMP_TGT_MAPTYPE_PRESENT;
678     bool HasHoldModifier = ArgTypes[I] & OMP_TGT_MAPTYPE_OMPX_HOLD;
679 
680     // If PTR_AND_OBJ, HstPtrBegin is address of pointee
681     void *TgtPtrBegin = Device.getTgtPtrBegin(
682         HstPtrBegin, DataSize, IsLast, UpdateRef, HasHoldModifier, IsHostPtr,
683         !IsImplicit, ForceDelete);
684     if (!TgtPtrBegin && (DataSize || HasPresentModifier)) {
685       DP("Mapping does not exist (%s)\n",
686          (HasPresentModifier ? "'present' map type modifier" : "ignored"));
687       if (HasPresentModifier) {
688         // OpenMP 5.1, sec. 2.21.7.1 "map Clause", p. 350 L10-13:
689         // "If a map clause appears on a target, target data, target enter data
690         // or target exit data construct with a present map-type-modifier then
691         // on entry to the region if the corresponding list item does not appear
692         // in the device data environment then an error occurs and the program
693         // terminates."
694         //
695         // This should be an error upon entering an "omp target exit data".  It
696         // should not be an error upon exiting an "omp target data" or "omp
697         // target".  For "omp target data", Clang thus doesn't include present
698         // modifiers for end calls.  For "omp target", we have not found a valid
699         // OpenMP program for which the error matters: it appears that, if a
700         // program can guarantee that data is present at the beginning of an
701         // "omp target" region so that there's no error there, that data is also
702         // guaranteed to be present at the end.
703         MESSAGE("device mapping required by 'present' map type modifier does "
704                 "not exist for host address " DPxMOD " (%" PRId64 " bytes)",
705                 DPxPTR(HstPtrBegin), DataSize);
706         return OFFLOAD_FAIL;
707       }
708     } else {
709       DP("There are %" PRId64 " bytes allocated at target address " DPxMOD
710          " - is%s last\n",
711          DataSize, DPxPTR(TgtPtrBegin), (IsLast ? "" : " not"));
712     }
713 
714     // OpenMP 5.1, sec. 2.21.7.1 "map Clause", p. 351 L14-16:
715     // "If the map clause appears on a target, target data, or target exit data
716     // construct and a corresponding list item of the original list item is not
717     // present in the device data environment on exit from the region then the
718     // list item is ignored."
719     if (!TgtPtrBegin)
720       continue;
721 
722     bool DelEntry = IsLast;
723 
    // If this is the last element from the mapper (the arguments of an end
    // transfer come in reverse order), do not remove the partial entry; the
    // parent struct still exists.
727     if ((ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
728         !(ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)) {
729       DelEntry = false; // protect parent struct from being deallocated
730     }
731 
732     if ((ArgTypes[I] & OMP_TGT_MAPTYPE_FROM) || DelEntry) {
733       // Move data back to the host
734       if (ArgTypes[I] & OMP_TGT_MAPTYPE_FROM) {
735         bool Always = ArgTypes[I] & OMP_TGT_MAPTYPE_ALWAYS;
736         if ((Always || IsLast) && !IsHostPtr) {
737           DP("Moving %" PRId64 " bytes (tgt:" DPxMOD ") -> (hst:" DPxMOD ")\n",
738              DataSize, DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin));
739           Ret = Device.retrieveData(HstPtrBegin, TgtPtrBegin, DataSize,
740                                     AsyncInfo);
741           if (Ret != OFFLOAD_SUCCESS) {
742             REPORT("Copying data from device failed.\n");
743             return OFFLOAD_FAIL;
744           }
745         }
746       }
747       if (DelEntry && FromMapper && I == 0) {
748         DelEntry = false;
749         FromMapperBase = HstPtrBegin;
750       }
751 
752       // If we copied back to the host a struct/array containing pointers, we
753       // need to restore the original host pointer values from their shadow
754       // copies. If the struct is going to be deallocated, remove any remaining
755       // shadow pointer entries for this struct.
756       uintptr_t LB = (uintptr_t)HstPtrBegin;
757       uintptr_t UB = (uintptr_t)HstPtrBegin + DataSize;
758       Device.ShadowMtx.lock();
759       for (ShadowPtrListTy::iterator Itr = Device.ShadowPtrMap.begin();
760            Itr != Device.ShadowPtrMap.end();) {
761         void **ShadowHstPtrAddr = (void **)Itr->first;
762 
763         // An STL map is sorted on its keys; use this property
764         // to quickly determine when to break out of the loop.
765         if ((uintptr_t)ShadowHstPtrAddr < LB) {
766           ++Itr;
767           continue;
768         }
769         if ((uintptr_t)ShadowHstPtrAddr >= UB)
770           break;
771 
772         // If we copied the struct to the host, we need to restore the pointer.
773         if (ArgTypes[I] & OMP_TGT_MAPTYPE_FROM) {
774           DP("Restoring original host pointer value " DPxMOD " for host "
775              "pointer " DPxMOD "\n",
776              DPxPTR(Itr->second.HstPtrVal), DPxPTR(ShadowHstPtrAddr));
777           *ShadowHstPtrAddr = Itr->second.HstPtrVal;
778         }
779         // If the struct is to be deallocated, remove the shadow entry.
780         if (DelEntry) {
781           DP("Removing shadow pointer " DPxMOD "\n", DPxPTR(ShadowHstPtrAddr));
782           Itr = Device.ShadowPtrMap.erase(Itr);
783         } else {
784           ++Itr;
785         }
786       }
787       Device.ShadowMtx.unlock();
788 
789       // Add pointer to the buffer for later deallocation
790       if (DelEntry && !IsHostPtr)
791         DeallocTgtPtrs.emplace_back(HstPtrBegin, DataSize, HasHoldModifier);
792     }
793   }
794 
795   // TODO: We should not synchronize here but pass the AsyncInfo object to the
796   //       allocate/deallocate device APIs.
797   //
798   // We need to synchronize before deallocating data.
799   Ret = AsyncInfo.synchronize();
800   if (Ret != OFFLOAD_SUCCESS)
801     return OFFLOAD_FAIL;
802 
803   // Deallocate target pointer
804   for (DeallocTgtPtrInfo &Info : DeallocTgtPtrs) {
805     if (FromMapperBase && FromMapperBase == Info.HstPtrBegin)
806       continue;
807     Ret = Device.deallocTgtPtr(Info.HstPtrBegin, Info.DataSize,
808                                Info.HasHoldModifier);
809     if (Ret != OFFLOAD_SUCCESS) {
810       REPORT("Deallocating data from device failed.\n");
811       return OFFLOAD_FAIL;
812     }
813   }
814 
815   return OFFLOAD_SUCCESS;
816 }
817 
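/// Transfer a single contiguous block of \p ArgSize bytes between the host
/// address \p HstPtrBegin and its mapped device counterpart, honoring the
/// TO/FROM/PRESENT map-type bits in \p ArgType and keeping shadow pointers
/// consistent.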
818 static int targetDataContiguous(ident_t *loc, DeviceTy &Device, void *ArgsBase,
819                                 void *HstPtrBegin, int64_t ArgSize,
820                                 int64_t ArgType, AsyncInfoTy &AsyncInfo) {
821   TIMESCOPE_WITH_IDENT(loc);
822   bool IsLast, IsHostPtr;
823   void *TgtPtrBegin = Device.getTgtPtrBegin(
824       HstPtrBegin, ArgSize, IsLast, /*UpdateRefCount=*/false,
825       /*UseHoldRefCount=*/false, IsHostPtr, /*MustContain=*/true);
826   if (!TgtPtrBegin) {
827     DP("hst data:" DPxMOD " not found, becomes a noop\n", DPxPTR(HstPtrBegin));
828     if (ArgType & OMP_TGT_MAPTYPE_PRESENT) {
829       MESSAGE("device mapping required by 'present' motion modifier does not "
830               "exist for host address " DPxMOD " (%" PRId64 " bytes)",
831               DPxPTR(HstPtrBegin), ArgSize);
832       return OFFLOAD_FAIL;
833     }
834     return OFFLOAD_SUCCESS;
835   }
836 
837   if (IsHostPtr) {
838     DP("hst data:" DPxMOD " unified and shared, becomes a noop\n",
839        DPxPTR(HstPtrBegin));
840     return OFFLOAD_SUCCESS;
841   }
842 
843   if (ArgType & OMP_TGT_MAPTYPE_FROM) {
844     DP("Moving %" PRId64 " bytes (tgt:" DPxMOD ") -> (hst:" DPxMOD ")\n",
845        ArgSize, DPxPTR(TgtPtrBegin), DPxPTR(HstPtrBegin));
846     int Ret = Device.retrieveData(HstPtrBegin, TgtPtrBegin, ArgSize, AsyncInfo);
847     if (Ret != OFFLOAD_SUCCESS) {
848       REPORT("Copying data from device failed.\n");
849       return OFFLOAD_FAIL;
850     }
851 
852     uintptr_t LB = (uintptr_t)HstPtrBegin;
853     uintptr_t UB = (uintptr_t)HstPtrBegin + ArgSize;
854     Device.ShadowMtx.lock();
855     for (ShadowPtrListTy::iterator IT = Device.ShadowPtrMap.begin();
856          IT != Device.ShadowPtrMap.end(); ++IT) {
857       void **ShadowHstPtrAddr = (void **)IT->first;
858       if ((uintptr_t)ShadowHstPtrAddr < LB)
859         continue;
860       if ((uintptr_t)ShadowHstPtrAddr >= UB)
861         break;
862       DP("Restoring original host pointer value " DPxMOD
863          " for host pointer " DPxMOD "\n",
864          DPxPTR(IT->second.HstPtrVal), DPxPTR(ShadowHstPtrAddr));
865       *ShadowHstPtrAddr = IT->second.HstPtrVal;
866     }
867     Device.ShadowMtx.unlock();
868   }
869 
870   if (ArgType & OMP_TGT_MAPTYPE_TO) {
871     DP("Moving %" PRId64 " bytes (hst:" DPxMOD ") -> (tgt:" DPxMOD ")\n",
872        ArgSize, DPxPTR(HstPtrBegin), DPxPTR(TgtPtrBegin));
873     int Ret = Device.submitData(TgtPtrBegin, HstPtrBegin, ArgSize, AsyncInfo);
874     if (Ret != OFFLOAD_SUCCESS) {
875       REPORT("Copying data to device failed.\n");
876       return OFFLOAD_FAIL;
877     }
878 
879     uintptr_t LB = (uintptr_t)HstPtrBegin;
880     uintptr_t UB = (uintptr_t)HstPtrBegin + ArgSize;
881     Device.ShadowMtx.lock();
882     for (ShadowPtrListTy::iterator IT = Device.ShadowPtrMap.begin();
883          IT != Device.ShadowPtrMap.end(); ++IT) {
884       void **ShadowHstPtrAddr = (void **)IT->first;
885       if ((uintptr_t)ShadowHstPtrAddr < LB)
886         continue;
887       if ((uintptr_t)ShadowHstPtrAddr >= UB)
888         break;
889       DP("Restoring original target pointer value " DPxMOD " for target "
890          "pointer " DPxMOD "\n",
891          DPxPTR(IT->second.TgtPtrVal), DPxPTR(IT->second.TgtPtrAddr));
892       Ret = Device.submitData(IT->second.TgtPtrAddr, &IT->second.TgtPtrVal,
893                               sizeof(void *), AsyncInfo);
894       if (Ret != OFFLOAD_SUCCESS) {
895         REPORT("Copying data to device failed.\n");
896         Device.ShadowMtx.unlock();
897         return OFFLOAD_FAIL;
898       }
899     }
900     Device.ShadowMtx.unlock();
901   }
902   return OFFLOAD_SUCCESS;
903 }
904 
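/// Recursively walk the dimensions described by \p NonContig and transfer each
/// innermost contiguous piece via targetDataContiguous.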
905 static int targetDataNonContiguous(ident_t *loc, DeviceTy &Device,
906                                    void *ArgsBase,
907                                    __tgt_target_non_contig *NonContig,
908                                    uint64_t Size, int64_t ArgType,
909                                    int CurrentDim, int DimSize, uint64_t Offset,
910                                    AsyncInfoTy &AsyncInfo) {
911   TIMESCOPE_WITH_IDENT(loc);
912   int Ret = OFFLOAD_SUCCESS;
913   if (CurrentDim < DimSize) {
914     for (unsigned int I = 0; I < NonContig[CurrentDim].Count; ++I) {
915       uint64_t CurOffset =
916           (NonContig[CurrentDim].Offset + I) * NonContig[CurrentDim].Stride;
917       // we only need to transfer the first element for the last dimension
918       // since we've already got a contiguous piece.
919       if (CurrentDim != DimSize - 1 || I == 0) {
920         Ret = targetDataNonContiguous(loc, Device, ArgsBase, NonContig, Size,
921                                       ArgType, CurrentDim + 1, DimSize,
922                                       Offset + CurOffset, AsyncInfo);
923         // Stop the whole process if any contiguous piece returns anything
924         // other than OFFLOAD_SUCCESS.
925         if (Ret != OFFLOAD_SUCCESS)
926           return Ret;
927       }
928     }
929   } else {
930     char *Ptr = (char *)ArgsBase + Offset;
931     DP("Transfer of non-contiguous : host ptr " DPxMOD " offset %" PRIu64
932        " len %" PRIu64 "\n",
933        DPxPTR(Ptr), Offset, Size);
934     Ret = targetDataContiguous(loc, Device, ArgsBase, Ptr, Size, ArgType,
935                                AsyncInfo);
936   }
937   return Ret;
938 }
939 
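/// Count how many inner dimensions of a non-contiguous descriptor can be
/// merged into their enclosing dimension because together they describe
/// contiguous memory, i.e., Count * Stride of the inner dimension equals the
/// Stride of the enclosing one. For example (illustrative numbers only), an
/// inner dimension with Count = 20 and Stride = 8 bytes nested in a dimension
/// with Stride = 160 bytes is contiguous within it, since 20 * 8 == 160.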
940 static int getNonContigMergedDimension(__tgt_target_non_contig *NonContig,
941                                        int32_t DimSize) {
942   int RemovedDim = 0;
943   for (int I = DimSize - 1; I > 0; --I) {
944     if (NonContig[I].Count * NonContig[I].Stride == NonContig[I - 1].Stride)
945       RemovedDim++;
946   }
947   return RemovedDim;
948 }
949 
950 /// Internal function to pass data to/from the target.
951 int targetDataUpdate(ident_t *loc, DeviceTy &Device, int32_t ArgNum,
952                      void **ArgsBase, void **Args, int64_t *ArgSizes,
953                      int64_t *ArgTypes, map_var_info_t *ArgNames,
954                      void **ArgMappers, AsyncInfoTy &AsyncInfo, bool) {
955   // process each input.
956   for (int32_t I = 0; I < ArgNum; ++I) {
957     if ((ArgTypes[I] & OMP_TGT_MAPTYPE_LITERAL) ||
958         (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE))
959       continue;
960 
961     if (ArgMappers && ArgMappers[I]) {
962       // Instead of executing the regular path of targetDataUpdate, call the
963       // targetDataMapper variant which will call targetDataUpdate again
964       // with new arguments.
965       DP("Calling targetDataMapper for the %dth argument\n", I);
966 
967       map_var_info_t ArgName = (!ArgNames) ? nullptr : ArgNames[I];
968       int Ret = targetDataMapper(loc, Device, ArgsBase[I], Args[I], ArgSizes[I],
969                                  ArgTypes[I], ArgName, ArgMappers[I], AsyncInfo,
970                                  targetDataUpdate);
971 
972       if (Ret != OFFLOAD_SUCCESS) {
973         REPORT("Call to targetDataUpdate via targetDataMapper for custom mapper"
974                " failed.\n");
975         return OFFLOAD_FAIL;
976       }
977 
978       // Skip the rest of this function, continue to the next argument.
979       continue;
980     }
981 
982     int Ret = OFFLOAD_SUCCESS;
983 
984     if (ArgTypes[I] & OMP_TGT_MAPTYPE_NON_CONTIG) {
985       __tgt_target_non_contig *NonContig = (__tgt_target_non_contig *)Args[I];
986       int32_t DimSize = ArgSizes[I];
987       uint64_t Size =
988           NonContig[DimSize - 1].Count * NonContig[DimSize - 1].Stride;
989       int32_t MergedDim = getNonContigMergedDimension(NonContig, DimSize);
990       Ret = targetDataNonContiguous(
991           loc, Device, ArgsBase[I], NonContig, Size, ArgTypes[I],
992           /*current_dim=*/0, DimSize - MergedDim, /*offset=*/0, AsyncInfo);
993     } else {
994       Ret = targetDataContiguous(loc, Device, ArgsBase[I], Args[I], ArgSizes[I],
995                                  ArgTypes[I], AsyncInfo);
996     }
997     if (Ret == OFFLOAD_FAIL)
998       return OFFLOAD_FAIL;
999   }
1000   return OFFLOAD_SUCCESS;
1001 }
1002 
1003 static const unsigned LambdaMapping = OMP_TGT_MAPTYPE_PTR_AND_OBJ |
1004                                       OMP_TGT_MAPTYPE_LITERAL |
1005                                       OMP_TGT_MAPTYPE_IMPLICIT;
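/// Return true if the given map type has all of the LambdaMapping bits set
/// (PTR_AND_OBJ | LITERAL | IMPLICIT), which is how variables captured by a
/// lambda are mapped.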
1006 static bool isLambdaMapping(int64_t Mapping) {
1007   return (Mapping & LambdaMapping) == LambdaMapping;
1008 }
1009 
1010 namespace {
1011 /// Find the table information in the map or look it up in the translation
1012 /// tables.
1013 TableMap *getTableMap(void *HostPtr) {
1014   std::lock_guard<std::mutex> TblMapLock(PM->TblMapMtx);
1015   HostPtrToTableMapTy::iterator TableMapIt =
1016       PM->HostPtrToTableMap.find(HostPtr);
1017 
1018   if (TableMapIt != PM->HostPtrToTableMap.end())
1019     return &TableMapIt->second;
1020 
1021   // We don't have a map. So search all the registered libraries.
1022   TableMap *TM = nullptr;
1023   std::lock_guard<std::mutex> TrlTblLock(PM->TrlTblMtx);
1024   for (HostEntriesBeginToTransTableTy::iterator Itr =
1025            PM->HostEntriesBeginToTransTable.begin();
1026        Itr != PM->HostEntriesBeginToTransTable.end(); ++Itr) {
1027     // get the translation table (which contains all the good info).
1028     TranslationTable *TransTable = &Itr->second;
1029     // iterate over all the host table entries to see if we can locate the
1030     // host_ptr.
1031     __tgt_offload_entry *Cur = TransTable->HostTable.EntriesBegin;
1032     for (uint32_t I = 0; Cur < TransTable->HostTable.EntriesEnd; ++Cur, ++I) {
1033       if (Cur->addr != HostPtr)
1034         continue;
1035       // we got a match, now fill the HostPtrToTableMap so that we
1036       // may avoid this search next time.
1037       TM = &(PM->HostPtrToTableMap)[HostPtr];
1038       TM->Table = TransTable;
1039       TM->Index = I;
1040       return TM;
1041     }
1042   }
1043 
1044   return nullptr;
1045 }
1046 
1047 /// Get loop trip count
/// FIXME: This function will not work correctly if
/// __kmpc_push_target_tripcount_mapper is called in one thread but the
/// offloading is done in another thread, which might occur when we call task
/// yield.
1051 uint64_t getLoopTripCount(int64_t DeviceId) {
1052   DeviceTy &Device = *PM->Devices[DeviceId];
1053   uint64_t LoopTripCount = 0;
1054 
1055   {
1056     std::lock_guard<std::mutex> TblMapLock(PM->TblMapMtx);
1057     auto I = Device.LoopTripCnt.find(__kmpc_global_thread_num(NULL));
1058     if (I != Device.LoopTripCnt.end()) {
1059       LoopTripCount = I->second;
1060       Device.LoopTripCnt.erase(I);
1061       DP("loop trip count is %" PRIu64 ".\n", LoopTripCount);
1062     }
1063   }
1064 
1065   return LoopTripCount;
1066 }
1067 
/// A class that manages private arguments in a target region.
1069 class PrivateArgumentManagerTy {
1070   /// A data structure for the information of first-private arguments. We can
1071   /// use this information to optimize data transfer by packing all
  /// first-private arguments and transferring them all at once.
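  /// For example (illustrative sizes only), three 8-byte first-private scalars
  /// are copied back to back into one host staging buffer, a single 24-byte
  /// device allocation is made, and each argument's target pointer is set to
  /// the corresponding offset inside that allocation (see packAndTransfer).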
1073   struct FirstPrivateArgInfoTy {
1074     /// The index of the element in \p TgtArgs corresponding to the argument
1075     const int Index;
1076     /// Host pointer begin
1077     const char *HstPtrBegin;
1078     /// Host pointer end
1079     const char *HstPtrEnd;
1080     /// Aligned size
1081     const int64_t AlignedSize;
1082     /// Host pointer name
1083     const map_var_info_t HstPtrName = nullptr;
1084 
1085     FirstPrivateArgInfoTy(int Index, const void *HstPtr, int64_t Size,
1086                           const map_var_info_t HstPtrName = nullptr)
1087         : Index(Index), HstPtrBegin(reinterpret_cast<const char *>(HstPtr)),
1088           HstPtrEnd(HstPtrBegin + Size), AlignedSize(Size + Size % Alignment),
1089           HstPtrName(HstPtrName) {}
1090   };
1091 
1092   /// A vector of target pointers for all private arguments
1093   std::vector<void *> TgtPtrs;
1094 
1095   /// A vector of information of all first-private arguments to be packed
1096   std::vector<FirstPrivateArgInfoTy> FirstPrivateArgInfo;
1097   /// Host buffer for all arguments to be packed
1098   std::vector<char> FirstPrivateArgBuffer;
1099   /// The total size of all arguments to be packed
1100   int64_t FirstPrivateArgSize = 0;
1101 
1102   /// A reference to the \p DeviceTy object
1103   DeviceTy &Device;
1104   /// A pointer to a \p AsyncInfoTy object
1105   AsyncInfoTy &AsyncInfo;
1106 
1107   // TODO: What would be the best value here? Should we make it configurable?
1108   // If the size is larger than this threshold, we will allocate and transfer it
1109   // immediately instead of packing it.
1110   static constexpr const int64_t FirstPrivateArgSizeThreshold = 1024;
1111 
1112 public:
1113   /// Constructor
1114   PrivateArgumentManagerTy(DeviceTy &Dev, AsyncInfoTy &AsyncInfo)
1115       : Device(Dev), AsyncInfo(AsyncInfo) {}
1116 
1117   /// Add a private argument
1118   int addArg(void *HstPtr, int64_t ArgSize, int64_t ArgOffset,
1119              bool IsFirstPrivate, void *&TgtPtr, int TgtArgsIndex,
1120              const map_var_info_t HstPtrName = nullptr,
1121              const bool AllocImmediately = false) {
1122     // If the argument is not first-private, or its size is greater than a
1123     // predefined threshold, we will allocate memory and issue the transfer
1124     // immediately.
1125     if (ArgSize > FirstPrivateArgSizeThreshold || !IsFirstPrivate ||
1126         AllocImmediately) {
1127       TgtPtr = Device.allocData(ArgSize, HstPtr);
1128       if (!TgtPtr) {
1129         DP("Data allocation for %sprivate array " DPxMOD " failed.\n",
1130            (IsFirstPrivate ? "first-" : ""), DPxPTR(HstPtr));
1131         return OFFLOAD_FAIL;
1132       }
1133 #ifdef OMPTARGET_DEBUG
1134       void *TgtPtrBase = (void *)((intptr_t)TgtPtr + ArgOffset);
1135       DP("Allocated %" PRId64 " bytes of target memory at " DPxMOD
1136          " for %sprivate array " DPxMOD " - pushing target argument " DPxMOD
1137          "\n",
1138          ArgSize, DPxPTR(TgtPtr), (IsFirstPrivate ? "first-" : ""),
1139          DPxPTR(HstPtr), DPxPTR(TgtPtrBase));
1140 #endif
1141       // If first-private, copy data from host
1142       if (IsFirstPrivate) {
1143         DP("Submitting firstprivate data to the device.\n");
1144         int Ret = Device.submitData(TgtPtr, HstPtr, ArgSize, AsyncInfo);
1145         if (Ret != OFFLOAD_SUCCESS) {
          DP("Copying data to device failed.\n");
1147           return OFFLOAD_FAIL;
1148         }
1149       }
1150       TgtPtrs.push_back(TgtPtr);
1151     } else {
1152       DP("Firstprivate array " DPxMOD " of size %" PRId64 " will be packed\n",
1153          DPxPTR(HstPtr), ArgSize);
      // When we reach this point, the argument must meet all of the following
      // requirements:
      // 1. Its size does not exceed the threshold (see the comment for
      //    FirstPrivateArgSizeThreshold);
      // 2. It must be first-private (it needs to be mapped to the target
      //    device).
      // We pack all such arguments and transfer them at once to reduce the
      // number of data transfers. We do not take non-first-private arguments,
      // i.e., private arguments that do not need to be mapped to the target
      // device, into account because data allocation can be done very
      // efficiently with the memory manager.
1164 
1165       // Placeholder value
1166       TgtPtr = nullptr;
1167       FirstPrivateArgInfo.emplace_back(TgtArgsIndex, HstPtr, ArgSize,
1168                                        HstPtrName);
1169       FirstPrivateArgSize += FirstPrivateArgInfo.back().AlignedSize;
1170     }
1171 
1172     return OFFLOAD_SUCCESS;
1173   }
1174 
  /// Pack first-private arguments, replace placeholder pointers in \p TgtArgs,
1176   /// and start the transfer.
1177   int packAndTransfer(std::vector<void *> &TgtArgs) {
1178     if (!FirstPrivateArgInfo.empty()) {
1179       assert(FirstPrivateArgSize != 0 &&
1180              "FirstPrivateArgSize is 0 but FirstPrivateArgInfo is empty");
1181       FirstPrivateArgBuffer.resize(FirstPrivateArgSize, 0);
1182       auto Itr = FirstPrivateArgBuffer.begin();
1183       // Copy all host data to this buffer
1184       for (FirstPrivateArgInfoTy &Info : FirstPrivateArgInfo) {
1185         std::copy(Info.HstPtrBegin, Info.HstPtrEnd, Itr);
1186         Itr = std::next(Itr, Info.AlignedSize);
1187       }
1188       // Allocate target memory
1189       void *TgtPtr =
1190           Device.allocData(FirstPrivateArgSize, FirstPrivateArgBuffer.data());
1191       if (TgtPtr == nullptr) {
1192         DP("Failed to allocate target memory for private arguments.\n");
1193         return OFFLOAD_FAIL;
1194       }
1195       TgtPtrs.push_back(TgtPtr);
1196       DP("Allocated %" PRId64 " bytes of target memory at " DPxMOD "\n",
1197          FirstPrivateArgSize, DPxPTR(TgtPtr));
1198       // Transfer data to target device
1199       int Ret = Device.submitData(TgtPtr, FirstPrivateArgBuffer.data(),
1200                                   FirstPrivateArgSize, AsyncInfo);
1201       if (Ret != OFFLOAD_SUCCESS) {
1202         DP("Failed to submit data of private arguments.\n");
1203         return OFFLOAD_FAIL;
1204       }
1205       // Fill in all placeholder pointers
1206       auto TP = reinterpret_cast<uintptr_t>(TgtPtr);
1207       for (FirstPrivateArgInfoTy &Info : FirstPrivateArgInfo) {
1208         void *&Ptr = TgtArgs[Info.Index];
        assert(Ptr == nullptr && "Target pointer is already set by mistake");
1210         Ptr = reinterpret_cast<void *>(TP);
1211         TP += Info.AlignedSize;
1212         DP("Firstprivate array " DPxMOD " of size %" PRId64 " mapped to " DPxMOD
1213            "\n",
1214            DPxPTR(Info.HstPtrBegin), Info.HstPtrEnd - Info.HstPtrBegin,
1215            DPxPTR(Ptr));
1216       }
1217     }
1218 
1219     return OFFLOAD_SUCCESS;
1220   }
1221 
1222   /// Free all target memory allocated for private arguments
1223   int free() {
1224     for (void *P : TgtPtrs) {
1225       int Ret = Device.deleteData(P);
1226       if (Ret != OFFLOAD_SUCCESS) {
1227         DP("Deallocation of (first-)private arrays failed.\n");
1228         return OFFLOAD_FAIL;
1229       }
1230     }
1231 
1232     TgtPtrs.clear();
1233 
1234     return OFFLOAD_SUCCESS;
1235   }
1236 };
1237 
/// Process data before launching the kernel, including calling targetDataBegin
/// to map and transfer data to the target device and transferring
/// (first-)private variables.
1241 static int processDataBefore(ident_t *loc, int64_t DeviceId, void *HostPtr,
1242                              int32_t ArgNum, void **ArgBases, void **Args,
1243                              int64_t *ArgSizes, int64_t *ArgTypes,
1244                              map_var_info_t *ArgNames, void **ArgMappers,
1245                              std::vector<void *> &TgtArgs,
1246                              std::vector<ptrdiff_t> &TgtOffsets,
1247                              PrivateArgumentManagerTy &PrivateArgumentManager,
1248                              AsyncInfoTy &AsyncInfo) {
1249   TIMESCOPE_WITH_NAME_AND_IDENT("mappingBeforeTargetRegion", loc);
1250   DeviceTy &Device = *PM->Devices[DeviceId];
1251   int Ret = targetDataBegin(loc, Device, ArgNum, ArgBases, Args, ArgSizes,
1252                             ArgTypes, ArgNames, ArgMappers, AsyncInfo);
1253   if (Ret != OFFLOAD_SUCCESS) {
1254     REPORT("Call to targetDataBegin failed, abort target.\n");
1255     return OFFLOAD_FAIL;
1256   }
1257 
1258   // List of (first-)private arrays allocated for this target region
1259   std::vector<int> TgtArgsPositions(ArgNum, -1);
1260 
1261   for (int32_t I = 0; I < ArgNum; ++I) {
1262     if (!(ArgTypes[I] & OMP_TGT_MAPTYPE_TARGET_PARAM)) {
1263       // This is not a target parameter, do not push it into TgtArgs.
1264       // Check for lambda mapping.
1265       if (isLambdaMapping(ArgTypes[I])) {
1266         assert((ArgTypes[I] & OMP_TGT_MAPTYPE_MEMBER_OF) &&
1267                "PTR_AND_OBJ must be also MEMBER_OF.");
1268         unsigned Idx = getParentIndex(ArgTypes[I]);
1269         int TgtIdx = TgtArgsPositions[Idx];
1270         assert(TgtIdx != -1 && "Base address must be translated already.");
1271         // The parent lambda must be processed already and it must be the last
1272         // in TgtArgs and TgtOffsets arrays.
1273         void *HstPtrVal = Args[I];
1274         void *HstPtrBegin = ArgBases[I];
1275         void *HstPtrBase = Args[Idx];
1276         bool IsLast, IsHostPtr; // IsLast is unused.
1277         void *TgtPtrBase =
1278             (void *)((intptr_t)TgtArgs[TgtIdx] + TgtOffsets[TgtIdx]);
1279         DP("Parent lambda base " DPxMOD "\n", DPxPTR(TgtPtrBase));
1280         uint64_t Delta = (uint64_t)HstPtrBegin - (uint64_t)HstPtrBase;
1281         void *TgtPtrBegin = (void *)((uintptr_t)TgtPtrBase + Delta);
1282         void *&PointerTgtPtrBegin = AsyncInfo.getVoidPtrLocation();
1283         PointerTgtPtrBegin = Device.getTgtPtrBegin(
1284             HstPtrVal, ArgSizes[I], IsLast, /*UpdateRefCount=*/false,
1285             /*UseHoldRefCount=*/false, IsHostPtr);
1286         if (!PointerTgtPtrBegin) {
1287           DP("No lambda captured variable mapped (" DPxMOD ") - ignored\n",
1288              DPxPTR(HstPtrVal));
1289           continue;
1290         }
1291         if (IsHostPtr) {
          DP("Unified memory is active, no need to map lambda captured "
1293              "variable (" DPxMOD ")\n",
1294              DPxPTR(HstPtrVal));
1295           continue;
1296         }
1297         DP("Update lambda reference (" DPxMOD ") -> [" DPxMOD "]\n",
1298            DPxPTR(PointerTgtPtrBegin), DPxPTR(TgtPtrBegin));
1299         Ret = Device.submitData(TgtPtrBegin, &PointerTgtPtrBegin,
1300                                 sizeof(void *), AsyncInfo);
1301         if (Ret != OFFLOAD_SUCCESS) {
1302           REPORT("Copying data to device failed.\n");
1303           return OFFLOAD_FAIL;
1304         }
1305       }
1306       continue;
1307     }
1308     void *HstPtrBegin = Args[I];
1309     void *HstPtrBase = ArgBases[I];
1310     void *TgtPtrBegin;
1311     map_var_info_t HstPtrName = (!ArgNames) ? nullptr : ArgNames[I];
1312     ptrdiff_t TgtBaseOffset;
1313     bool IsLast, IsHostPtr; // unused.
1314     if (ArgTypes[I] & OMP_TGT_MAPTYPE_LITERAL) {
1315       DP("Forwarding first-private value " DPxMOD " to the target construct\n",
1316          DPxPTR(HstPtrBase));
1317       TgtPtrBegin = HstPtrBase;
1318       TgtBaseOffset = 0;
1319     } else if (ArgTypes[I] & OMP_TGT_MAPTYPE_PRIVATE) {
1320       TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin;
1321       const bool IsFirstPrivate = (ArgTypes[I] & OMP_TGT_MAPTYPE_TO);
1322       // If there is a next argument and it depends on the current one, we need
1323       // to allocate the private memory immediately. If this is not the case,
1324       // then the argument can be marked for optimization and packed with the
1325       // other privates.
1326       const bool AllocImmediately =
1327           (I < ArgNum - 1 && (ArgTypes[I + 1] & OMP_TGT_MAPTYPE_MEMBER_OF));
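      // Illustrative case (the exact map-type combination emitted by the
      // compiler may differ): with
      //
      //   ArgTypes[I]     = PRIVATE | TO | TARGET_PARAM  // private copy
      //   ArgTypes[I + 1] = PTR_AND_OBJ | MEMBER_OF(I)   // refers back to it
      //
      // the device allocation for argument I must exist before argument I + 1
      // is processed, so it cannot wait for the batched packAndTransfer() path.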
1328       Ret = PrivateArgumentManager.addArg(
1329           HstPtrBegin, ArgSizes[I], TgtBaseOffset, IsFirstPrivate, TgtPtrBegin,
1330           TgtArgs.size(), HstPtrName, AllocImmediately);
1331       if (Ret != OFFLOAD_SUCCESS) {
1332         REPORT("Failed to process %sprivate argument " DPxMOD "\n",
1333                (IsFirstPrivate ? "first-" : ""), DPxPTR(HstPtrBegin));
1334         return OFFLOAD_FAIL;
1335       }
1336     } else {
1337       if (ArgTypes[I] & OMP_TGT_MAPTYPE_PTR_AND_OBJ)
1338         HstPtrBase = *reinterpret_cast<void **>(HstPtrBase);
1339       TgtPtrBegin = Device.getTgtPtrBegin(HstPtrBegin, ArgSizes[I], IsLast,
1340                                           /*UpdateRefCount=*/false,
1341                                           /*UseHoldRefCount=*/false, IsHostPtr);
1342       TgtBaseOffset = (intptr_t)HstPtrBase - (intptr_t)HstPtrBegin;
1343 #ifdef OMPTARGET_DEBUG
1344       void *TgtPtrBase = (void *)((intptr_t)TgtPtrBegin + TgtBaseOffset);
1345       DP("Obtained target argument " DPxMOD " from host pointer " DPxMOD "\n",
1346          DPxPTR(TgtPtrBase), DPxPTR(HstPtrBegin));
1347 #endif
1348     }
1349     TgtArgsPositions[I] = TgtArgs.size();
1350     TgtArgs.push_back(TgtPtrBegin);
1351     TgtOffsets.push_back(TgtBaseOffset);
1352   }
1353 
1354   assert(TgtArgs.size() == TgtOffsets.size() &&
1355          "Size mismatch in arguments and offsets");
1356 
1357   // Pack and transfer first-private arguments
1358   Ret = PrivateArgumentManager.packAndTransfer(TgtArgs);
1359   if (Ret != OFFLOAD_SUCCESS) {
    DP("Failed to pack and transfer first-private arguments\n");
1361     return OFFLOAD_FAIL;
1362   }
1363 
1364   return OFFLOAD_SUCCESS;
1365 }
1366 
/// Process data after launching the kernel, including transferring data back
/// to the host if needed and deallocating target memory of (first-)private
/// variables.
1369 static int processDataAfter(ident_t *loc, int64_t DeviceId, void *HostPtr,
1370                             int32_t ArgNum, void **ArgBases, void **Args,
1371                             int64_t *ArgSizes, int64_t *ArgTypes,
1372                             map_var_info_t *ArgNames, void **ArgMappers,
1373                             PrivateArgumentManagerTy &PrivateArgumentManager,
1374                             AsyncInfoTy &AsyncInfo) {
1375   TIMESCOPE_WITH_NAME_AND_IDENT("mappingAfterTargetRegion", loc);
1376   DeviceTy &Device = *PM->Devices[DeviceId];
1377 
1378   // Move data from device.
1379   int Ret = targetDataEnd(loc, Device, ArgNum, ArgBases, Args, ArgSizes,
1380                           ArgTypes, ArgNames, ArgMappers, AsyncInfo);
1381   if (Ret != OFFLOAD_SUCCESS) {
1382     REPORT("Call to targetDataEnd failed, abort target.\n");
1383     return OFFLOAD_FAIL;
1384   }
1385 
1386   // Free target memory for private arguments
1387   Ret = PrivateArgumentManager.free();
1388   if (Ret != OFFLOAD_SUCCESS) {
1389     REPORT("Failed to deallocate target memory for private args\n");
1390     return OFFLOAD_FAIL;
1391   }
1392 
1393   return OFFLOAD_SUCCESS;
1394 }
1395 } // namespace
1396 
/// Performs the same actions as targetDataBegin in case ArgNum is non-zero
/// and initiates the run of the offloaded region on the target platform; if
/// ArgNum is non-zero, after the region execution is done it also performs
/// the same actions as targetDataUpdate and targetDataEnd above. This function
/// returns 0 if it was able to transfer the execution to a target and an
/// integer different from zero otherwise.
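///
/// For example (illustrative only; the exact lowering is Clang's choice), a
/// construct such as
///   #pragma omp target teams map(tofrom: A[0:N])
/// is lowered to a call into one of the __tgt_target* entry points, which
/// eventually reaches this function with the corresponding argument arrays
/// (bases, begins, sizes, map types) and IsTeamConstruct set.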
1403 int target(ident_t *loc, DeviceTy &Device, void *HostPtr, int32_t ArgNum,
1404            void **ArgBases, void **Args, int64_t *ArgSizes, int64_t *ArgTypes,
1405            map_var_info_t *ArgNames, void **ArgMappers, int32_t TeamNum,
1406            int32_t ThreadLimit, int IsTeamConstruct, AsyncInfoTy &AsyncInfo) {
1407   int32_t DeviceId = Device.DeviceID;
1408 
1409   TableMap *TM = getTableMap(HostPtr);
1410   // No map for this host pointer found!
1411   if (!TM) {
1412     REPORT("Host ptr " DPxMOD " does not have a matching target pointer.\n",
1413            DPxPTR(HostPtr));
1414     return OFFLOAD_FAIL;
1415   }
1416 
1417   // get target table.
1418   __tgt_target_table *TargetTable = nullptr;
1419   {
1420     std::lock_guard<std::mutex> TrlTblLock(PM->TrlTblMtx);
1421     assert(TM->Table->TargetsTable.size() > (size_t)DeviceId &&
1422            "Not expecting a device ID outside the table's bounds!");
1423     TargetTable = TM->Table->TargetsTable[DeviceId];
1424   }
1425   assert(TargetTable && "Global data has not been mapped\n");
1426 
1427   // We need to keep bases and offsets separate. Sometimes (e.g. in OpenCL) we
1428   // need to manifest base pointers prior to launching a kernel. Even if we have
1429   // mapped an object only partially, e.g. A[N:M], although the kernel is
1430   // expected to access elements starting at address &A[N] and beyond, we still
1431   // need to manifest the base of the array &A[0]. In other cases, e.g. the COI
1432   // API, we need the begin address itself, i.e. &A[N], as the API operates on
1433   // begin addresses, not bases. That's why we pass args and offsets as two
1434   // separate entities so that each plugin can do what it needs. This behavior
  // was introduced via https://reviews.llvm.org/D33028 and commit 1546d319244c.
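  //
  // Illustrative sketch of the resulting layout (addresses are made up): for a
  // partial map of A[N:M] we end up with
  //
  //   TgtArgs[i]    == device address corresponding to &A[N]
  //   TgtOffsets[i] == (char *)&A[0] - (char *)&A[N]   // non-positive
  //
  // so a plugin that needs the base can reconstruct it as
  // TgtArgs[i] + TgtOffsets[i], while a plugin that operates on begin
  // addresses can use TgtArgs[i] directly.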
1436   std::vector<void *> TgtArgs;
1437   std::vector<ptrdiff_t> TgtOffsets;
1438 
1439   PrivateArgumentManagerTy PrivateArgumentManager(Device, AsyncInfo);
1440 
1441   int Ret;
1442   if (ArgNum) {
1443     // Process data, such as data mapping, before launching the kernel
1444     Ret = processDataBefore(loc, DeviceId, HostPtr, ArgNum, ArgBases, Args,
1445                             ArgSizes, ArgTypes, ArgNames, ArgMappers, TgtArgs,
1446                             TgtOffsets, PrivateArgumentManager, AsyncInfo);
1447     if (Ret != OFFLOAD_SUCCESS) {
1448       REPORT("Failed to process data before launching the kernel.\n");
1449       return OFFLOAD_FAIL;
1450     }
1451   }
1452 
1453   // Launch device execution.
1454   void *TgtEntryPtr = TargetTable->EntriesBegin[TM->Index].addr;
1455   DP("Launching target execution %s with pointer " DPxMOD " (index=%d).\n",
1456      TargetTable->EntriesBegin[TM->Index].name, DPxPTR(TgtEntryPtr), TM->Index);
1457 
1458   {
1459     TIMESCOPE_WITH_NAME_AND_IDENT(
1460         IsTeamConstruct ? "runTargetTeamRegion" : "runTargetRegion", loc);
1461     if (IsTeamConstruct)
1462       Ret = Device.runTeamRegion(TgtEntryPtr, &TgtArgs[0], &TgtOffsets[0],
1463                                  TgtArgs.size(), TeamNum, ThreadLimit,
1464                                  getLoopTripCount(DeviceId), AsyncInfo);
1465     else
1466       Ret = Device.runRegion(TgtEntryPtr, &TgtArgs[0], &TgtOffsets[0],
1467                              TgtArgs.size(), AsyncInfo);
1468   }
1469 
1470   if (Ret != OFFLOAD_SUCCESS) {
    REPORT("Executing target region failed, abort target.\n");
1472     return OFFLOAD_FAIL;
1473   }
1474 
1475   if (ArgNum) {
1476     // Transfer data back and deallocate target memory for (first-)private
1477     // variables
1478     Ret = processDataAfter(loc, DeviceId, HostPtr, ArgNum, ArgBases, Args,
1479                            ArgSizes, ArgTypes, ArgNames, ArgMappers,
1480                            PrivateArgumentManager, AsyncInfo);
1481     if (Ret != OFFLOAD_SUCCESS) {
1482       REPORT("Failed to process data after launching the kernel.\n");
1483       return OFFLOAD_FAIL;
1484     }
1485   }
1486 
1487   return OFFLOAD_SUCCESS;
1488 }
1489