//===----RTLs/hsa/src/rtl.cpp - Target RTLs Implementation -------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// RTL for hsa machine
//
//===----------------------------------------------------------------------===//

#include <algorithm>
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <dlfcn.h>
#include <elf.h>
#include <ffi.h>
#include <fstream>
#include <iostream>
#include <libelf.h>
#include <list>
#include <memory>
#include <mutex>
#include <queue>
#include <shared_mutex>
#include <thread>
#include <unordered_map>
#include <vector>

// Header from ATMI interface
#include "atmi_interop_hsa.h"
#include "atmi_runtime.h"

#include "internal.h"

#include "Debug.h"
#include "omptargetplugin.h"

#include "llvm/Frontend/OpenMP/OMPGridValues.h"

#ifndef TARGET_NAME
#define TARGET_NAME AMDHSA
#endif
#define DEBUG_PREFIX "Target " GETNAME(TARGET_NAME) " RTL"

int print_kernel_trace;

// Size of the target call stack structure
uint32_t TgtStackItemSize = 0;

#undef check // Drop definition from internal.h
#ifdef OMPTARGET_DEBUG
#define check(msg, status)                                                     \
  if (status != ATMI_STATUS_SUCCESS) {                                         \
    /* fprintf(stderr, "[%s:%d] %s failed.\n", __FILE__, __LINE__, #msg);*/    \
    DP(#msg " failed\n");                                                      \
    /*assert(0);*/                                                             \
  } else {                                                                     \
    /* fprintf(stderr, "[%s:%d] %s succeeded.\n", __FILE__, __LINE__, #msg);   \
     */                                                                        \
    DP(#msg " succeeded\n");                                                   \
  }
#else
#define check(msg, status)                                                     \
  {}
#endif

#include "../../common/elf_common.c"

static bool elf_machine_id_is_amdgcn(__tgt_device_image *image) {
  const uint16_t amdgcnMachineID = 224; // EM_AMDGPU
  int32_t r = elf_check_machine(image, amdgcnMachineID);
  if (!r) {
    DP("Supported machine ID not found\n");
  }
  return r;
}

/// Keep entries table per device
struct FuncOrGblEntryTy {
  __tgt_target_table Table;
  std::vector<__tgt_offload_entry> Entries;
};

enum ExecutionModeType {
  SPMD,    // constructors, destructors,
           // combined constructs (`teams distribute parallel for [simd]`)
  GENERIC, // everything else
  NONE
};

struct KernelArgPool {
private:
  static pthread_mutex_t mutex;

public:
  uint32_t kernarg_segment_size;
  void *kernarg_region = nullptr;
  std::queue<int> free_kernarg_segments;

  uint32_t kernarg_size_including_implicit() {
    return kernarg_segment_size + sizeof(atmi_implicit_args_t);
  }

  ~KernelArgPool() {
    if (kernarg_region) {
      auto r = hsa_amd_memory_pool_free(kernarg_region);
      assert(r == HSA_STATUS_SUCCESS);
      ErrorCheck(Memory pool free, r);
    }
  }

  // Can't really copy or move a mutex
  KernelArgPool() = default;
  KernelArgPool(const KernelArgPool &) = delete;
  KernelArgPool(KernelArgPool &&) = delete;
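  // Layout of the region allocated by the constructor below (illustration,
  // not an ABI statement): MAX_NUM_KERNELS fixed-size slots, each holding the
  // explicit kernel arguments followed by ATMI's implicit arguments. For a
  // kernel taking four void* arguments, slot k starts at byte offset
  //   k * (4 * sizeof(void *) + sizeof(atmi_implicit_args_t))
  // free_kernarg_segments tracks which slot indices are currently available.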
  KernelArgPool(uint32_t kernarg_segment_size)
      : kernarg_segment_size(kernarg_segment_size) {

    // atmi uses one pool per kernel for all gpus, with a fixed upper size
    // preserving that exact scheme here, including the queue<int>
    {
      hsa_status_t err = hsa_amd_memory_pool_allocate(
          atl_gpu_kernarg_pools[0],
          kernarg_size_including_implicit() * MAX_NUM_KERNELS, 0,
          &kernarg_region);
      ErrorCheck(Allocating memory for the executable-kernel, err);
      core::allow_access_to_all_gpu_agents(kernarg_region);

      for (int i = 0; i < MAX_NUM_KERNELS; i++) {
        free_kernarg_segments.push(i);
      }
    }
  }

  void *allocate(uint64_t arg_num) {
    assert((arg_num * sizeof(void *)) == kernarg_segment_size);
    lock l(&mutex);
    void *res = nullptr;
    if (!free_kernarg_segments.empty()) {
      int free_idx = free_kernarg_segments.front();
      res = static_cast<void *>(static_cast<char *>(kernarg_region) +
                                (free_idx * kernarg_size_including_implicit()));
      assert(free_idx == pointer_to_index(res));
      free_kernarg_segments.pop();
    }
    return res;
  }

  void deallocate(void *ptr) {
    lock l(&mutex);
    int idx = pointer_to_index(ptr);
    free_kernarg_segments.push(idx);
  }

private:
  int pointer_to_index(void *ptr) {
    ptrdiff_t bytes =
        static_cast<char *>(ptr) - static_cast<char *>(kernarg_region);
    assert(bytes >= 0);
    assert(bytes % kernarg_size_including_implicit() == 0);
    return bytes / kernarg_size_including_implicit();
  }
  struct lock {
    lock(pthread_mutex_t *m) : m(m) { pthread_mutex_lock(m); }
    ~lock() { pthread_mutex_unlock(m); }
    pthread_mutex_t *m;
  };
};
pthread_mutex_t KernelArgPool::mutex = PTHREAD_MUTEX_INITIALIZER;

std::unordered_map<std::string /*kernel*/, std::unique_ptr<KernelArgPool>>
    KernelArgPoolMap;
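// Usage sketch for the pool (hypothetical caller; the real dispatch path in
// __tgt_rtl_run_target_team_region_locked below follows the same shape):
//   KernelArgPool *pool = KernelArgPoolMap[kernel_name].get();
//   void *kernarg = pool->allocate(arg_num); // nullptr if all slots in use
//   // ... write arg_num pointers, then fill the implicit-args block ...
//   pool->deallocate(kernarg); // once the completion signal has fired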
/// Use a single entity to encode a kernel and a set of flags
struct KernelTy {
  // execution mode of kernel
  // 0 - SPMD mode (without master warp)
  // 1 - Generic mode (with master warp)
  int8_t ExecutionMode;
  int16_t ConstWGSize;
  int32_t device_id;
  void *CallStackAddr = nullptr;
  const char *Name;

  KernelTy(int8_t _ExecutionMode, int16_t _ConstWGSize, int32_t _device_id,
           void *_CallStackAddr, const char *_Name,
           uint32_t _kernarg_segment_size)
      : ExecutionMode(_ExecutionMode), ConstWGSize(_ConstWGSize),
        device_id(_device_id), CallStackAddr(_CallStackAddr), Name(_Name) {
    DP("Construct kernelinfo: ExecMode %d\n", ExecutionMode);

    std::string N(_Name);
    if (KernelArgPoolMap.find(N) == KernelArgPoolMap.end()) {
      KernelArgPoolMap.insert(
          std::make_pair(N, std::unique_ptr<KernelArgPool>(
                                new KernelArgPool(_kernarg_segment_size))));
    }
  }
};

/// List that contains all the kernels.
/// FIXME: we may need this to be per device and per library.
std::list<KernelTy> KernelsList;

// ATMI API to get gpu and gpu memory place
static atmi_place_t get_gpu_place(int device_id) {
  return ATMI_PLACE_GPU(0, device_id);
}
static atmi_mem_place_t get_gpu_mem_place(int device_id) {
  return ATMI_MEM_PLACE_GPU_MEM(0, device_id, 0);
}

static std::vector<hsa_agent_t> find_gpu_agents() {
  std::vector<hsa_agent_t> res;

  hsa_status_t err = hsa_iterate_agents(
      [](hsa_agent_t agent, void *data) -> hsa_status_t {
        std::vector<hsa_agent_t> *res =
            static_cast<std::vector<hsa_agent_t> *>(data);

        hsa_device_type_t device_type;
        // get_info fails iff HSA runtime not yet initialized
        hsa_status_t err =
            hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
        if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS)
          printf("rtl.cpp: err %d\n", err);
        assert(err == HSA_STATUS_SUCCESS);

        if (device_type == HSA_DEVICE_TYPE_GPU) {
          res->push_back(agent);
        }
        return HSA_STATUS_SUCCESS;
      },
      &res);

  // iterate_agents fails iff HSA runtime not yet initialized
  if (print_kernel_trace > 0 && err != HSA_STATUS_SUCCESS)
    printf("rtl.cpp: err %d\n", err);
  assert(err == HSA_STATUS_SUCCESS);
  return res;
}

static void callbackQueue(hsa_status_t status, hsa_queue_t *source,
                          void *data) {
  if (status != HSA_STATUS_SUCCESS) {
    const char *status_string;
    if (hsa_status_string(status, &status_string) != HSA_STATUS_SUCCESS) {
      status_string = "unavailable";
    }
    fprintf(stderr, "[%s:%d] GPU error in queue %p %d (%s)\n", __FILE__,
            __LINE__, source, status, status_string);
    abort();
  }
}

namespace core {
void packet_store_release(uint32_t *packet, uint16_t header, uint16_t rest) {
  __atomic_store_n(packet, header | (rest << 16), __ATOMIC_RELEASE);
}

uint16_t create_header(hsa_packet_type_t type, int barrier,
                       atmi_task_fence_scope_t acq_fence,
                       atmi_task_fence_scope_t rel_fence) {
  uint16_t header = type << HSA_PACKET_HEADER_TYPE;
  header |= barrier << HSA_PACKET_HEADER_BARRIER;
  header |= (hsa_fence_scope_t)static_cast<int>(
      acq_fence << HSA_PACKET_HEADER_ACQUIRE_FENCE_SCOPE);
  header |= (hsa_fence_scope_t)static_cast<int>(
      rel_fence << HSA_PACKET_HEADER_RELEASE_FENCE_SCOPE);
  return header;
}
} // namespace core
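// For illustration, the 16-bit AQL header assembled by create_header packs
// its fields at the bit positions defined by the HSA specification:
//   bits [7:0]   packet type (HSA_PACKET_TYPE_KERNEL_DISPATCH here)
//   bit  [8]     barrier
//   bits [10:9]  acquire fence scope
//   bits [12:11] release fence scope
// packet_store_release then publishes header | (setup << 16) with a single
// atomic release store, which is the step that makes a dispatch packet
// visible to the GPU's packet processor.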
/// Class containing all the device information
class RTLDeviceInfoTy {
  std::vector<std::list<FuncOrGblEntryTy>> FuncGblEntries;

public:
  // load binary populates symbol tables and mutates various global state
  // run uses those symbol tables
  std::shared_timed_mutex load_run_lock;

  int NumberOfDevices;

  // GPU devices
  std::vector<hsa_agent_t> HSAAgents;
  std::vector<hsa_queue_t *> HSAQueues; // one per gpu

  // Device properties
  std::vector<int> ComputeUnits;
  std::vector<int> GroupsPerDevice;
  std::vector<int> ThreadsPerGroup;
  std::vector<int> WarpSize;

  // OpenMP properties
  std::vector<int> NumTeams;
  std::vector<int> NumThreads;

  // OpenMP Environment properties
  int EnvNumTeams;
  int EnvTeamLimit;
  int EnvMaxTeamsDefault;

  // OpenMP Requires Flags
  int64_t RequiresFlags;

  // Resource pools
  SignalPoolT FreeSignalPool;

  struct atmiFreePtrDeletor {
    void operator()(void *p) {
      atmi_free(p); // ignore failure to free
    }
  };

  // device_State shared across loaded binaries, error if inconsistent size
  std::vector<std::pair<std::unique_ptr<void, atmiFreePtrDeletor>, uint64_t>>
      deviceStateStore;

  static const unsigned HardTeamLimit =
      (1 << 16) - 1; // 64K needed to fit in uint16
  static const int DefaultNumTeams = 128;
  static const int Max_Teams =
      llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Max_Teams];
  static const int Warp_Size =
      llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Warp_Size];
  static const int Max_WG_Size =
      llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Max_WG_Size];
  static const int Default_WG_Size =
      llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Default_WG_Size];

  using MemcpyFunc = atmi_status_t (*)(hsa_signal_t, void *, const void *,
                                       size_t size, hsa_agent_t);
  atmi_status_t freesignalpool_memcpy(void *dest, const void *src, size_t size,
                                      MemcpyFunc Func, int32_t deviceId) {
    hsa_agent_t agent = HSAAgents[deviceId];
    hsa_signal_t s = FreeSignalPool.pop();
    if (s.handle == 0) {
      return ATMI_STATUS_ERROR;
    }
    atmi_status_t r = Func(s, dest, src, size, agent);
    FreeSignalPool.push(s);
    return r;
  }

  atmi_status_t freesignalpool_memcpy_d2h(void *dest, const void *src,
                                          size_t size, int32_t deviceId) {
    return freesignalpool_memcpy(dest, src, size, atmi_memcpy_d2h, deviceId);
  }

  atmi_status_t freesignalpool_memcpy_h2d(void *dest, const void *src,
                                          size_t size, int32_t deviceId) {
    return freesignalpool_memcpy(dest, src, size, atmi_memcpy_h2d, deviceId);
  }
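  // The wrappers above implement blocking copies: borrow a completion signal
  // from FreeSignalPool, hand it to the ATMI copy routine, and return the
  // signal once the copy has finished. Sketch of a caller (as used later in
  // this file):
  //   atmi_status_t rc =
  //       DeviceInfo.freesignalpool_memcpy_h2d(tgt, hst, nbytes, device_id);
  //   if (rc != ATMI_STATUS_SUCCESS) { /* copy failed */ }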
  // Record entry point associated with device
  void addOffloadEntry(int32_t device_id, __tgt_offload_entry entry) {
    assert(device_id < (int32_t)FuncGblEntries.size() &&
           "Unexpected device id!");
    FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();

    E.Entries.push_back(entry);
  }

  // Return true if the entry is associated with device
  bool findOffloadEntry(int32_t device_id, void *addr) {
    assert(device_id < (int32_t)FuncGblEntries.size() &&
           "Unexpected device id!");
    FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();

    for (auto &it : E.Entries) {
      if (it.addr == addr)
        return true;
    }

    return false;
  }

  // Return the pointer to the target entries table
  __tgt_target_table *getOffloadEntriesTable(int32_t device_id) {
    assert(device_id < (int32_t)FuncGblEntries.size() &&
           "Unexpected device id!");
    FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();

    int32_t size = E.Entries.size();

    // Table is empty
    if (!size)
      return 0;

    __tgt_offload_entry *begin = &E.Entries[0];
    __tgt_offload_entry *end = &E.Entries[size - 1];

    // Update table info according to the entries and return the pointer
    E.Table.EntriesBegin = begin;
    E.Table.EntriesEnd = ++end;

    return &E.Table;
  }

  // Clear entries table for a device
  void clearOffloadEntriesTable(int device_id) {
    assert(device_id < (int32_t)FuncGblEntries.size() &&
           "Unexpected device id!");
    FuncGblEntries[device_id].emplace_back();
    FuncOrGblEntryTy &E = FuncGblEntries[device_id].back();
    // KernelArgPoolMap.clear();
    E.Entries.clear();
    E.Table.EntriesBegin = E.Table.EntriesEnd = 0;
  }

  RTLDeviceInfoTy() {
    // LIBOMPTARGET_KERNEL_TRACE provides a kernel launch trace to stderr at
    // any time; a debug library build is not required.
    // 0 => no tracing
    // 1 => tracing dispatch only
    // >1 => verbosity increase
    if (char *envStr = getenv("LIBOMPTARGET_KERNEL_TRACE"))
      print_kernel_trace = atoi(envStr);
    else
      print_kernel_trace = 0;

    DP("Start initializing HSA-ATMI\n");
    atmi_status_t err = atmi_init();
    if (err != ATMI_STATUS_SUCCESS) {
      DP("Error when initializing HSA-ATMI\n");
      return;
    }

    HSAAgents = find_gpu_agents();
    NumberOfDevices = (int)HSAAgents.size();

    if (NumberOfDevices == 0) {
      DP("There are no devices supporting HSA.\n");
      return;
    } else {
      DP("There are %d devices supporting HSA.\n", NumberOfDevices);
    }

    // Init the device info
    HSAQueues.resize(NumberOfDevices);
    FuncGblEntries.resize(NumberOfDevices);
    ThreadsPerGroup.resize(NumberOfDevices);
    ComputeUnits.resize(NumberOfDevices);
    GroupsPerDevice.resize(NumberOfDevices);
    WarpSize.resize(NumberOfDevices);
    NumTeams.resize(NumberOfDevices);
    NumThreads.resize(NumberOfDevices);
    deviceStateStore.resize(NumberOfDevices);

    for (int i = 0; i < NumberOfDevices; i++) {
      uint32_t queue_size = 0;
      {
        hsa_status_t err;
        err = hsa_agent_get_info(HSAAgents[i], HSA_AGENT_INFO_QUEUE_MAX_SIZE,
                                 &queue_size);
        ErrorCheck(Querying the agent maximum queue size, err);
        if (queue_size > core::Runtime::getInstance().getMaxQueueSize()) {
          queue_size = core::Runtime::getInstance().getMaxQueueSize();
        }
      }

      hsa_status_t rc = hsa_queue_create(
          HSAAgents[i], queue_size, HSA_QUEUE_TYPE_MULTI, callbackQueue, NULL,
          UINT32_MAX, UINT32_MAX, &HSAQueues[i]);
      if (rc != HSA_STATUS_SUCCESS) {
        DP("Failed to create HSA queues\n");
        return;
      }

      deviceStateStore[i] = {nullptr, 0};
    }

    for (int i = 0; i < NumberOfDevices; i++) {
      ThreadsPerGroup[i] = RTLDeviceInfoTy::Default_WG_Size;
      GroupsPerDevice[i] = RTLDeviceInfoTy::DefaultNumTeams;
      ComputeUnits[i] = 1;
      DP("Device %d: Initial groupsPerDevice %d & threadsPerGroup %d\n", i,
         GroupsPerDevice[i], ThreadsPerGroup[i]);
    }

    // Get environment variables regarding teams
    char *envStr = getenv("OMP_TEAM_LIMIT");
    if (envStr) {
      // OMP_TEAM_LIMIT has been set
      EnvTeamLimit = std::stoi(envStr);
      DP("Parsed OMP_TEAM_LIMIT=%d\n", EnvTeamLimit);
    } else {
      EnvTeamLimit = -1;
    }
    envStr = getenv("OMP_NUM_TEAMS");
    if (envStr) {
      // OMP_NUM_TEAMS has been set
      EnvNumTeams = std::stoi(envStr);
      DP("Parsed OMP_NUM_TEAMS=%d\n", EnvNumTeams);
    } else {
      EnvNumTeams = -1;
    }
    // Get environment variables regarding expMaxTeams
    envStr = getenv("OMP_MAX_TEAMS_DEFAULT");
    if (envStr) {
      EnvMaxTeamsDefault = std::stoi(envStr);
      DP("Parsed OMP_MAX_TEAMS_DEFAULT=%d\n", EnvMaxTeamsDefault);
    } else {
      EnvMaxTeamsDefault = -1;
    }
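    // Summary of the team-related environment variables parsed above
    // (-1 means unset):
    //   OMP_TEAM_LIMIT        caps GroupsPerDevice in __tgt_rtl_init_device
    //   OMP_NUM_TEAMS         overrides the per-device NumTeams default
    //   OMP_MAX_TEAMS_DEFAULT overrides Max_Teams in getLaunchVals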
    // Default state.
    RequiresFlags = OMP_REQ_UNDEFINED;
  }

  ~RTLDeviceInfoTy() {
    DP("Finalizing the HSA-ATMI DeviceInfo.\n");
    // Run destructors on types that use HSA before
    // atmi_finalize removes access to it
    deviceStateStore.clear();
    KernelArgPoolMap.clear();
    atmi_finalize();
  }
};

pthread_mutex_t SignalPoolT::mutex = PTHREAD_MUTEX_INITIALIZER;

// TODO: May need to drop the trailing two fields until deviceRTL is updated
struct omptarget_device_environmentTy {
  int32_t debug_level; // gets value of envvar LIBOMPTARGET_DEVICE_RTL_DEBUG
                       // only useful for Debug build of deviceRTLs
  int32_t num_devices; // gets number of active offload devices
  int32_t device_num;  // gets a value 0 to num_devices-1
};

static RTLDeviceInfoTy DeviceInfo;

namespace {

int32_t dataRetrieve(int32_t DeviceId, void *HstPtr, void *TgtPtr, int64_t Size,
                     __tgt_async_info *AsyncInfoPtr) {
  assert(AsyncInfoPtr && "AsyncInfoPtr is nullptr");
  assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
  // Return success if we are not copying back to host from target.
  if (!HstPtr)
    return OFFLOAD_SUCCESS;
  atmi_status_t err;
  DP("Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
     (long long unsigned)(Elf64_Addr)TgtPtr,
     (long long unsigned)(Elf64_Addr)HstPtr);

  err = DeviceInfo.freesignalpool_memcpy_d2h(HstPtr, TgtPtr, (size_t)Size,
                                             DeviceId);

  if (err != ATMI_STATUS_SUCCESS) {
    DP("Error when copying data from device to host. Pointers: "
       "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
       (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
    return OFFLOAD_FAIL;
  }
  DP("DONE Retrieve data %ld bytes, (tgt:%016llx) -> (hst:%016llx).\n", Size,
     (long long unsigned)(Elf64_Addr)TgtPtr,
     (long long unsigned)(Elf64_Addr)HstPtr);
  return OFFLOAD_SUCCESS;
}

int32_t dataSubmit(int32_t DeviceId, void *TgtPtr, void *HstPtr, int64_t Size,
                   __tgt_async_info *AsyncInfoPtr) {
  assert(AsyncInfoPtr && "AsyncInfoPtr is nullptr");
  atmi_status_t err;
  assert(DeviceId < DeviceInfo.NumberOfDevices && "Device ID too large");
  // Return success if we are not doing host to target.
  if (!HstPtr)
    return OFFLOAD_SUCCESS;

  DP("Submit data %ld bytes, (hst:%016llx) -> (tgt:%016llx).\n", Size,
     (long long unsigned)(Elf64_Addr)HstPtr,
     (long long unsigned)(Elf64_Addr)TgtPtr);
  err = DeviceInfo.freesignalpool_memcpy_h2d(TgtPtr, HstPtr, (size_t)Size,
                                             DeviceId);
  if (err != ATMI_STATUS_SUCCESS) {
    DP("Error when copying data from host to device. Pointers: "
       "host = 0x%016lx, device = 0x%016lx, size = %lld\n",
       (Elf64_Addr)HstPtr, (Elf64_Addr)TgtPtr, (unsigned long long)Size);
    return OFFLOAD_FAIL;
  }
  return OFFLOAD_SUCCESS;
}

// Async.
// The implementation was written with cuda streams in mind. The semantics of
// that are to execute kernels on a queue in order of insertion. A synchronize
// call then makes writes visible between host and device. This means a series
// of N data_submit_async calls are expected to execute serially. HSA offers
// various options to run the data copies concurrently. This may require
// changes to libomptarget.

// __tgt_async_info* contains a void * Queue. Queue = 0 is used to indicate
// that there are no outstanding kernels that need to be synchronized. Any
// async call may be passed a Queue == 0, at which point the cuda
// implementation will set it to non-null (see getStream). The cuda streams
// are per-device. Upstream may change this interface to explicitly initialize
// the async_info_pointer, but until then hsa lazily initializes it as well.
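// Lifecycle sketch of the Queue field under this scheme:
//   __tgt_async_info info;             // Queue == nullptr
//   initAsyncInfoPtr(&info);           // Queue = sentinel (UINT64_MAX)
//   // ... the "async" operations above actually complete synchronously ...
//   __tgt_rtl_synchronize(id, &info);  // finiAsyncInfoPtr resets Queue to 0
// The sentinel only records that work was issued; no real HSA queue handle
// is stored in Queue.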
void initAsyncInfoPtr(__tgt_async_info *async_info_ptr) {
  // set non-null while using async calls, return to null to indicate
  // completion
  assert(async_info_ptr);
  if (!async_info_ptr->Queue) {
    async_info_ptr->Queue = reinterpret_cast<void *>(UINT64_MAX);
  }
}
void finiAsyncInfoPtr(__tgt_async_info *async_info_ptr) {
  assert(async_info_ptr);
  assert(async_info_ptr->Queue);
  async_info_ptr->Queue = 0;
}
} // namespace

int32_t __tgt_rtl_is_valid_binary(__tgt_device_image *image) {
  return elf_machine_id_is_amdgcn(image);
}

int __tgt_rtl_number_of_devices() { return DeviceInfo.NumberOfDevices; }

int64_t __tgt_rtl_init_requires(int64_t RequiresFlags) {
  DP("Init requires flags to %ld\n", RequiresFlags);
  DeviceInfo.RequiresFlags = RequiresFlags;
  return RequiresFlags;
}
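// RequiresFlags is consulted again in __tgt_rtl_load_binary_locked: under
// OMP_REQ_UNIFIED_SHARED_MEMORY, declare-target-link variables keep their
// host addresses rather than receiving device copies.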
int32_t __tgt_rtl_init_device(int device_id) {
  hsa_status_t err;

  // this is per device id init
  DP("Initialize the device id: %d\n", device_id);

  hsa_agent_t agent = DeviceInfo.HSAAgents[device_id];

  // Get number of Compute Unit
  uint32_t compute_units = 0;
  err = hsa_agent_get_info(
      agent, (hsa_agent_info_t)HSA_AMD_AGENT_INFO_COMPUTE_UNIT_COUNT,
      &compute_units);
  if (err != HSA_STATUS_SUCCESS) {
    DeviceInfo.ComputeUnits[device_id] = 1;
    DP("Error getting compute units : setting to 1\n");
  } else {
    DeviceInfo.ComputeUnits[device_id] = compute_units;
    DP("Using %d compute units per grid\n",
       DeviceInfo.ComputeUnits[device_id]);
  }
  if (print_kernel_trace == 4)
    fprintf(stderr, "Device#%-2d CU's: %2d\n", device_id,
            DeviceInfo.ComputeUnits[device_id]);

  // Query attributes to determine number of threads/block and blocks/grid.
  uint16_t workgroup_max_dim[3];
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_WORKGROUP_MAX_DIM,
                           &workgroup_max_dim);
  if (err != HSA_STATUS_SUCCESS) {
    DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::DefaultNumTeams;
    DP("Error getting grid dims: num groups : %d\n",
       RTLDeviceInfoTy::DefaultNumTeams);
  } else if (workgroup_max_dim[0] <= RTLDeviceInfoTy::HardTeamLimit) {
    DeviceInfo.GroupsPerDevice[device_id] = workgroup_max_dim[0];
    DP("Using %d ROCm blocks per grid\n",
       DeviceInfo.GroupsPerDevice[device_id]);
  } else {
    DeviceInfo.GroupsPerDevice[device_id] = RTLDeviceInfoTy::HardTeamLimit;
    DP("Max ROCm blocks per grid %d exceeds the hard team limit %d, capping "
       "at the hard limit\n",
       workgroup_max_dim[0], RTLDeviceInfoTy::HardTeamLimit);
  }

  // Get thread limit
  hsa_dim3_t grid_max_dim;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_GRID_MAX_DIM, &grid_max_dim);
  if (err == HSA_STATUS_SUCCESS) {
    DeviceInfo.ThreadsPerGroup[device_id] =
        reinterpret_cast<uint32_t *>(&grid_max_dim)[0] /
        DeviceInfo.GroupsPerDevice[device_id];
    if ((DeviceInfo.ThreadsPerGroup[device_id] >
         RTLDeviceInfoTy::Max_WG_Size) ||
        DeviceInfo.ThreadsPerGroup[device_id] == 0) {
      DP("Capped thread limit: %d\n", RTLDeviceInfoTy::Max_WG_Size);
      DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
    } else {
      DP("Using ROCm Queried thread limit: %d\n",
         DeviceInfo.ThreadsPerGroup[device_id]);
    }
  } else {
    DeviceInfo.ThreadsPerGroup[device_id] = RTLDeviceInfoTy::Max_WG_Size;
    DP("Error getting max block dimension, use default:%d \n",
       RTLDeviceInfoTy::Max_WG_Size);
  }
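  // Worked illustration of the derivation above (hypothetical numbers):
  // grid_max_dim.x = 2^32 - 1 with GroupsPerDevice = 65535 gives
  // 4294967295 / 65535 = 65537 threads per group, which exceeds Max_WG_Size
  // and is therefore capped to Max_WG_Size.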
  // Get wavefront size
  uint32_t wavefront_size = 0;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_WAVEFRONT_SIZE,
                           &wavefront_size);
  if (err == HSA_STATUS_SUCCESS) {
    DP("Queried wavefront size: %d\n", wavefront_size);
    DeviceInfo.WarpSize[device_id] = wavefront_size;
  } else {
    DP("Default wavefront size: %d\n",
       llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Warp_Size]);
    DeviceInfo.WarpSize[device_id] =
        llvm::omp::AMDGPUGpuGridValues[llvm::omp::GVIDX::GV_Warp_Size];
  }

  // Adjust teams to the env variables
  if (DeviceInfo.EnvTeamLimit > 0 &&
      DeviceInfo.GroupsPerDevice[device_id] > DeviceInfo.EnvTeamLimit) {
    DeviceInfo.GroupsPerDevice[device_id] = DeviceInfo.EnvTeamLimit;
    DP("Capping max groups per device to OMP_TEAM_LIMIT=%d\n",
       DeviceInfo.EnvTeamLimit);
  }

  // Set default number of teams
  if (DeviceInfo.EnvNumTeams > 0) {
    DeviceInfo.NumTeams[device_id] = DeviceInfo.EnvNumTeams;
    DP("Default number of teams set according to environment %d\n",
       DeviceInfo.EnvNumTeams);
  } else {
    DeviceInfo.NumTeams[device_id] = RTLDeviceInfoTy::DefaultNumTeams;
    DP("Default number of teams set according to library's default %d\n",
       RTLDeviceInfoTy::DefaultNumTeams);
  }

  if (DeviceInfo.NumTeams[device_id] > DeviceInfo.GroupsPerDevice[device_id]) {
    DeviceInfo.NumTeams[device_id] = DeviceInfo.GroupsPerDevice[device_id];
    DP("Default number of teams exceeds device limit, capping at %d\n",
       DeviceInfo.GroupsPerDevice[device_id]);
  }

  // Set default number of threads
  DeviceInfo.NumThreads[device_id] = RTLDeviceInfoTy::Default_WG_Size;
  DP("Default number of threads set according to library's default %d\n",
     RTLDeviceInfoTy::Default_WG_Size);
  if (DeviceInfo.NumThreads[device_id] >
      DeviceInfo.ThreadsPerGroup[device_id]) {
    DeviceInfo.NumThreads[device_id] = DeviceInfo.ThreadsPerGroup[device_id];
    DP("Default number of threads exceeds device limit, capping at %d\n",
       DeviceInfo.ThreadsPerGroup[device_id]);
  }

  DP("Device %d: default limit for groupsPerDevice %d & threadsPerGroup %d\n",
     device_id, DeviceInfo.GroupsPerDevice[device_id],
     DeviceInfo.ThreadsPerGroup[device_id]);

  DP("Device %d: wavefront size %d, total threads %d x %d = %d\n", device_id,
     DeviceInfo.WarpSize[device_id], DeviceInfo.ThreadsPerGroup[device_id],
     DeviceInfo.GroupsPerDevice[device_id],
     DeviceInfo.GroupsPerDevice[device_id] *
         DeviceInfo.ThreadsPerGroup[device_id]);

  return OFFLOAD_SUCCESS;
}

namespace {
Elf64_Shdr *find_only_SHT_HASH(Elf *elf) {
  size_t N;
  int rc = elf_getshdrnum(elf, &N);
  if (rc != 0) {
    return nullptr;
  }

  Elf64_Shdr *result = nullptr;
  for (size_t i = 0; i < N; i++) {
    Elf_Scn *scn = elf_getscn(elf, i);
    if (scn) {
      Elf64_Shdr *shdr = elf64_getshdr(scn);
      if (shdr) {
        if (shdr->sh_type == SHT_HASH) {
          if (result == nullptr) {
            result = shdr;
          } else {
            // multiple SHT_HASH sections not handled
            return nullptr;
          }
        }
      }
    }
  }
  return result;
}

const Elf64_Sym *elf_lookup(Elf *elf, char *base, Elf64_Shdr *section_hash,
                            const char *symname) {

  assert(section_hash);
  size_t section_symtab_index = section_hash->sh_link;
  Elf64_Shdr *section_symtab =
      elf64_getshdr(elf_getscn(elf, section_symtab_index));
  size_t section_strtab_index = section_symtab->sh_link;

  const Elf64_Sym *symtab =
      reinterpret_cast<const Elf64_Sym *>(base + section_symtab->sh_offset);

  const uint32_t *hashtab =
      reinterpret_cast<const uint32_t *>(base + section_hash->sh_offset);

  // Layout:
  //   nbucket
  //   nchain
  //   bucket[nbucket]
  //   chain[nchain]
  uint32_t nbucket = hashtab[0];
  const uint32_t *bucket = &hashtab[2];
  const uint32_t *chain = &hashtab[nbucket + 2];

  const size_t max = strlen(symname) + 1;
  const uint32_t hash = elf_hash(symname);
  for (uint32_t i = bucket[hash % nbucket]; i != 0; i = chain[i]) {
    char *n = elf_strptr(elf, section_strtab_index, symtab[i].st_name);
    if (strncmp(symname, n, max) == 0) {
      return &symtab[i];
    }
  }

  return nullptr;
}

typedef struct {
  void *addr = nullptr;
  uint32_t size = UINT32_MAX;
} symbol_info;

int get_symbol_info_without_loading(Elf *elf, char *base, const char *symname,
                                    symbol_info *res) {
  if (elf_kind(elf) != ELF_K_ELF) {
    return 1;
  }

  Elf64_Shdr *section_hash = find_only_SHT_HASH(elf);
  if (!section_hash) {
    return 1;
  }

  const Elf64_Sym *sym = elf_lookup(elf, base, section_hash, symname);
  if (!sym) {
    return 1;
  }

  if (sym->st_size > UINT32_MAX) {
    return 1;
  }

  res->size = static_cast<uint32_t>(sym->st_size);
  res->addr = sym->st_value + base;
  return 0;
}

int get_symbol_info_without_loading(char *base, size_t img_size,
                                    const char *symname, symbol_info *res) {
  Elf *elf = elf_memory(base, img_size);
  if (elf) {
    int rc = get_symbol_info_without_loading(elf, base, symname, res);
    elf_end(elf);
    return rc;
  }
  return 1;
}
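// Usage sketch for the helpers above (hypothetical caller):
//   symbol_info si;
//   char *base = (char *)image->ImageStart;
//   size_t bytes = (char *)image->ImageEnd - base;
//   if (get_symbol_info_without_loading(base, bytes, "sym", &si) == 0) {
//     // si.addr points into the host copy of the image, not gpu memory
//   }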
atmi_status_t interop_get_symbol_info(char *base, size_t img_size,
                                      const char *symname, void **var_addr,
                                      uint32_t *var_size) {
  symbol_info si;
  int rc = get_symbol_info_without_loading(base, img_size, symname, &si);
  if (rc == 0) {
    *var_addr = si.addr;
    *var_size = si.size;
    return ATMI_STATUS_SUCCESS;
  } else {
    return ATMI_STATUS_ERROR;
  }
}

template <typename C>
atmi_status_t module_register_from_memory_to_place(void *module_bytes,
                                                   size_t module_size,
                                                   atmi_place_t place, C cb) {
  auto L = [](void *data, size_t size, void *cb_state) -> atmi_status_t {
    C *unwrapped = static_cast<C *>(cb_state);
    return (*unwrapped)(data, size);
  };
  return atmi_module_register_from_memory_to_place(
      module_bytes, module_size, place, L, static_cast<void *>(&cb));
}
} // namespace
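// The template above adapts a C++ callable to ATMI's C callback interface:
// the capture-free lambda L is the function pointer actually registered, and
// the callable itself travels through the void *cb_state parameter. The
// on_deserialized_data lambda in __tgt_rtl_load_binary_locked below is
// invoked this way with the deserialized image bytes.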
static uint64_t get_device_State_bytes(char *ImageStart, size_t img_size) {
  uint64_t device_State_bytes = 0;
  {
    // If this is the deviceRTL, get the state variable size
    symbol_info size_si;
    int rc = get_symbol_info_without_loading(
        ImageStart, img_size, "omptarget_nvptx_device_State_size", &size_si);

    if (rc == 0) {
      if (size_si.size != sizeof(uint64_t)) {
        fprintf(stderr,
                "Found device_State_size variable with wrong size, aborting\n");
        exit(1);
      }

      // Read number of bytes directly from the elf
      memcpy(&device_State_bytes, size_si.addr, sizeof(uint64_t));
    }
  }
  return device_State_bytes;
}

static __tgt_target_table *
__tgt_rtl_load_binary_locked(int32_t device_id, __tgt_device_image *image);

__tgt_target_table *__tgt_rtl_load_binary(int32_t device_id,
                                          __tgt_device_image *image) {
  DeviceInfo.load_run_lock.lock();
  __tgt_target_table *res = __tgt_rtl_load_binary_locked(device_id, image);
  DeviceInfo.load_run_lock.unlock();
  return res;
}

static atmi_status_t atmi_calloc(void **ret_ptr, size_t size,
                                 atmi_mem_place_t place) {
  uint64_t rounded = 4 * ((size + 3) / 4);
  void *ptr;
  atmi_status_t err = atmi_malloc(&ptr, rounded, place);
  if (err != ATMI_STATUS_SUCCESS) {
    return err;
  }

  hsa_status_t rc = hsa_amd_memory_fill(ptr, 0, rounded / 4);
  if (rc != HSA_STATUS_SUCCESS) {
    fprintf(stderr, "zero fill device_state failed with %u\n", rc);
    atmi_free(ptr);
    return ATMI_STATUS_ERROR;
  }

  *ret_ptr = ptr;
  return ATMI_STATUS_SUCCESS;
}

__tgt_target_table *__tgt_rtl_load_binary_locked(int32_t device_id,
                                                 __tgt_device_image *image) {
  // This function loads the device image onto gpu[device_id] and does other
  // per-image initialization work. Specifically:
  //
  // - Initialize an omptarget_device_environmentTy instance embedded in the
  //   image at the symbol "omptarget_device_environment"
  //   Fields debug_level, device_num, num_devices. Used by the deviceRTL.
  //
  // - Allocate a large array per-gpu (could be moved to init_device)
  //   - Read a uint64_t at symbol omptarget_nvptx_device_State_size
  //   - Allocate at least that many bytes of gpu memory
  //   - Zero initialize it
  //   - Write the pointer to the symbol omptarget_nvptx_device_State
  //
  // - Pulls some per-kernel information together from various sources and
  //   records it in the KernelsList for quicker access later
  //
  // The initialization can be done before or after loading the image onto the
  // gpu. This function presently does a mixture. Using the hsa api to get/set
  // the information is simpler to implement, in exchange for more complicated
  // runtime behaviour. E.g. launching a kernel or using dma to get eight bytes
  // back from the gpu vs a hashtable lookup on the host.

  const size_t img_size = (char *)image->ImageEnd - (char *)image->ImageStart;

  DeviceInfo.clearOffloadEntriesTable(device_id);

  // We do not need to set the ELF version because the caller of this function
  // had to do that to decide the right runtime to use

  if (!elf_machine_id_is_amdgcn(image)) {
    return NULL;
  }

  omptarget_device_environmentTy host_device_env;
  host_device_env.num_devices = DeviceInfo.NumberOfDevices;
  host_device_env.device_num = device_id;
  host_device_env.debug_level = 0;
#ifdef OMPTARGET_DEBUG
  if (char *envStr = getenv("LIBOMPTARGET_DEVICE_RTL_DEBUG")) {
    host_device_env.debug_level = std::stoi(envStr);
  }
#endif

  auto on_deserialized_data = [&](void *data, size_t size) -> atmi_status_t {
    const char *device_env_Name = "omptarget_device_environment";
    symbol_info si;
    int rc = get_symbol_info_without_loading((char *)image->ImageStart,
                                             img_size, device_env_Name, &si);
    if (rc != 0) {
      DP("Finding global device environment '%s' - symbol missing.\n",
         device_env_Name);
      // no need to return FAIL, consider this not a device debug build.
      return ATMI_STATUS_SUCCESS;
    }
    if (si.size != sizeof(host_device_env)) {
      return ATMI_STATUS_ERROR;
    }
    DP("Setting global device environment %u bytes\n", si.size);
    uint64_t offset = (char *)si.addr - (char *)image->ImageStart;
    void *pos = (char *)data + offset;
    memcpy(pos, &host_device_env, sizeof(host_device_env));
    return ATMI_STATUS_SUCCESS;
  };
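  // The lambda patches the host copy of the image before it reaches the gpu:
  // a symbol's offset within the original ELF equals its offset within the
  // deserialized copy, so (si.addr - ImageStart) locates the same variable in
  // `data`. The deviceRTL therefore starts with debug_level, num_devices and
  // device_num already populated.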
  atmi_status_t err;
  {
    err = module_register_from_memory_to_place(
        (void *)image->ImageStart, img_size, get_gpu_place(device_id),
        on_deserialized_data);

    check("Module registering", err);
    if (err != ATMI_STATUS_SUCCESS) {
      char GPUName[64] = "--unknown gpu--";
      hsa_agent_t agent = DeviceInfo.HSAAgents[device_id];
      (void)hsa_agent_get_info(agent, (hsa_agent_info_t)HSA_AGENT_INFO_NAME,
                               (void *)GPUName);
      fprintf(stderr,
              "Possible gpu arch mismatch: %s, please check"
              " compiler: -march=<gpu> flag\n",
              GPUName);
      return NULL;
    }
  }

  DP("ATMI module successfully loaded!\n");

  // Zero the pseudo-bss variable by calling into hsa
  // Do this post-load to handle got
  uint64_t device_State_bytes =
      get_device_State_bytes((char *)image->ImageStart, img_size);
  auto &dss = DeviceInfo.deviceStateStore[device_id];
  if (device_State_bytes != 0) {

    if (dss.first.get() == nullptr) {
      assert(dss.second == 0);
      void *ptr = NULL;
      atmi_status_t err =
          atmi_calloc(&ptr, device_State_bytes, get_gpu_mem_place(device_id));
      if (err != ATMI_STATUS_SUCCESS) {
        fprintf(stderr, "Failed to allocate device_state array\n");
        return NULL;
      }
      dss = {std::unique_ptr<void, RTLDeviceInfoTy::atmiFreePtrDeletor>{ptr},
             device_State_bytes};
    }

    void *ptr = dss.first.get();
    if (device_State_bytes != dss.second) {
      fprintf(stderr, "Inconsistent sizes of device_State unsupported\n");
      exit(1);
    }

    void *state_ptr;
    uint32_t state_ptr_size;
    err = atmi_interop_hsa_get_symbol_info(get_gpu_mem_place(device_id),
                                           "omptarget_nvptx_device_State",
                                           &state_ptr, &state_ptr_size);

    if (err != ATMI_STATUS_SUCCESS) {
      fprintf(stderr, "failed to find device_state ptr\n");
      return NULL;
    }
    if (state_ptr_size != sizeof(void *)) {
      fprintf(stderr, "unexpected size of state_ptr %u != %zu\n",
              state_ptr_size, sizeof(void *));
      return NULL;
    }

    // write ptr to device memory so it can be used by later kernels
    err = DeviceInfo.freesignalpool_memcpy_h2d(state_ptr, &ptr, sizeof(void *),
                                               device_id);
    if (err != ATMI_STATUS_SUCCESS) {
      fprintf(stderr, "memcpy install of state_ptr failed\n");
      return NULL;
    }
  }

  // TODO: Check with Guansong to understand the below comment more thoroughly.
  // Here, we take advantage of the data that is appended after img_end to get
  // the symbols' name we need to load. This data consists of the host entries
  // begin and end as well as the target name (see the offloading linker script
  // creation in clang compiler).

  // Find the symbols in the module by name. The name can be obtained by
  // concatenating the host entry name with the target name
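  // For example (hypothetical), a global declared with
  //   #pragma omp declare target
  //   int x;
  //   #pragma omp end declare target
  // arrives as an entry with e->name == "x" and e->size == sizeof(int); the
  // matching device symbol is looked up below and its address replaces
  // e->addr in the device entries table. Entries with size == 0 are kernels.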
  __tgt_offload_entry *HostBegin = image->EntriesBegin;
  __tgt_offload_entry *HostEnd = image->EntriesEnd;

  for (__tgt_offload_entry *e = HostBegin; e != HostEnd; ++e) {

    if (!e->addr) {
      // The host should have always something in the address to
      // uniquely identify the target region.
      fprintf(stderr, "Analyzing host entry '<null>' (size = %lld)...\n",
              (unsigned long long)e->size);
      return NULL;
    }

    if (e->size) {
      __tgt_offload_entry entry = *e;

      void *varptr;
      uint32_t varsize;

      err = atmi_interop_hsa_get_symbol_info(get_gpu_mem_place(device_id),
                                             e->name, &varptr, &varsize);

      if (err != ATMI_STATUS_SUCCESS) {
        DP("Loading global '%s' (Failed)\n", e->name);
        // Inform the user what symbol prevented offloading
        fprintf(stderr, "Loading global '%s' (Failed)\n", e->name);
        return NULL;
      }

      if (varsize != e->size) {
        DP("Loading global '%s' - size mismatch (%u != %lu)\n", e->name,
           varsize, e->size);
        return NULL;
      }

      DP("Entry point " DPxMOD " maps to global %s (" DPxMOD ")\n",
         DPxPTR(e - HostBegin), e->name, DPxPTR(varptr));
      entry.addr = (void *)varptr;

      DeviceInfo.addOffloadEntry(device_id, entry);

      if (DeviceInfo.RequiresFlags & OMP_REQ_UNIFIED_SHARED_MEMORY &&
          e->flags & OMP_DECLARE_TARGET_LINK) {
        // If unified memory is present any target link variables
        // can access host addresses directly. There is no longer a
        // need for device copies.
        err = DeviceInfo.freesignalpool_memcpy_h2d(varptr, e->addr,
                                                   sizeof(void *), device_id);
        if (err != ATMI_STATUS_SUCCESS)
          DP("Error when copying USM\n");
        DP("Copy linked variable host address (" DPxMOD ") "
           "to device address (" DPxMOD ")\n",
           DPxPTR(*((void **)e->addr)), DPxPTR(varptr));
      }

      continue;
    }

    DP("to find the kernel name: %s size: %lu\n", e->name, strlen(e->name));

    atmi_mem_place_t place = get_gpu_mem_place(device_id);
    uint32_t kernarg_segment_size;
    err = atmi_interop_hsa_get_kernel_info(
        place, e->name, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_KERNARG_SEGMENT_SIZE,
        &kernarg_segment_size);

    // each arg is a void * in this openmp implementation
    uint32_t arg_num = kernarg_segment_size / sizeof(void *);
    std::vector<size_t> arg_sizes(arg_num);
    for (std::vector<size_t>::iterator it = arg_sizes.begin();
         it != arg_sizes.end(); it++) {
      *it = sizeof(void *);
    }

    // default value GENERIC (in case the symbol is missing from the image)
    int8_t ExecModeVal = ExecutionModeType::GENERIC;

    // get flat group size if present, else Default_WG_Size
    int16_t WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
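    // Kernel metadata is read from the image by symbol name with a fallback
    // chain (the lookups below): prefer the combined <kernel>_kern_desc
    // record; if it is absent, read <kernel>_exec_mode and <kernel>_wg_size
    // individually; if those are absent too, keep GENERIC / Default_WG_Size.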
    // get Kernel Descriptor if present.
    // Keep struct in sync with getTgtAttributeStructQTy in CGOpenMPRuntime.cpp
    struct KernDescValType {
      uint16_t Version;
      uint16_t TSize;
      uint16_t WG_Size;
      uint8_t Mode;
    };
    struct KernDescValType KernDescVal;
    std::string KernDescNameStr(e->name);
    KernDescNameStr += "_kern_desc";
    const char *KernDescName = KernDescNameStr.c_str();

    void *KernDescPtr;
    uint32_t KernDescSize;
    void *CallStackAddr = nullptr;
    err = interop_get_symbol_info((char *)image->ImageStart, img_size,
                                  KernDescName, &KernDescPtr, &KernDescSize);

    if (err == ATMI_STATUS_SUCCESS) {
      if ((size_t)KernDescSize != sizeof(KernDescVal))
        DP("Loading global computation properties '%s' - size mismatch (%u != "
           "%lu)\n",
           KernDescName, KernDescSize, sizeof(KernDescVal));

      memcpy(&KernDescVal, KernDescPtr, (size_t)KernDescSize);

      // Check structure size against recorded size.
      if ((size_t)KernDescSize != KernDescVal.TSize)
        DP("KernDescVal size %lu does not match advertised size %d for '%s'\n",
           sizeof(KernDescVal), KernDescVal.TSize, KernDescName);

      DP("After loading global for %s KernDesc \n", KernDescName);
      DP("KernDesc: Version: %d\n", KernDescVal.Version);
      DP("KernDesc: TSize: %d\n", KernDescVal.TSize);
      DP("KernDesc: WG_Size: %d\n", KernDescVal.WG_Size);
      DP("KernDesc: Mode: %d\n", KernDescVal.Mode);

      // Get ExecMode
      ExecModeVal = KernDescVal.Mode;
      DP("ExecModeVal %d\n", ExecModeVal);
      if (KernDescVal.WG_Size == 0) {
        KernDescVal.WG_Size = RTLDeviceInfoTy::Default_WG_Size;
        DP("Setting KernDescVal.WG_Size to default %d\n", KernDescVal.WG_Size);
      }
      WGSizeVal = KernDescVal.WG_Size;
      DP("WGSizeVal %d\n", WGSizeVal);
      check("Loading KernDesc computation property", err);
    } else {
      DP("Warning: Loading KernDesc '%s' - symbol not found, ", KernDescName);

      // Generic
      std::string ExecModeNameStr(e->name);
      ExecModeNameStr += "_exec_mode";
      const char *ExecModeName = ExecModeNameStr.c_str();

      void *ExecModePtr;
      uint32_t varsize;
      err = interop_get_symbol_info((char *)image->ImageStart, img_size,
                                    ExecModeName, &ExecModePtr, &varsize);

      if (err == ATMI_STATUS_SUCCESS) {
        if ((size_t)varsize != sizeof(int8_t)) {
          DP("Loading global computation properties '%s' - size mismatch (%u "
             "!= %lu)\n",
             ExecModeName, varsize, sizeof(int8_t));
          return NULL;
        }

        memcpy(&ExecModeVal, ExecModePtr, (size_t)varsize);

        DP("After loading global for %s ExecMode = %d\n", ExecModeName,
           ExecModeVal);

        if (ExecModeVal < 0 || ExecModeVal > 1) {
          DP("Error wrong exec_mode value specified in HSA code object file: "
             "%d\n",
             ExecModeVal);
          return NULL;
        }
      } else {
        DP("Loading global exec_mode '%s' - symbol missing, using default "
           "value GENERIC (1)\n",
           ExecModeName);
      }
      check("Loading computation property", err);

      // Flat group size
      std::string WGSizeNameStr(e->name);
      WGSizeNameStr += "_wg_size";
      const char *WGSizeName = WGSizeNameStr.c_str();

      void *WGSizePtr;
      uint32_t WGSize;
      err = interop_get_symbol_info((char *)image->ImageStart, img_size,
                                    WGSizeName, &WGSizePtr, &WGSize);

      if (err == ATMI_STATUS_SUCCESS) {
        if ((size_t)WGSize != sizeof(int16_t)) {
          DP("Loading global computation properties '%s' - size mismatch (%u "
             "!= %lu)\n",
             WGSizeName, WGSize, sizeof(int16_t));
          return NULL;
        }

        memcpy(&WGSizeVal, WGSizePtr, (size_t)WGSize);

        DP("After loading global for %s WGSize = %d\n", WGSizeName, WGSizeVal);

        if (WGSizeVal < RTLDeviceInfoTy::Default_WG_Size ||
            WGSizeVal > RTLDeviceInfoTy::Max_WG_Size) {
          DP("Error wrong WGSize value specified in HSA code object file: "
             "%d\n",
             WGSizeVal);
          WGSizeVal = RTLDeviceInfoTy::Default_WG_Size;
        }
      } else {
        DP("Warning: Loading WGSize '%s' - symbol not found, "
           "using default value %d\n",
           WGSizeName, WGSizeVal);
      }

      check("Loading WGSize computation property", err);
    }
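    // Register the kernel. Note the trick below: the entry address recorded
    // in the offload table is not a device pointer but the address of the
    // host-side KernelTy just pushed onto KernelsList;
    // __tgt_rtl_run_target_team_region_locked casts it back to KernelTy* to
    // recover the launch metadata.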
    KernelsList.push_back(KernelTy(ExecModeVal, WGSizeVal, device_id,
                                   CallStackAddr, e->name,
                                   kernarg_segment_size));
    __tgt_offload_entry entry = *e;
    entry.addr = (void *)&KernelsList.back();
    DeviceInfo.addOffloadEntry(device_id, entry);
    DP("Entry point %ld maps to %s\n", e - HostBegin, e->name);
  }

  return DeviceInfo.getOffloadEntriesTable(device_id);
}

void *__tgt_rtl_data_alloc(int device_id, int64_t size, void *) {
  void *ptr = NULL;
  assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
  atmi_status_t err = atmi_malloc(&ptr, size, get_gpu_mem_place(device_id));
  DP("Tgt alloc data %ld bytes, (tgt:%016llx).\n", size,
     (long long unsigned)(Elf64_Addr)ptr);
  ptr = (err == ATMI_STATUS_SUCCESS) ? ptr : NULL;
  return ptr;
}

int32_t __tgt_rtl_data_submit(int device_id, void *tgt_ptr, void *hst_ptr,
                              int64_t size) {
  assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
  __tgt_async_info async_info;
  int32_t rc = dataSubmit(device_id, tgt_ptr, hst_ptr, size, &async_info);
  if (rc != OFFLOAD_SUCCESS)
    return OFFLOAD_FAIL;

  return __tgt_rtl_synchronize(device_id, &async_info);
}

int32_t __tgt_rtl_data_submit_async(int device_id, void *tgt_ptr,
                                    void *hst_ptr, int64_t size,
                                    __tgt_async_info *async_info_ptr) {
  assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
  if (async_info_ptr) {
    initAsyncInfoPtr(async_info_ptr);
    return dataSubmit(device_id, tgt_ptr, hst_ptr, size, async_info_ptr);
  } else {
    return __tgt_rtl_data_submit(device_id, tgt_ptr, hst_ptr, size);
  }
}

int32_t __tgt_rtl_data_retrieve(int device_id, void *hst_ptr, void *tgt_ptr,
                                int64_t size) {
  assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
  __tgt_async_info async_info;
  int32_t rc = dataRetrieve(device_id, hst_ptr, tgt_ptr, size, &async_info);
  if (rc != OFFLOAD_SUCCESS)
    return OFFLOAD_FAIL;

  return __tgt_rtl_synchronize(device_id, &async_info);
}

int32_t __tgt_rtl_data_retrieve_async(int device_id, void *hst_ptr,
                                      void *tgt_ptr, int64_t size,
                                      __tgt_async_info *async_info_ptr) {
  assert(async_info_ptr && "async_info is nullptr");
  assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
  initAsyncInfoPtr(async_info_ptr);
  return dataRetrieve(device_id, hst_ptr, tgt_ptr, size, async_info_ptr);
}

int32_t __tgt_rtl_data_delete(int device_id, void *tgt_ptr) {
  assert(device_id < DeviceInfo.NumberOfDevices && "Device ID too large");
  atmi_status_t err;
  DP("Tgt free data (tgt:%016llx).\n", (long long unsigned)(Elf64_Addr)tgt_ptr);
  err = atmi_free(tgt_ptr);
  if (err != ATMI_STATUS_SUCCESS) {
    DP("Error when freeing device memory\n");
    return OFFLOAD_FAIL;
  }
  return OFFLOAD_SUCCESS;
}
// Determine launch values for threadsPerGroup and num_groups.
// Outputs: threadsPerGroup, num_groups
// Inputs: Max_Teams, Max_WG_Size, Warp_Size, ExecutionMode,
//         EnvTeamLimit, EnvNumTeams, num_teams, thread_limit,
//         loop_tripcount.
void getLaunchVals(int &threadsPerGroup, int &num_groups, int ConstWGSize,
                   int ExecutionMode, int EnvTeamLimit, int EnvNumTeams,
                   int num_teams, int thread_limit, uint64_t loop_tripcount) {

  int Max_Teams = DeviceInfo.EnvMaxTeamsDefault > 0
                      ? DeviceInfo.EnvMaxTeamsDefault
                      : DeviceInfo.Max_Teams;
  if (Max_Teams > DeviceInfo.HardTeamLimit)
    Max_Teams = DeviceInfo.HardTeamLimit;

  if (print_kernel_trace == 4) {
    fprintf(stderr, "RTLDeviceInfoTy::Max_Teams: %d\n",
            RTLDeviceInfoTy::Max_Teams);
    fprintf(stderr, "Max_Teams: %d\n", Max_Teams);
    fprintf(stderr, "RTLDeviceInfoTy::Warp_Size: %d\n",
            RTLDeviceInfoTy::Warp_Size);
    fprintf(stderr, "RTLDeviceInfoTy::Max_WG_Size: %d\n",
            RTLDeviceInfoTy::Max_WG_Size);
    fprintf(stderr, "RTLDeviceInfoTy::Default_WG_Size: %d\n",
            RTLDeviceInfoTy::Default_WG_Size);
    fprintf(stderr, "thread_limit: %d\n", thread_limit);
    fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
    fprintf(stderr, "ConstWGSize: %d\n", ConstWGSize);
  }
  // check for thread_limit() clause
  if (thread_limit > 0) {
    threadsPerGroup = thread_limit;
    DP("Setting threads per block to requested %d\n", thread_limit);
    if (ExecutionMode == GENERIC) { // Add master warp for GENERIC
      threadsPerGroup += RTLDeviceInfoTy::Warp_Size;
      DP("Adding master wavefront: +%d threads\n", RTLDeviceInfoTy::Warp_Size);
    }
    if (threadsPerGroup > RTLDeviceInfoTy::Max_WG_Size) { // limit to max
      threadsPerGroup = RTLDeviceInfoTy::Max_WG_Size;
      DP("Setting threads per block to maximum %d\n", threadsPerGroup);
    }
  }
  // check flat_max_work_group_size attr here
  if (threadsPerGroup > ConstWGSize) {
    threadsPerGroup = ConstWGSize;
    DP("Reduced threadsPerGroup to flat-attr-group-size limit %d\n",
       threadsPerGroup);
  }
  if (print_kernel_trace == 4)
    fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
  DP("Preparing %d threads\n", threadsPerGroup);

  // Set default num_groups (teams)
  if (DeviceInfo.EnvTeamLimit > 0)
    num_groups = (Max_Teams < DeviceInfo.EnvTeamLimit)
                     ? Max_Teams
                     : DeviceInfo.EnvTeamLimit;
  else
    num_groups = Max_Teams;
  DP("Set default num of groups %d\n", num_groups);

  if (print_kernel_trace == 4) {
    fprintf(stderr, "num_groups: %d\n", num_groups);
    fprintf(stderr, "num_teams: %d\n", num_teams);
  }

  // Reduce num_groups if threadsPerGroup exceeds
  // RTLDeviceInfoTy::Default_WG_Size. This reduction is typical for the
  // default case (no thread_limit clause) or when the user requests a very
  // large num_teams clause.
  // FIXME: We can't distinguish between a constant or variable thread limit,
  // so we only handle constant thread_limits.
  if (threadsPerGroup >
      RTLDeviceInfoTy::Default_WG_Size) // 256 < threadsPerGroup <= 1024
    // Should we round threadsPerGroup up to nearest RTLDeviceInfoTy::Warp_Size
    // here?
    num_groups = (Max_Teams * RTLDeviceInfoTy::Max_WG_Size) / threadsPerGroup;
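  // Worked illustration (hypothetical values: Max_Teams = 512,
  // Max_WG_Size = 1024, Default_WG_Size = 256, Warp_Size = 64, a kernel
  // compiled with ConstWGSize = 1024, thread_limit(768) in GENERIC mode,
  // no env overrides):
  //   threadsPerGroup = 768 + 64 (master wavefront) = 832
  //   832 > Default_WG_Size, so num_groups = (512 * 1024) / 832 = 630
  // The num_teams and environment clamps below may reduce num_groups further.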
  // check for num_teams() clause
  if (num_teams > 0) {
    num_groups = (num_teams < num_groups) ? num_teams : num_groups;
  }
  if (print_kernel_trace == 4) {
    fprintf(stderr, "num_groups: %d\n", num_groups);
    fprintf(stderr, "DeviceInfo.EnvNumTeams %d\n", DeviceInfo.EnvNumTeams);
    fprintf(stderr, "DeviceInfo.EnvTeamLimit %d\n", DeviceInfo.EnvTeamLimit);
  }

  if (DeviceInfo.EnvNumTeams > 0) {
    num_groups = (DeviceInfo.EnvNumTeams < num_groups) ? DeviceInfo.EnvNumTeams
                                                       : num_groups;
    DP("Modifying teams based on EnvNumTeams %d\n", DeviceInfo.EnvNumTeams);
  } else if (DeviceInfo.EnvTeamLimit > 0) {
    num_groups = (DeviceInfo.EnvTeamLimit < num_groups)
                     ? DeviceInfo.EnvTeamLimit
                     : num_groups;
    DP("Modifying teams based on EnvTeamLimit %d\n", DeviceInfo.EnvTeamLimit);
  } else {
    if (num_teams <= 0) {
      if (loop_tripcount > 0) {
        if (ExecutionMode == SPMD) {
          // round up to the nearest integer
          num_groups = ((loop_tripcount - 1) / threadsPerGroup) + 1;
        } else {
          num_groups = loop_tripcount;
        }
        DP("Using %d teams due to loop trip count %" PRIu64 " and number of "
           "threads per block %d\n",
           num_groups, loop_tripcount, threadsPerGroup);
      }
    } else {
      num_groups = num_teams;
    }
    if (num_groups > Max_Teams) {
      num_groups = Max_Teams;
      if (print_kernel_trace == 4)
        fprintf(stderr, "Limiting num_groups %d to Max_Teams %d \n",
                num_groups, Max_Teams);
    }
    if (num_groups > num_teams && num_teams > 0) {
      num_groups = num_teams;
      if (print_kernel_trace == 4)
        fprintf(stderr, "Limiting num_groups %d to clause num_teams %d \n",
                num_groups, num_teams);
    }
  }

  // num_teams clause always honored, no matter what, unless DEFAULT is active.
  if (num_teams > 0) {
    num_groups = num_teams;
    // Cap num_groups to EnvMaxTeamsDefault if set.
    if (DeviceInfo.EnvMaxTeamsDefault > 0 &&
        num_groups > DeviceInfo.EnvMaxTeamsDefault)
      num_groups = DeviceInfo.EnvMaxTeamsDefault;
  }
  if (print_kernel_trace == 4) {
    fprintf(stderr, "threadsPerGroup: %d\n", threadsPerGroup);
    fprintf(stderr, "num_groups: %d\n", num_groups);
    fprintf(stderr, "loop_tripcount: %" PRIu64 "\n", loop_tripcount);
  }
  DP("Final %d num_groups and %d threadsPerGroup\n", num_groups,
     threadsPerGroup);
}

static uint64_t acquire_available_packet_id(hsa_queue_t *queue) {
  uint64_t packet_id = hsa_queue_add_write_index_relaxed(queue, 1);
  bool full = true;
  while (full) {
    full = packet_id >=
           (queue->size + hsa_queue_load_read_index_scacquire(queue));
  }
  return packet_id;
}
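// acquire_available_packet_id bumps the queue write index by one and spins
// until the corresponding packet slot is free, i.e. until
// write_index - read_index < queue->size. For illustration, with a queue of
// size 1024 a packet_id of 1500 becomes usable once the GPU's read index
// reaches 477.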
static int32_t __tgt_rtl_run_target_team_region_locked(
    int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
    ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
    int32_t thread_limit, uint64_t loop_tripcount);

int32_t __tgt_rtl_run_target_team_region(int32_t device_id,
                                         void *tgt_entry_ptr, void **tgt_args,
                                         ptrdiff_t *tgt_offsets,
                                         int32_t arg_num, int32_t num_teams,
                                         int32_t thread_limit,
                                         uint64_t loop_tripcount) {

  DeviceInfo.load_run_lock.lock_shared();
  int32_t res = __tgt_rtl_run_target_team_region_locked(
      device_id, tgt_entry_ptr, tgt_args, tgt_offsets, arg_num, num_teams,
      thread_limit, loop_tripcount);

  DeviceInfo.load_run_lock.unlock_shared();
  return res;
}

int32_t __tgt_rtl_run_target_team_region_locked(
    int32_t device_id, void *tgt_entry_ptr, void **tgt_args,
    ptrdiff_t *tgt_offsets, int32_t arg_num, int32_t num_teams,
    int32_t thread_limit, uint64_t loop_tripcount) {
  // Set the context we are using
  // update thread limit content in gpu memory if un-initialized or specified
  // from host

  DP("Run target team region thread_limit %d\n", thread_limit);

  // All args are references.
  std::vector<void *> args(arg_num);
  std::vector<void *> ptrs(arg_num);

  DP("Arg_num: %d\n", arg_num);
  for (int32_t i = 0; i < arg_num; ++i) {
    ptrs[i] = (void *)((intptr_t)tgt_args[i] + tgt_offsets[i]);
    args[i] = &ptrs[i];
    DP("Offsetted base: arg[%d]:" DPxMOD "\n", i, DPxPTR(ptrs[i]));
  }

  KernelTy *KernelInfo = (KernelTy *)tgt_entry_ptr;

  /*
   * Set limit based on ThreadsPerGroup and GroupsPerDevice
   */
  int num_groups = 0;

  int threadsPerGroup = RTLDeviceInfoTy::Default_WG_Size;

  getLaunchVals(threadsPerGroup, num_groups, KernelInfo->ConstWGSize,
                KernelInfo->ExecutionMode, DeviceInfo.EnvTeamLimit,
                DeviceInfo.EnvNumTeams,
                num_teams,     // From run_region arg
                thread_limit,  // From run_region arg
                loop_tripcount // From run_region arg
  );

  if (print_kernel_trace == 4)
    // enum modes are SPMD, GENERIC, NONE 0,1,2
    fprintf(stderr,
            "DEVID:%2d SGN:%1d ConstWGSize:%-4d args:%2d teamsXthrds:(%4dX%4d) "
            "reqd:(%4dX%4d) n:%s\n",
            device_id, KernelInfo->ExecutionMode, KernelInfo->ConstWGSize,
            arg_num, num_groups, threadsPerGroup, num_teams, thread_limit,
            KernelInfo->Name);

  // Run on the device.
  {
    hsa_queue_t *queue = DeviceInfo.HSAQueues[device_id];
    uint64_t packet_id = acquire_available_packet_id(queue);

    const uint32_t mask = queue->size - 1; // size is a power of 2
    hsa_kernel_dispatch_packet_t *packet =
        (hsa_kernel_dispatch_packet_t *)queue->base_address +
        (packet_id & mask);

    // packet->header is written last
    packet->setup = UINT16_C(1) << HSA_KERNEL_DISPATCH_PACKET_SETUP_DIMENSIONS;
    packet->workgroup_size_x = threadsPerGroup;
    packet->workgroup_size_y = 1;
    packet->workgroup_size_z = 1;
    packet->reserved0 = 0;
    packet->grid_size_x = num_groups * threadsPerGroup;
    packet->grid_size_y = 1;
    packet->grid_size_z = 1;
    packet->private_segment_size = 0;
    packet->group_segment_size = 0;
    packet->kernel_object = 0;
    packet->kernarg_address = 0;     // use the block allocator
    packet->reserved2 = 0;           // atmi writes id_ here
    packet->completion_signal = {0}; // may want a pool of signals

    std::string kernel_name = std::string(KernelInfo->Name);
    {
      assert(KernelInfoTable[device_id].find(kernel_name) !=
             KernelInfoTable[device_id].end());
      auto it = KernelInfoTable[device_id][kernel_name];
      packet->kernel_object = it.kernel_object;
      packet->private_segment_size = it.private_segment_size;
      packet->group_segment_size = it.group_segment_size;
      assert(arg_num == (int)it.num_args);
    }

    KernelArgPool *ArgPool = nullptr;
    {
      auto it = KernelArgPoolMap.find(std::string(KernelInfo->Name));
      if (it != KernelArgPoolMap.end()) {
        ArgPool = (it->second).get();
      }
    }
    if (!ArgPool) {
      fprintf(stderr, "Warning: No ArgPool for %s on device %d\n",
              KernelInfo->Name, device_id);
    }
    {
      void *kernarg = nullptr;
      if (ArgPool) {
        assert(ArgPool->kernarg_segment_size == (arg_num * sizeof(void *)));
        kernarg = ArgPool->allocate(arg_num);
      }
      if (!kernarg) {
        printf("Allocate kernarg failed\n");
        exit(1);
      }

      // Copy explicit arguments
      for (int i = 0; i < arg_num; i++) {
        memcpy((char *)kernarg + sizeof(void *) * i, args[i], sizeof(void *));
      }
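      // At this point the kernarg slot holds arg_num device pointers; for
      // arg_num == 3 (illustration):
      //   [arg0*][arg1*][arg2*][atmi_implicit_args_t ...]
      // The implicit-args block that follows is what gets zeroed below.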
      // Initialize implicit arguments. ATMI seems to leave most fields
      // uninitialized
      atmi_implicit_args_t *impl_args =
          reinterpret_cast<atmi_implicit_args_t *>(
              static_cast<char *>(kernarg) + ArgPool->kernarg_segment_size);
      memset(impl_args, 0,
             sizeof(atmi_implicit_args_t)); // may not be necessary
      impl_args->offset_x = 0;
      impl_args->offset_y = 0;
      impl_args->offset_z = 0;

      packet->kernarg_address = kernarg;
    }

    {
      hsa_signal_t s = DeviceInfo.FreeSignalPool.pop();
      if (s.handle == 0) {
        printf("Failed to get signal instance\n");
        exit(1);
      }
      packet->completion_signal = s;
      hsa_signal_store_relaxed(packet->completion_signal, 1);
    }

    core::packet_store_release(
        reinterpret_cast<uint32_t *>(packet),
        core::create_header(HSA_PACKET_TYPE_KERNEL_DISPATCH, 0,
                            ATMI_FENCE_SCOPE_SYSTEM, ATMI_FENCE_SCOPE_SYSTEM),
        packet->setup);

    hsa_signal_store_relaxed(queue->doorbell_signal, packet_id);

    while (hsa_signal_wait_scacquire(packet->completion_signal,
                                     HSA_SIGNAL_CONDITION_EQ, 0, UINT64_MAX,
                                     HSA_WAIT_STATE_BLOCKED) != 0)
      ;

    assert(ArgPool);
    ArgPool->deallocate(packet->kernarg_address);
    DeviceInfo.FreeSignalPool.push(packet->completion_signal);
  }

  DP("Kernel completed\n");
  return OFFLOAD_SUCCESS;
}

int32_t __tgt_rtl_run_target_region(int32_t device_id, void *tgt_entry_ptr,
                                    void **tgt_args, ptrdiff_t *tgt_offsets,
                                    int32_t arg_num) {
  // use one team and one thread
  // fix thread num
  int32_t team_num = 1;
  int32_t thread_limit = 0; // use default
  return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
                                          tgt_offsets, arg_num, team_num,
                                          thread_limit, 0);
}

int32_t __tgt_rtl_run_target_region_async(int32_t device_id,
                                          void *tgt_entry_ptr, void **tgt_args,
                                          ptrdiff_t *tgt_offsets,
                                          int32_t arg_num,
                                          __tgt_async_info *async_info_ptr) {
  assert(async_info_ptr && "async_info is nullptr");
  initAsyncInfoPtr(async_info_ptr);

  // use one team and one thread
  // fix thread num
  int32_t team_num = 1;
  int32_t thread_limit = 0; // use default
  return __tgt_rtl_run_target_team_region(device_id, tgt_entry_ptr, tgt_args,
                                          tgt_offsets, arg_num, team_num,
                                          thread_limit, 0);
}

int32_t __tgt_rtl_synchronize(int32_t device_id,
                              __tgt_async_info *async_info_ptr) {
  assert(async_info_ptr && "async_info is nullptr");

  // Cuda asserts that async_info_ptr->Queue is non-null, but this invariant
  // is not ensured by devices.cpp for amdgcn
  // assert(async_info_ptr->Queue && "async_info_ptr->Queue is nullptr");
  if (async_info_ptr->Queue) {
    finiAsyncInfoPtr(async_info_ptr);
  }
  return OFFLOAD_SUCCESS;
}