/*===--------------------------------------------------------------------------
 *              ATMI (Asynchronous Task and Memory Interface)
 *
 * This file is distributed under the MIT License. See LICENSE.txt for details.
 *===------------------------------------------------------------------------*/
#include <gelf.h>
#include <libelf.h>

#include <cassert>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include "internal.h"
#include "machine.h"
#include "rt.h"

#include "msgpack.h"

namespace hsa {
// Wrap the HSA iterate API in a shim that allows passing general callables
template <typename C>
hsa_status_t executable_iterate_symbols(hsa_executable_t executable, C cb) {
  auto L = [](hsa_executable_t executable, hsa_executable_symbol_t symbol,
              void *data) -> hsa_status_t {
    C *unwrapped = static_cast<C *>(data);
    return (*unwrapped)(executable, symbol);
  };
  return hsa_executable_iterate_symbols(executable, L,
                                        static_cast<void *>(&cb));
}
} // namespace hsa
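// A minimal usage sketch of the shim above (hypothetical callable; the real
// call site is in RegisterModuleFromMemory below). Any callable matching the
// signature can be passed, including a capturing lambda:
//
//   hsa::executable_iterate_symbols(
//       executable,
//       [&](hsa_executable_t, hsa_executable_symbol_t s) -> hsa_status_t {
//         /* inspect s */
//         return HSA_STATUS_SUCCESS;
//       });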
typedef unsigned char *address;
/*
 * Note descriptors.
 */
typedef struct {
  uint32_t n_namesz; /* Length of note's name. */
  uint32_t n_descsz; /* Length of note's value. */
  uint32_t n_type;   /* Type of note. */
  // then name
  // then padding, optional
  // then desc, at 4 byte alignment (not 8, despite being elf64)
} Elf_Note;
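// The note-parsing code below steps past this header by its exact size, so a
// compile-time check documents the layout assumption (a sketch: three 32-bit
// fields with no padding):
static_assert(sizeof(Elf_Note) == 3 * sizeof(uint32_t),
              "Elf_Note header must be exactly three 32-bit words");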
// The following include file and the structs/enums it provides have been
// replicated on a per-use basis below. For example,
// llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, but we may care
// only about kernargSegmentSize_ for now, so we include just that field in
// our KernelMD implementation. We chose this replication approach to avoid
// forcing a dependency on LLVM_INCLUDE_DIR just to compile the runtime.
// #include "llvm/Support/AMDGPUMetadata.h"
// typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
// using llvm::AMDGPU::HSAMD::AccessQualifier;
// using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
// using llvm::AMDGPU::HSAMD::ValueKind;
// using llvm::AMDGPU::HSAMD::ValueType;

class KernelArgMD {
public:
  enum class ValueKind {
    HiddenGlobalOffsetX,
    HiddenGlobalOffsetY,
    HiddenGlobalOffsetZ,
    HiddenNone,
    HiddenPrintfBuffer,
    HiddenDefaultQueue,
    HiddenCompletionAction,
    HiddenMultiGridSyncArg,
    HiddenHostcallBuffer,
    Unknown
  };

  KernelArgMD()
      : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
        align_(0), valueKind_(ValueKind::Unknown) {}

  // fields
  std::string name_;
  std::string typeName_;
  uint32_t size_;
  uint32_t offset_;
  uint32_t align_;
  ValueKind valueKind_;
};

class KernelMD {
public:
  KernelMD() : kernargSegmentSize_(0ull) {}

  // fields
  uint64_t kernargSegmentSize_;
};

static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
    // Including only those fields that are relevant to the runtime.
    // {"ByValue", KernelArgMD::ValueKind::ByValue},
    // {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
    // {"DynamicSharedPointer",
    // KernelArgMD::ValueKind::DynamicSharedPointer},
    // {"Sampler", KernelArgMD::ValueKind::Sampler},
    // {"Image", KernelArgMD::ValueKind::Image},
    // {"Pipe", KernelArgMD::ValueKind::Pipe},
    // {"Queue", KernelArgMD::ValueKind::Queue},
    {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
    {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
    {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
    // v3
    // {"by_value", KernelArgMD::ValueKind::ByValue},
    // {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
    // {"dynamic_shared_pointer",
    // KernelArgMD::ValueKind::DynamicSharedPointer},
    // {"sampler", KernelArgMD::ValueKind::Sampler},
    // {"image", KernelArgMD::ValueKind::Image},
    // {"pipe", KernelArgMD::ValueKind::Pipe},
    // {"queue", KernelArgMD::ValueKind::Queue},
    {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
    {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"hidden_completion_action",
     KernelArgMD::ValueKind::HiddenCompletionAction},
    {"hidden_multigrid_sync_arg",
     KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
};
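// A minimal lookup sketch (hypothetical key; the real lookup happens in
// populate_kernelArgMD below). Keys not present in the table, i.e. all
// explicit argument kinds, fall back to ValueKind::Unknown:
//
//   auto it = ArgValueKind.find("hidden_none");
//   KernelArgMD::ValueKind kind = (it != ArgValueKind.end())
//                                     ? it->second
//                                     : KernelArgMD::ValueKind::Unknown;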
// global variables. TODO: Get rid of these
atmi_machine_t g_atmi_machine;
ATLMachine g_atl_machine;

std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools;

/*
 * atlc holds all of the internal global state.
 * The structure atl_context_t is defined in atl_internal.h.
 * Most references use the global structure atlc directly.
 */
atl_context_t atlc = {.struct_initialized = false};

namespace core {
/* Machine Info */
atmi_machine_t *Runtime::GetMachineInfo() {
  if (!atlc.g_hsa_initialized)
    return NULL;
  return &g_atmi_machine;
}

hsa_status_t allow_access_to_all_gpu_agents(void *ptr) {
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  std::vector<hsa_agent_t> agents;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    agents.push_back(gpu_procs[i].agent());
  }
  return hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr);
}

static void atmi_init_context_structs() {
  atlc.struct_initialized = true; /* This only gets called one time */
  atlc.g_hsa_initialized = false;
  atlc.g_gpu_initialized = false;
  atlc.g_tasks_initialized = false;
}

// Implements the memory_pool iteration callback
static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
                                         void *data) {
  ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
  hsa_status_t err = HSA_STATUS_SUCCESS;
  // Check if the memory_pool is allowed to allocate, i.e. do not return group
  // memory
  bool alloc_allowed = false;
  err = hsa_amd_memory_pool_get_info(
      memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
      &alloc_allowed);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Alloc allowed in memory pool check", get_error_string(err));
    return err;
  }
  if (alloc_allowed) {
    uint32_t global_flag = 0;
    err = hsa_amd_memory_pool_get_info(
        memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Get memory pool info", get_error_string(err));
      return err;
    }
    if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
      proc->addMemory(new_mem);
      if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) {
        DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle);
        atl_gpu_kernarg_pools.push_back(memory_pool);
      }
    } else {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
      proc->addMemory(new_mem);
    }
  }

  return err;
}

static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
  hsa_status_t err = HSA_STATUS_SUCCESS;
  hsa_device_type_t device_type;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Get device type info", get_error_string(err));
    return err;
  }
  switch (device_type) {
  case HSA_DEVICE_TYPE_CPU: {
    ATLCPUProcessor new_proc(agent);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterate all memory pools", get_error_string(err));
      return err;
    }
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_GPU: {
    hsa_profile_t profile;
    err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Query the agent profile", get_error_string(err));
      return err;
    }
    atmi_devtype_t gpu_type;
    gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
    ATLGPUProcessor new_proc(agent, gpu_type);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterate all memory pools", get_error_string(err));
      return err;
    }
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_DSP: {
    err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  } break;
  }

  return err;
}

hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (segment != HSA_REGION_SEGMENT_GLOBAL) {
    return HSA_STATUS_SUCCESS;
  }
  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }
  return HSA_STATUS_SUCCESS;
}

/* Determines if a memory region can be used for kernarg allocations. */
static hsa_status_t get_kernarg_memory_region(hsa_region_t region,
                                              void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (HSA_REGION_SEGMENT_GLOBAL != segment) {
    return HSA_STATUS_SUCCESS;
  }

  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }

  return HSA_STATUS_SUCCESS;
}
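// A sketch of how the region callbacks above are driven (it mirrors the
// calls in init_compute_and_memory below). HSA_STATUS_INFO_BREAK is the
// "found it, stop iterating" signal, so callers fold it back into success
// and test the sentinel handle instead:
//
//   hsa_region_t region;
//   region.handle = (uint64_t)-1;
//   hsa_status_t r =
//       hsa_agent_iterate_regions(agent, get_kernarg_memory_region, &region);
//   if (r == HSA_STATUS_INFO_BREAK)
//     r = HSA_STATUS_SUCCESS;
//   bool found = (region.handle != (uint64_t)-1);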
static hsa_status_t init_compute_and_memory() {
  hsa_status_t err;

  /* Iterate over the agents and pick the gpu agent */
  err = hsa_iterate_agents(get_agent_info, NULL);
  if (err == HSA_STATUS_INFO_BREAK) {
    err = HSA_STATUS_SUCCESS;
  }
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Getting a gpu agent", get_error_string(err));
    return err;
  }

  /* Init all devices or individual device types? */
  std::vector<ATLCPUProcessor> &cpu_procs =
      g_atl_machine.processors<ATLCPUProcessor>();
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  /* For CPU memory pools, add other devices that can access them directly
   * or indirectly */
  for (auto &cpu_proc : cpu_procs) {
    for (auto &cpu_mem : cpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = cpu_mem.memory();
      for (auto &gpu_proc : gpu_procs) {
        hsa_agent_t agent = gpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // access is not NEVER, i.e. it is either allowed by default or
          // can be enabled; add this memory pool to the proc
          gpu_proc.addMemory(cpu_mem);
        }
      }
    }
  }

  /* FIXME: are the below combinations of procs and memory pools needed?
   * All-to-all compare procs with their memory pools and add those memory
   * pools that are accessible by the target procs */
  for (auto &gpu_proc : gpu_procs) {
    for (auto &gpu_mem : gpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = gpu_mem.memory();
      for (auto &cpu_proc : cpu_procs) {
        hsa_agent_t agent = cpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // access is not NEVER; add this memory pool to the proc
          cpu_proc.addMemory(gpu_mem);
        }
      }
    }
  }

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size();
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size();

  size_t num_procs = cpu_procs.size() + gpu_procs.size();
  // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs *
  // sizeof(atmi_device_t));
  atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>(
      malloc(num_procs * sizeof(atmi_device_t)));
  int num_iGPUs = 0;
  int num_dGPUs = 0;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
      num_iGPUs++;
    else
      num_dGPUs++;
  }
  assert(num_iGPUs + num_dGPUs == gpu_procs.size() &&
         "Number of dGPUs and iGPUs do not add up");
  DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
  DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
  DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
  DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());

  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs;
  g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs;

  int cpus_begin = 0;
  int cpus_end = cpu_procs.size();
  int gpus_begin = cpu_procs.size();
  int gpus_end = cpu_procs.size() + gpu_procs.size();
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin];
  g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin];
  int proc_index = 0;
  for (int i = cpus_begin; i < cpus_end; i++) {
    all_devices[i].type = cpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("CPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  for (int i = gpus_begin; i < gpus_end; i++) {
    all_devices[i].type = gpu_procs[proc_index].type();

    std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("GPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  hsa_region_t atl_cpu_kernarg_region;
  atl_cpu_kernarg_region.handle = (uint64_t)-1;
  if (cpu_procs.size() > 0) {
    err = hsa_agent_iterate_regions(cpu_procs[0].agent(),
                                    get_fine_grained_region,
                                    &atl_cpu_kernarg_region);
    if (err == HSA_STATUS_INFO_BREAK) {
      err = HSA_STATUS_SUCCESS;
    }
    err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding a CPU kernarg memory region handle",
             get_error_string(err));
      return err;
    }
  }
  hsa_region_t atl_gpu_kernarg_region;
  /* Find a memory region that supports kernel arguments. */
  atl_gpu_kernarg_region.handle = (uint64_t)-1;
  if (gpu_procs.size() > 0) {
    hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region,
                              &atl_gpu_kernarg_region);
    err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding a kernarg memory region", get_error_string(err));
      return err;
    }
  }
  if (num_procs > 0)
    return HSA_STATUS_SUCCESS;
  else
    return HSA_STATUS_ERROR_NOT_INITIALIZED;
}

hsa_status_t init_hsa() {
  if (atlc.g_hsa_initialized == false) {
    DEBUG_PRINT("Initializing HSA...");
    hsa_status_t err = hsa_init();
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Initializing the hsa runtime", get_error_string(err));
      return err;
    }

    err = init_compute_and_memory();
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "After initializing compute and memory", get_error_string(err));
      return err;
    }

    atlc.g_hsa_initialized = true;
    DEBUG_PRINT("done\n");
  }
  return HSA_STATUS_SUCCESS;
}
void init_tasks() {
  if (atlc.g_tasks_initialized != false)
    return;
  std::vector<hsa_agent_t> gpu_agents;
  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    atmi_place_t place = ATMI_PLACE_GPU(0, gpu);
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
    gpu_agents.push_back(proc.agent());
  }
  atlc.g_tasks_initialized = true;
}

hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
#if (ROCM_VERSION_MAJOR >= 3) ||                                               \
    (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
  if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
#else
  if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
#endif
    hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
    // memory_fault.agent
    // memory_fault.virtual_address
    // memory_fault.fault_reason_mask
    // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address);
    std::stringstream stream;
    stream << std::hex << (uintptr_t)memory_fault.virtual_address;
    std::string addr("0x" + stream.str());

    std::string err_string = "[GPU Memory Error] Addr: " + addr;
    err_string += " Reason: ";
    // The fault reason mask is a bitfield of single-bit flags matching
    // hsa_amd_memory_fault_reason_t (bits 0 through 5).
    if (!(memory_fault.fault_reason_mask & 0x3F)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask & (1 << 0))
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & (1 << 1))
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & (1 << 2))
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & (1 << 3))
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & (1 << 4))
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & (1 << 5))
        err_string += "Can't determine the exact fault address. ";
    }
    fprintf(stderr, "%s\n", err_string.c_str());
    return HSA_STATUS_ERROR;
  }
  return HSA_STATUS_SUCCESS;
}

hsa_status_t atl_init_gpu_context() {
  if (atlc.struct_initialized == false)
    atmi_init_context_structs();
  if (atlc.g_gpu_initialized != false)
    return HSA_STATUS_SUCCESS;

  hsa_status_t err;
  err = init_hsa();
  if (err != HSA_STATUS_SUCCESS)
    return HSA_STATUS_ERROR;

  err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Registering the system for memory faults", get_error_string(err));
    return HSA_STATUS_ERROR;
  }

  init_tasks();
  atlc.g_gpu_initialized = true;
  return HSA_STATUS_SUCCESS;
}
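// Typical call order, as a sketch (the real call sites live in the layers
// above this file): context setup must succeed before any module is
// registered:
//
//   if (core::atl_init_gpu_context() != HSA_STATUS_SUCCESS) {
//     // bail out; HSA is unusable
//   }
//   // ... then call core::RegisterModuleFromMemory(...) per code object.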
static bool isImplicit(KernelArgMD::ValueKind value_kind) {
  switch (value_kind) {
  case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
  case KernelArgMD::ValueKind::HiddenNone:
  case KernelArgMD::ValueKind::HiddenPrintfBuffer:
  case KernelArgMD::ValueKind::HiddenDefaultQueue:
  case KernelArgMD::ValueKind::HiddenCompletionAction:
  case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
  case KernelArgMD::ValueKind::HiddenHostcallBuffer:
    return true;
  default:
    return false;
  }
}

static std::pair<unsigned char *, unsigned char *>
find_metadata(void *binary, size_t binSize) {
  std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};

  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (elf_kind(e) != ELF_K_ELF) {
    elf_end(e);
    return failure;
  }

  size_t numpHdrs;
  if (elf_getphdrnum(e, &numpHdrs) != 0) {
    elf_end(e);
    return failure;
  }

  for (size_t i = 0; i < numpHdrs; ++i) {
    GElf_Phdr pHdr;
    if (gelf_getphdr(e, i, &pHdr) != &pHdr) {
      continue;
    }
    // Look for the runtime metadata note
    if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
      // Iterate over the notes in this segment
      address ptr = (address)binary + pHdr.p_offset;
      address segmentEnd = ptr + pHdr.p_filesz;

      while (ptr < segmentEnd) {
        Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
        address name = (address)&note[1];

        if (note->n_type == 7 || note->n_type == 8) {
          elf_end(e);
          return failure;
        } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
                   note->n_namesz == sizeof "AMD" &&
                   !memcmp(name, "AMD", note->n_namesz)) {
          // code object v2 uses yaml metadata, which is no longer supported
          elf_end(e);
          return failure;
        } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
                   note->n_namesz == sizeof "AMDGPU" &&
                   !memcmp(name, "AMDGPU", note->n_namesz)) {
          // The desc payload (e.g. n_descsz == 485 in one observed binary)
          // is padded to 4-byte alignment; we may want to move the end
          // pointer up to match.
          size_t offset = sizeof(uint32_t) * 3 /* fields */
                          + sizeof("AMDGPU")   /* name */
                          + 1 /* padding to 4 byte alignment */;

          // Including the trailing padding means both pointers are 4-byte
          // aligned, which may be useful later.
          unsigned char *metadata_start = (unsigned char *)ptr + offset;
          unsigned char *metadata_end =
              metadata_start + core::alignUp(note->n_descsz, 4);
          // The returned pointers refer into `binary`, not into libelf-owned
          // memory, so the Elf handle can be released here.
          elf_end(e);
          return {metadata_start, metadata_end};
        }
        ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
               core::alignUp(note->n_descsz, sizeof(int));
      }
    }
  }

  elf_end(e);
  return failure;
}
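// Worked example of the offset arithmetic above for the "AMDGPU" note: the
// fixed header is 3 * 4 = 12 bytes and the name is sizeof "AMDGPU" = 7 bytes
// (including the NUL), padded by 1 byte to the next 4-byte boundary, so the
// msgpack blob starts 12 + 8 = 20 bytes into the note record.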
namespace {
int map_lookup_array(msgpack::byte_range message, const char *needle,
                     msgpack::byte_range *res, uint64_t *size) {
  unsigned count = 0;
  struct s : msgpack::functors_defaults<s> {
    s(unsigned &count, uint64_t *size) : count(count), size(size) {}
    unsigned &count;
    uint64_t *size;
    const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
      count++;
      *size = N;
      return bytes.end;
    }
  };

  msgpack::foreach_map(
      message, [&](msgpack::byte_range key, msgpack::byte_range value) {
        if (msgpack::message_is_string(key, needle)) {
          // If the message is an array, record the number of
          // elements in *size
          msgpack::handle_msgpack<s>(value, {count, size});
          // return the whole array
          *res = value;
        }
      });
  // Only claim success if exactly one key/array pair matched
  return count != 1;
}

int map_lookup_string(msgpack::byte_range message, const char *needle,
                      std::string *res) {
  unsigned count = 0;
  struct s : public msgpack::functors_defaults<s> {
    s(unsigned &count, std::string *res) : count(count), res(res) {}
    unsigned &count;
    std::string *res;
    void handle_string(size_t N, const unsigned char *str) {
      count++;
      *res = std::string(str, str + N);
    }
  };
  msgpack::foreach_map(
      message, [&](msgpack::byte_range key, msgpack::byte_range value) {
        if (msgpack::message_is_string(key, needle)) {
          msgpack::handle_msgpack<s>(value, {count, res});
        }
      });
  return count != 1;
}

int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
                        uint64_t *res) {
  unsigned count = 0;
  msgpack::foreach_map(
      message, [&](msgpack::byte_range key, msgpack::byte_range value) {
        if (msgpack::message_is_string(key, needle)) {
          msgpack::foronly_unsigned(value, [&](uint64_t x) {
            count++;
            *res = x;
          });
        }
      });
  return count != 1;
}

int array_lookup_element(msgpack::byte_range message, uint64_t elt,
                         msgpack::byte_range *res) {
  int rc = 1;
  uint64_t i = 0;
  msgpack::foreach_array(message, [&](msgpack::byte_range value) {
    if (i == elt) {
      *res = value;
      rc = 0;
    }
    i++;
  });
  return rc;
}

int populate_kernelArgMD(msgpack::byte_range args_element,
                         KernelArgMD *kernelarg) {
  using namespace msgpack;
  int error = 0;
  foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
    if (message_is_string(key, ".name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->name_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".type_name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->typeName_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".size")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
    } else if (message_is_string(key, ".offset")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
    } else if (message_is_string(key, ".value_kind")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        std::string s = std::string(str, str + N);
        auto itValueKind = ArgValueKind.find(s);
        if (itValueKind != ArgValueKind.end()) {
          kernelarg->valueKind_ = itValueKind->second;
        }
      });
    }
  });
  return error;
}
} // namespace
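// Minimal usage sketch of the helpers above (the byte range and keys are
// placeholders). Each helper returns 0 on success and non-zero otherwise, so
// call sites accumulate failures by addition, as in
// get_code_object_custom_metadata below:
//
//   std::string name;
//   uint64_t nargs = 0;
//   msgpack::byte_range args;
//   int errors = map_lookup_string(element, ".name", &name);
//   errors += map_lookup_array(element, ".args", &args, &nargs);
//   if (errors != 0) { /* metadata malformed or key missing */ }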
static hsa_status_t get_code_object_custom_metadata(
    void *binary, size_t binSize, int gpu,
    std::map<std::string, atl_kernel_info_t> &KernelInfoTable) {
  // parse the code object with different keys from v2
  // also, the kernel name is not the same as the symbol name -- so a
  // symbol->name map is needed

  std::pair<unsigned char *, unsigned char *> metadata =
      find_metadata(binary, binSize);
  if (!metadata.first) {
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  uint64_t kernelsSize = 0;
  int msgpack_errors = 0;
  msgpack::byte_range kernel_array;
  msgpack_errors =
      map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
                       &kernel_array, &kernelsSize);
  if (msgpack_errors != 0) {
    printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
           "kernels lookup in program metadata");
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  for (size_t i = 0; i < kernelsSize; i++) {
    assert(msgpack_errors == 0);
    std::string kernelName;
    std::string symbolName;

    msgpack::byte_range element;
    msgpack_errors += array_lookup_element(kernel_array, i, &element);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "element lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    msgpack_errors += map_lookup_string(element, ".name", &kernelName);
    msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "strings lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    // Make sure that kernelName + ".kd" == symbolName
    if ((kernelName + ".kd") != symbolName) {
      printf("[%s:%d] Kernel name mismatching symbol: %s != %s + .kd\n",
             __FILE__, __LINE__, symbolName.c_str(), kernelName.c_str());
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}};

    uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count;
    msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "sgpr count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.sgpr_count = sgpr_count;

    msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "vgpr count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.vgpr_count = vgpr_count;

    msgpack_errors +=
        map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "sgpr spill count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.sgpr_spill_count = sgpr_spill_count;

    msgpack_errors +=
        map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "vgpr spill count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.vgpr_spill_count = vgpr_spill_count;

    size_t kernel_explicit_args_size = 0;
    uint64_t kernel_segment_size;
    msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
                                          &kernel_segment_size);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "kernarg segment size metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }
    bool hasHiddenArgs = false;
    if (kernel_segment_size > 0) {
      uint64_t argsSize;
      size_t offset = 0;

      msgpack::byte_range args_array;
      msgpack_errors +=
          map_lookup_array(element, ".args", &args_array, &argsSize);
      if (msgpack_errors != 0) {
        printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
               "kernel args metadata lookup in kernel metadata");
        return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
      }

      info.num_args = argsSize;

      for (size_t i = 0; i < argsSize; ++i) {
        KernelArgMD lcArg;

        msgpack::byte_range args_element;
        msgpack_errors += array_lookup_element(args_array, i, &args_element);
        if (msgpack_errors != 0) {
          printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "iterate args map in kernel args metadata");
          return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
        }

        msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
        if (msgpack_errors != 0) {
          printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "populate arg map in kernel args metadata");
          return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
        }
        // populate info with sizes and offsets
        info.arg_sizes.push_back(lcArg.size_);
        // v3 has an offset field instead of an align field
        size_t new_offset = lcArg.offset_;
        size_t padding = new_offset - offset;
        offset = new_offset;
        info.arg_offsets.push_back(lcArg.offset_);
        DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(),
                    lcArg.size_, lcArg.offset_);
        offset += lcArg.size_;

        // check if the arg is a hidden/implicit arg
        // this logic assumes that all hidden args are 8-byte aligned
        if (!isImplicit(lcArg.valueKind_)) {
          kernel_explicit_args_size += lcArg.size_;
        } else {
          hasHiddenArgs = true;
        }
        kernel_explicit_args_size += padding;
      }
    }

    // Add the size of ATMI's implicit args (e.g. the offset x, y and z and
    // the pipe pointer). Do not count the compiler-set implicit args; when
    // they are present, discount them and account only for ATMI's own
    // implicit args (atmi_implicit_args_t).
    info.kernel_segment_size =
        (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
        sizeof(atmi_implicit_args_t);
    DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
                kernel_segment_size, info.kernel_segment_size);

    // kernel received, now add it to the kernel info table
    KernelInfoTable[kernelName] = info;
  }

  return HSA_STATUS_SUCCESS;
}
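// Worked example of the size computation above (hypothetical kernel): for a
// kernel with one 8-byte pointer argument followed by compiler-emitted hidden
// args, kernel_explicit_args_size ends up as 8 and hasHiddenArgs is true, so
// the stored kernel_segment_size becomes 8 + sizeof(atmi_implicit_args_t),
// replacing the compiler's hidden-arg block with ATMI's own.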
static hsa_status_t
populate_InfoTables(hsa_executable_symbol_t symbol, int gpu,
                    std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
                    std::map<std::string, atl_symbol_info_t> &SymbolInfoTable) {
  hsa_symbol_kind_t type;

  uint32_t name_length;
  hsa_status_t err;
  err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
                                       &type);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Symbol info extraction", get_error_string(err));
    return err;
  }
  DEBUG_PRINT("Exec Symbol type: %d\n", type);
  if (type == HSA_SYMBOL_KIND_KERNEL) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      return err;
    }
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      free(name);
      return err;
    }
    // remove the ".kd" suffix from the symbol name
    name[name_length - 3] = 0;

    atl_kernel_info_t info;
    std::string kernelName(name);
    // by now, the kernel info table should already have an entry
    // because the non-ROCr custom code object parsing is called before
    // iterating over the code object symbols using ROCr
    if (KernelInfoTable.find(kernelName) == KernelInfoTable.end()) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding the entry kernel info table",
             get_error_string(HSA_STATUS_ERROR_INVALID_CODE_OBJECT));
      exit(1);
    }
    // found, so assign and update
    info = KernelInfoTable[kernelName];

    /* Extract dispatch information from the symbol */
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
        &(info.kernel_object));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the symbol from the executable",
             get_error_string(err));
      free(name);
      return err;
    }
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
        &(info.group_segment_size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the group segment size from the executable",
             get_error_string(err));
      free(name);
      return err;
    }
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
        &(info.private_segment_size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the private segment from the executable",
             get_error_string(err));
      free(name);
      return err;
    }

    DEBUG_PRINT(
        "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
        "kernarg\n",
        kernelName.c_str(), info.kernel_object, info.group_segment_size,
        info.private_segment_size, info.kernel_segment_size);

    // assign it back to the kernel info table
    KernelInfoTable[kernelName] = info;
    free(name);
  } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      return err;
    }
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      free(name);
      return err;
    }
    name[name_length] = 0;

    atl_symbol_info_t info;

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info address extraction", get_error_string(err));
      free(name);
      return err;
    }

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info size extraction", get_error_string(err));
      free(name);
      return err;
    }

    DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
                info.size);
    err = register_allocation(reinterpret_cast<void *>(info.addr),
                              (size_t)info.size, ATMI_DEVTYPE_GPU);
    if (err != HSA_STATUS_SUCCESS) {
      free(name);
      return err;
    }
    SymbolInfoTable[std::string(name)] = info;
    free(name);
  } else {
    DEBUG_PRINT("Symbol is an indirect function\n");
  }
  return HSA_STATUS_SUCCESS;
}
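// Example of the ".kd" suffix stripping above, with a hypothetical kernel
// name: for the symbol "vector_add.kd", HSA reports name_length == 13, so
// writing a NUL at index 10 leaves "vector_add", the key used to index
// KernelInfoTable.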
hsa_status_t RegisterModuleFromMemory(
    std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
    std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
    void *module_bytes, size_t module_size, atmi_place_t place,
    hsa_status_t (*on_deserialized_data)(void *data, size_t size,
                                         void *cb_state),
    void *cb_state, std::vector<hsa_executable_t> &HSAExecutables) {
  hsa_status_t err;
  int gpu = place.device_id;
  assert(gpu >= 0);

  DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu);
  ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place);
  hsa_agent_t agent = proc.agent();
  hsa_executable_t executable = {0};
  hsa_profile_t agent_profile;

  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Query the agent profile", get_error_string(err));
    return HSA_STATUS_ERROR;
  }
  // FIXME: Assume that every profile is FULL until we understand how to build
  // GCN with base profile
  agent_profile = HSA_PROFILE_FULL;
  /* Create the empty executable. */
  err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
                              &executable);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Create the executable", get_error_string(err));
    return HSA_STATUS_ERROR;
  }

  bool module_load_success = false;
  do // Existing control flow used continue, preserve that for this patch
  {
    {
      // Some metadata info is not available through the ROCr API, so use
      // custom code object metadata parsing to collect it

      err = get_code_object_custom_metadata(module_bytes, module_size, gpu,
                                            KernelInfoTable);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Getting custom code object metadata",
                    get_error_string(err));
        continue;
      }

      // Deserialize code object.
      hsa_code_object_t code_object = {0};
      err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
                                        &code_object);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Code Object Deserialization", get_error_string(err));
        continue;
      }
      assert(0 != code_object.handle);

      // Mutating the device image here avoids another allocation & memcpy
      void *code_object_alloc_data =
          reinterpret_cast<void *>(code_object.handle);
      hsa_status_t atmi_err =
          on_deserialized_data(code_object_alloc_data, module_size, cb_state);
      if (atmi_err != HSA_STATUS_SUCCESS) {
        printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
               "Error in deserialized_data callback",
               get_atmi_error_string(atmi_err));
        return atmi_err;
      }

      /* Load the code object. */
      err = hsa_executable_load_code_object(executable, agent, code_object,
                                            NULL);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Loading the code object", get_error_string(err));
        continue;
      }

      // cannot iterate over symbols until the executable is frozen
    }
    module_load_success = true;
  } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
  if (module_load_success) {
    /* Freeze the executable; it can now be queried for symbols. */
    err = hsa_executable_freeze(executable, "");
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Freeze the executable", get_error_string(err));
      return HSA_STATUS_ERROR;
    }

    err = hsa::executable_iterate_symbols(
        executable,
        [&](hsa_executable_t, hsa_executable_symbol_t symbol) -> hsa_status_t {
          return populate_InfoTables(symbol, gpu, KernelInfoTable,
                                     SymbolInfoTable);
        });
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterating over symbols for executable", get_error_string(err));
      return HSA_STATUS_ERROR;
    }

    // save the executable and destroy it during finalize
    HSAExecutables.push_back(executable);
    return HSA_STATUS_SUCCESS;
  } else {
    return HSA_STATUS_ERROR;
  }
}

} // namespace core