/*===--------------------------------------------------------------------------
 * ATMI (Asynchronous Task and Memory Interface)
 *
 * This file is distributed under the MIT License. See LICENSE.txt for details.
 *===------------------------------------------------------------------------*/
#include <gelf.h>
#include <libelf.h>

#include <cassert>
#include <cstdarg>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "internal.h"
#include "machine.h"
#include "rt.h"

#include "msgpack.h"

namespace hsa {
// Wrap the HSA iterate API in a shim that allows passing general callables,
// including capturing lambdas: the C API accepts only a function pointer plus
// a void* payload, so the callable is smuggled through the payload.
template <typename C>
hsa_status_t executable_iterate_symbols(hsa_executable_t executable, C cb) {
  auto L = [](hsa_executable_t executable, hsa_executable_symbol_t symbol,
              void *data) -> hsa_status_t {
    C *unwrapped = static_cast<C *>(data);
    return (*unwrapped)(executable, symbol);
  };
  return hsa_executable_iterate_symbols(executable, L,
                                        static_cast<void *>(&cb));
}
} // namespace hsa
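// Minimal usage sketch of the shim above (illustrative only; `executable` and
// the counter are hypothetical):
//
//   unsigned num_symbols = 0;
//   hsa::executable_iterate_symbols(
//       executable,
//       [&](hsa_executable_t, hsa_executable_symbol_t) -> hsa_status_t {
//         ++num_symbols;
//         return HSA_STATUS_SUCCESS;
//       });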
typedef unsigned char *address;
/*
 * Note descriptors.
 */
typedef struct {
  uint32_t n_namesz; /* Length of note's name. */
  uint32_t n_descsz; /* Length of note's value. */
  uint32_t n_type;   /* Type of note. */
  // then the name
  // then padding, optional
  // then the desc, at 4-byte alignment (not 8, despite this being ELF64)
} Elf_Note;
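// Worked example of the layout above, as consumed by find_metadata() later in
// this file: for the code object v3 note the name is "AMDGPU" (n_namesz == 7,
// counting the trailing NUL), so the descriptor begins at
//   3 * sizeof(uint32_t)  // header fields          -> 12 bytes
//   + 7                   // "AMDGPU" with NUL      -> 19 bytes
//   + 1                   // pad to 4-byte boundary -> 20 bytes
// from the start of the note, and with (say) n_descsz == 485 the next record
// starts 12 + alignUp(7, 4) + alignUp(485, 4) == 508 bytes in, i.e. notes are
// walked at 4-byte granularity.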
// The following include file and following structs/enums
// have been replicated on a per-use basis below. For example,
// llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields,
// but we may care only about kernargSegmentSize_ for now, so
// we include only that field in our KernelMD implementation. We
// chose to replicate rather than include so that compiling the
// runtime does not require a dependency on LLVM_INCLUDE_DIR.
// #include "llvm/Support/AMDGPUMetadata.h"
// typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD;
// typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD;
// using llvm::AMDGPU::HSAMD::AccessQualifier;
// using llvm::AMDGPU::HSAMD::AddressSpaceQualifier;
// using llvm::AMDGPU::HSAMD::ValueKind;
// using llvm::AMDGPU::HSAMD::ValueType;

class KernelArgMD {
public:
  enum class ValueKind {
    HiddenGlobalOffsetX,
    HiddenGlobalOffsetY,
    HiddenGlobalOffsetZ,
    HiddenNone,
    HiddenPrintfBuffer,
    HiddenDefaultQueue,
    HiddenCompletionAction,
    HiddenMultiGridSyncArg,
    HiddenHostcallBuffer,
    Unknown
  };

  KernelArgMD()
      : name_(std::string()), typeName_(std::string()), size_(0), offset_(0),
        align_(0), valueKind_(ValueKind::Unknown) {}

  // fields
  std::string name_;
  std::string typeName_;
  uint32_t size_;
  uint32_t offset_;
  uint32_t align_;
  ValueKind valueKind_;
};

class KernelMD {
public:
  KernelMD() : kernargSegmentSize_(0ull) {}

  // fields
  uint64_t kernargSegmentSize_;
};

static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = {
    // Including only those fields that are relevant to the runtime.
    // v2
    //    {"ByValue", KernelArgMD::ValueKind::ByValue},
    //    {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"DynamicSharedPointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"Sampler", KernelArgMD::ValueKind::Sampler},
    //    {"Image", KernelArgMD::ValueKind::Image},
    //    {"Pipe", KernelArgMD::ValueKind::Pipe},
    //    {"Queue", KernelArgMD::ValueKind::Queue},
    {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"HiddenNone", KernelArgMD::ValueKind::HiddenNone},
    {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction},
    {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
    // v3
    //    {"by_value", KernelArgMD::ValueKind::ByValue},
    //    {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer},
    //    {"dynamic_shared_pointer",
    //    KernelArgMD::ValueKind::DynamicSharedPointer},
    //    {"sampler", KernelArgMD::ValueKind::Sampler},
    //    {"image", KernelArgMD::ValueKind::Image},
    //    {"pipe", KernelArgMD::ValueKind::Pipe},
    //    {"queue", KernelArgMD::ValueKind::Queue},
    {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX},
    {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY},
    {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ},
    {"hidden_none", KernelArgMD::ValueKind::HiddenNone},
    {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer},
    {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue},
    {"hidden_completion_action",
     KernelArgMD::ValueKind::HiddenCompletionAction},
    {"hidden_multigrid_sync_arg",
     KernelArgMD::ValueKind::HiddenMultiGridSyncArg},
    {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer},
};

ATLMachine g_atl_machine;

/*
 * atlc holds all internal global state.
 * The structure atl_context_t is defined in atl_internal.h.
 * Most references use the global structure prefix atlc.
 */
atl_context_t atlc = {.struct_initialized = false};

namespace core {

hsa_status_t allow_access_to_all_gpu_agents(void *ptr) {
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  std::vector<hsa_agent_t> agents;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    agents.push_back(gpu_procs[i].agent());
  }
  return hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr);
}

static void atmi_init_context_structs() {
  atlc.struct_initialized = true; /* This only gets called one time */
  atlc.g_hsa_initialized = false;
  atlc.g_gpu_initialized = false;
  atlc.g_tasks_initialized = false;
}

// Memory pool iteration callback: attach each pool the runtime is allowed to
// allocate from to the processor, classified as fine- or coarse-grained.
static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool,
                                         void *data) {
  ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data);
  hsa_status_t err = HSA_STATUS_SUCCESS;
  // Check if the memory_pool is allowed to allocate, i.e. do not return group
  // memory
  bool alloc_allowed = false;
  err = hsa_amd_memory_pool_get_info(
      memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED,
      &alloc_allowed);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Alloc allowed in memory pool check", get_error_string(err));
    return err;
  }
  if (alloc_allowed) {
    uint32_t global_flag = 0;
    err = hsa_amd_memory_pool_get_info(
        memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Get memory pool info", get_error_string(err));
      return err;
    }
    if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED);
      proc->addMemory(new_mem);
    } else {
      ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED);
      proc->addMemory(new_mem);
    }
  }

  return err;
}
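// A minimal allocation sketch, assuming a pool collected by the callback
// above is in hand (`proc` and `nbytes` are hypothetical):
//
//   hsa_amd_memory_pool_t pool = proc.memories()[0].memory();
//   void *buf = nullptr;
//   hsa_status_t rc =
//       hsa_amd_memory_pool_allocate(pool, nbytes, /*flags=*/0, &buf);
//   if (rc == HSA_STATUS_SUCCESS)
//     hsa_amd_memory_pool_free(buf);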
static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) {
  hsa_status_t err = HSA_STATUS_SUCCESS;
  hsa_device_type_t device_type;
  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Get device type info", get_error_string(err));
    return err;
  }
  switch (device_type) {
  case HSA_DEVICE_TYPE_CPU: {
    ATLCPUProcessor new_proc(agent);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterate all memory pools", get_error_string(err));
      return err;
    }
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_GPU: {
    hsa_profile_t profile;
    err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Query the agent profile", get_error_string(err));
      return err;
    }
    atmi_devtype_t gpu_type;
    gpu_type =
        (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU;
    ATLGPUProcessor new_proc(agent, gpu_type);
    err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info,
                                             &new_proc);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterate all memory pools", get_error_string(err));
      return err;
    }
    g_atl_machine.addProcessor(new_proc);
  } break;
  case HSA_DEVICE_TYPE_DSP: {
    err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  } break;
  }

  return err;
}

hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (segment != HSA_REGION_SEGMENT_GLOBAL) {
    return HSA_STATUS_SUCCESS;
  }
  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }
  return HSA_STATUS_SUCCESS;
}

/* Determines if a memory region can be used for kernarg allocations. */
static hsa_status_t get_kernarg_memory_region(hsa_region_t region,
                                              void *data) {
  hsa_region_segment_t segment;
  hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment);
  if (HSA_REGION_SEGMENT_GLOBAL != segment) {
    return HSA_STATUS_SUCCESS;
  }

  hsa_region_global_flag_t flags;
  hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags);
  if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) {
    hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data);
    *ret = region;
    return HSA_STATUS_INFO_BREAK;
  }

  return HSA_STATUS_SUCCESS;
}

static hsa_status_t init_compute_and_memory() {
  hsa_status_t err;

  /* Iterate over the agents and register every CPU and GPU processor */
  err = hsa_iterate_agents(get_agent_info, NULL);
  if (err == HSA_STATUS_INFO_BREAK) {
    err = HSA_STATUS_SUCCESS;
  }
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Getting a gpu agent",
           get_error_string(err));
    return err;
  }

  /* Init all devices or individual device types? */
  std::vector<ATLCPUProcessor> &cpu_procs =
      g_atl_machine.processors<ATLCPUProcessor>();
  std::vector<ATLGPUProcessor> &gpu_procs =
      g_atl_machine.processors<ATLGPUProcessor>();
  /* For CPU memory pools, add other devices that can access them directly
   * or indirectly */
  for (auto &cpu_proc : cpu_procs) {
    for (auto &cpu_mem : cpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = cpu_mem.memory();
      for (auto &gpu_proc : gpu_procs) {
        hsa_agent_t agent = gpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // access is not NEVER_ALLOWED; it is either allowed by default or
          // can be enabled, so add this memory pool to the proc
          gpu_proc.addMemory(cpu_mem);
        }
      }
    }
  }

  /* FIXME: are the below combinations of procs and memory pools needed?
   * all-to-all compare procs with their memory pools and add those memory
   * pools that are accessible by the target procs */
  for (auto &gpu_proc : gpu_procs) {
    for (auto &gpu_mem : gpu_proc.memories()) {
      hsa_amd_memory_pool_t pool = gpu_mem.memory();
      for (auto &cpu_proc : cpu_procs) {
        hsa_agent_t agent = cpu_proc.agent();
        hsa_amd_memory_pool_access_t access;
        hsa_amd_agent_memory_pool_get_info(
            agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access);
        if (access != 0) {
          // access is not NEVER_ALLOWED; it is either allowed by default or
          // can be enabled, so add this memory pool to the proc
          cpu_proc.addMemory(gpu_mem);
        }
      }
    }
  }
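  // For reference, the access query above returns one of the
  // hsa_amd_memory_pool_access_t values:
  //   HSA_AMD_MEMORY_POOL_ACCESS_NEVER_ALLOWED         (0)
  //   HSA_AMD_MEMORY_POOL_ACCESS_ALLOWED_BY_DEFAULT
  //   HSA_AMD_MEMORY_POOL_ACCESS_DISALLOWED_BY_DEFAULT
  // so `access != 0` keeps every pool that is at least potentially accessible
  // from the peer agent.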
  size_t num_procs = cpu_procs.size() + gpu_procs.size();
  int num_iGPUs = 0;
  int num_dGPUs = 0;
  for (uint32_t i = 0; i < gpu_procs.size(); i++) {
    if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU)
      num_iGPUs++;
    else
      num_dGPUs++;
  }
  assert(num_iGPUs + num_dGPUs == static_cast<int>(gpu_procs.size()) &&
         "Number of dGPUs and iGPUs do not add up");
  DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size());
  DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs);
  DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs);
  DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size());

  int cpus_begin = 0;
  int cpus_end = cpu_procs.size();
  int gpus_begin = cpu_procs.size();
  int gpus_end = cpu_procs.size() + gpu_procs.size();
  int proc_index = 0;
  for (int i = cpus_begin; i < cpus_end; i++) {
    std::vector<ATLMemory> memories = cpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("CPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }
  proc_index = 0;
  for (int i = gpus_begin; i < gpus_end; i++) {
    std::vector<ATLMemory> memories = gpu_procs[proc_index].memories();
    int fine_memories_size = 0;
    int coarse_memories_size = 0;
    DEBUG_PRINT("GPU memory types:\t");
    for (auto &memory : memories) {
      atmi_memtype_t type = memory.type();
      if (type == ATMI_MEMTYPE_FINE_GRAINED) {
        fine_memories_size++;
        DEBUG_PRINT("Fine\t");
      } else {
        coarse_memories_size++;
        DEBUG_PRINT("Coarse\t");
      }
    }
    DEBUG_PRINT("\nFine Memories : %d", fine_memories_size);
    DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size);
    proc_index++;
  }

  hsa_region_t atl_cpu_kernarg_region;
  atl_cpu_kernarg_region.handle = (uint64_t)-1;
  if (cpu_procs.size() > 0) {
    err = hsa_agent_iterate_regions(
        cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region);
    if (err == HSA_STATUS_INFO_BREAK) {
      err = HSA_STATUS_SUCCESS;
    }
    err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding a CPU kernarg memory region handle",
             get_error_string(err));
      return err;
    }
  }

  /* Find a memory region that supports kernel arguments. */
  hsa_region_t atl_gpu_kernarg_region;
  atl_gpu_kernarg_region.handle = (uint64_t)-1;
  if (gpu_procs.size() > 0) {
    hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region,
                              &atl_gpu_kernarg_region);
    err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR
                                                          : HSA_STATUS_SUCCESS;
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding a kernarg memory region", get_error_string(err));
      return err;
    }
  }
  if (num_procs > 0)
    return HSA_STATUS_SUCCESS;
  else
    return HSA_STATUS_ERROR_NOT_INITIALIZED;
}

hsa_status_t init_hsa() {
  if (atlc.g_hsa_initialized == false) {
    DEBUG_PRINT("Initializing HSA...");
    hsa_status_t err = hsa_init();
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Initializing the hsa runtime", get_error_string(err));
      return err;
    }

    err = init_compute_and_memory();
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "After initializing compute and memory", get_error_string(err));
      return err;
    }

    atlc.g_hsa_initialized = true;
    DEBUG_PRINT("done\n");
  }
  return HSA_STATUS_SUCCESS;
}

void init_tasks() {
  if (atlc.g_tasks_initialized != false)
    return;
  std::vector<hsa_agent_t> gpu_agents;
  int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>();
  for (int gpu = 0; gpu < gpu_count; gpu++) {
    ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(gpu);
    gpu_agents.push_back(proc.agent());
  }
  atlc.g_tasks_initialized = true;
}

hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) {
#if (ROCM_VERSION_MAJOR >= 3) ||                                               \
    (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3)
  if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) {
#else
  if (event->event_type == GPU_MEMORY_FAULT_EVENT) {
#endif
    hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault;
    // memory_fault.agent
    // memory_fault.virtual_address
    // memory_fault.fault_reason_mask
    std::stringstream stream;
    stream << std::hex << (uintptr_t)memory_fault.virtual_address;
    std::string addr("0x" + stream.str());

    std::string err_string = "[GPU Memory Error] Addr: " + addr;
    err_string += " Reason: ";
    // Decode the low six reason bits, one bit per fault reason as in
    // hsa_amd_memory_fault_reason_t.
    if (!(memory_fault.fault_reason_mask & 0x0000003f)) {
      err_string += "No Idea! ";
    } else {
      if (memory_fault.fault_reason_mask & 0x00000001)
        err_string += "Page not present or supervisor privilege. ";
      if (memory_fault.fault_reason_mask & 0x00000002)
        err_string += "Write access to a read-only page. ";
      if (memory_fault.fault_reason_mask & 0x00000004)
        err_string += "Execute access to a page marked NX. ";
      if (memory_fault.fault_reason_mask & 0x00000008)
        err_string += "Host access only. ";
      if (memory_fault.fault_reason_mask & 0x00000010)
        err_string += "ECC failure (if supported by HW). ";
      if (memory_fault.fault_reason_mask & 0x00000020)
        err_string += "Can't determine the exact fault address. ";
    }
    fprintf(stderr, "%s\n", err_string.c_str());
    return HSA_STATUS_ERROR;
  }
  return HSA_STATUS_SUCCESS;
}
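// Example decoding for the handler above (illustrative): a fault_reason_mask
// of 0x3 sets bits 0 and 1, so the message would report both "Page not
// present or supervisor privilege." and "Write access to a read-only page."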
hsa_status_t atl_init_gpu_context() {
  if (atlc.struct_initialized == false)
    atmi_init_context_structs();
  if (atlc.g_gpu_initialized != false)
    return HSA_STATUS_SUCCESS;

  hsa_status_t err;
  err = init_hsa();
  if (err != HSA_STATUS_SUCCESS)
    return HSA_STATUS_ERROR;

  err = hsa_amd_register_system_event_handler(callbackEvent, NULL);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Registering the system for memory faults", get_error_string(err));
    return HSA_STATUS_ERROR;
  }

  init_tasks();
  atlc.g_gpu_initialized = true;
  return HSA_STATUS_SUCCESS;
}

static bool isImplicit(KernelArgMD::ValueKind value_kind) {
  switch (value_kind) {
  case KernelArgMD::ValueKind::HiddenGlobalOffsetX:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetY:
  case KernelArgMD::ValueKind::HiddenGlobalOffsetZ:
  case KernelArgMD::ValueKind::HiddenNone:
  case KernelArgMD::ValueKind::HiddenPrintfBuffer:
  case KernelArgMD::ValueKind::HiddenDefaultQueue:
  case KernelArgMD::ValueKind::HiddenCompletionAction:
  case KernelArgMD::ValueKind::HiddenMultiGridSyncArg:
  case KernelArgMD::ValueKind::HiddenHostcallBuffer:
    return true;
  default:
    return false;
  }
}

static std::pair<unsigned char *, unsigned char *>
find_metadata(void *binary, size_t binSize) {
  std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr};

  Elf *e = elf_memory(static_cast<char *>(binary), binSize);
  if (elf_kind(e) != ELF_K_ELF) {
    return failure;
  }

  size_t numpHdrs;
  if (elf_getphdrnum(e, &numpHdrs) != 0) {
    return failure;
  }

  for (size_t i = 0; i < numpHdrs; ++i) {
    GElf_Phdr pHdr;
    if (gelf_getphdr(e, i, &pHdr) != &pHdr) {
      continue;
    }
    // Look for the runtime metadata note
    if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) {
      // Iterate over the notes in this segment
      address ptr = (address)binary + pHdr.p_offset;
      address segmentEnd = ptr + pHdr.p_filesz;

      while (ptr < segmentEnd) {
        Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr);
        address name = (address)&note[1];

        if (note->n_type == 7 || note->n_type == 8) {
          return failure;
        } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ &&
                   note->n_namesz == sizeof "AMD" &&
                   !memcmp(name, "AMD", note->n_namesz)) {
          // code object v2 uses yaml metadata, no longer supported
          return failure;
        } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ &&
                   note->n_namesz == sizeof "AMDGPU" &&
                   !memcmp(name, "AMDGPU", note->n_namesz)) {
          // n_descsz is the size of the msgpack blob (e.g. 485); the value is
          // padded to 4-byte alignment, so the end pointer is rounded up to
          // match.
          size_t offset = sizeof(uint32_t) * 3 /* fields */
                          + sizeof("AMDGPU")   /* name */
                          + 1 /* padding to 4 byte alignment */;

          // Including the trailing padding means both pointers are 4-byte
          // aligned, which may be useful later.
          unsigned char *metadata_start = (unsigned char *)ptr + offset;
          unsigned char *metadata_end =
              metadata_start + core::alignUp(note->n_descsz, 4);
          return {metadata_start, metadata_end};
        }
        ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) +
               core::alignUp(note->n_descsz, sizeof(int));
      }
    }
  }

  return failure;
}
namespace {
// Each lookup below returns 0 iff the needle matched exactly one key in the
// msgpack map, so callers can accumulate the results as error counts.
int map_lookup_array(msgpack::byte_range message, const char *needle,
                     msgpack::byte_range *res, uint64_t *size) {
  unsigned count = 0;
  struct s : msgpack::functors_defaults<s> {
    s(unsigned &count, uint64_t *size) : count(count), size(size) {}
    unsigned &count;
    uint64_t *size;
    const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) {
      count++;
      *size = N;
      return bytes.end;
    }
  };

  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           // If the message is an array, record number of
                           // elements in *size
                           msgpack::handle_msgpack<s>(value, {count, size});
                           // return the whole array
                           *res = value;
                         }
                       });
  // Only claim success if exactly one key/array pair matched
  return count != 1;
}

int map_lookup_string(msgpack::byte_range message, const char *needle,
                      std::string *res) {
  unsigned count = 0;
  struct s : public msgpack::functors_defaults<s> {
    s(unsigned &count, std::string *res) : count(count), res(res) {}
    unsigned &count;
    std::string *res;
    void handle_string(size_t N, const unsigned char *str) {
      count++;
      *res = std::string(str, str + N);
    }
  };
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::handle_msgpack<s>(value, {count, res});
                         }
                       });
  return count != 1;
}

int map_lookup_uint64_t(msgpack::byte_range message, const char *needle,
                        uint64_t *res) {
  unsigned count = 0;
  msgpack::foreach_map(message,
                       [&](msgpack::byte_range key, msgpack::byte_range value) {
                         if (msgpack::message_is_string(key, needle)) {
                           msgpack::foronly_unsigned(value, [&](uint64_t x) {
                             count++;
                             *res = x;
                           });
                         }
                       });
  return count != 1;
}

int array_lookup_element(msgpack::byte_range message, uint64_t elt,
                         msgpack::byte_range *res) {
  int rc = 1;
  uint64_t i = 0;
  msgpack::foreach_array(message, [&](msgpack::byte_range value) {
    if (i == elt) {
      *res = value;
      rc = 0;
    }
    i++;
  });
  return rc;
}
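// Hedged usage sketch for the helpers above (`element` and the outputs are
// hypothetical): results compose by addition, and any nonzero sum means the
// metadata should be rejected.
//
//   uint64_t kernarg_size;
//   std::string kernel_name;
//   int errors = 0;
//   errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
//                                 &kernarg_size);
//   errors += map_lookup_string(element, ".name", &kernel_name);
//   if (errors != 0) { /* reject the code object */ }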
int populate_kernelArgMD(msgpack::byte_range args_element,
                         KernelArgMD *kernelarg) {
  using namespace msgpack;
  // Absent keys leave the corresponding default-constructed field in place;
  // unknown .value_kind strings leave valueKind_ as ValueKind::Unknown.
  int error = 0;
  foreach_map(args_element, [&](byte_range key, byte_range value) -> void {
    if (message_is_string(key, ".name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->name_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".type_name")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        kernelarg->typeName_ = std::string(str, str + N);
      });
    } else if (message_is_string(key, ".size")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; });
    } else if (message_is_string(key, ".offset")) {
      foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; });
    } else if (message_is_string(key, ".value_kind")) {
      foronly_string(value, [&](size_t N, const unsigned char *str) {
        std::string s = std::string(str, str + N);
        auto itValueKind = ArgValueKind.find(s);
        if (itValueKind != ArgValueKind.end()) {
          kernelarg->valueKind_ = itValueKind->second;
        }
      });
    }
  });
  return error;
}
} // namespace
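// For orientation, the msgpack metadata consumed below has this shape when
// rendered as JSON (key set taken from the lookups in this file; the values
// are illustrative only):
//
//   { "amdhsa.kernels": [
//       { ".name": "vector_add", ".symbol": "vector_add.kd",
//         ".kernarg_segment_size": 72,
//         ".sgpr_count": 14, ".vgpr_count": 6,
//         ".sgpr_spill_count": 0, ".vgpr_spill_count": 0,
//         ".args": [
//           { ".name": "out", ".size": 8, ".offset": 0,
//             ".value_kind": "global_buffer" },
//           ... ] },
//       ... ] }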
static hsa_status_t get_code_object_custom_metadata(
    void *binary, size_t binSize, int gpu,
    std::map<std::string, atl_kernel_info_t> &KernelInfoTable) {
  // parse the code object, which uses different keys from v2
  // also, the kernel name is not the same as the symbol name -- so a
  // symbol->name map is needed

  std::pair<unsigned char *, unsigned char *> metadata =
      find_metadata(binary, binSize);
  if (!metadata.first) {
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  uint64_t kernelsSize = 0;
  int msgpack_errors = 0;
  msgpack::byte_range kernel_array;
  msgpack_errors =
      map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels",
                       &kernel_array, &kernelsSize);
  if (msgpack_errors != 0) {
    printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
           "kernels lookup in program metadata");
    return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
  }

  for (size_t i = 0; i < kernelsSize; i++) {
    assert(msgpack_errors == 0);
    std::string kernelName;
    std::string symbolName;

    msgpack::byte_range element;
    msgpack_errors += array_lookup_element(kernel_array, i, &element);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "element lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    msgpack_errors += map_lookup_string(element, ".name", &kernelName);
    msgpack_errors += map_lookup_string(element, ".symbol", &symbolName);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "strings lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    // Make sure that kernelName + ".kd" == symbolName
    if ((kernelName + ".kd") != symbolName) {
      printf("[%s:%d] Kernel name mismatching symbol: %s != %s + .kd\n",
             __FILE__, __LINE__, symbolName.c_str(), kernelName.c_str());
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}};

    uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count;
    msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "sgpr count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.sgpr_count = sgpr_count;

    msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "vgpr count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.vgpr_count = vgpr_count;

    msgpack_errors +=
        map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "sgpr spill count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.sgpr_spill_count = sgpr_spill_count;

    msgpack_errors +=
        map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "vgpr spill count metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    info.vgpr_spill_count = vgpr_spill_count;

    size_t kernel_explicit_args_size = 0;
    uint64_t kernel_segment_size;
    msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size",
                                          &kernel_segment_size);
    if (msgpack_errors != 0) {
      printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
             "kernarg segment size metadata lookup in kernel metadata");
      return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
    }

    bool hasHiddenArgs = false;
    if (kernel_segment_size > 0) {
      uint64_t argsSize;
      size_t offset = 0;

      msgpack::byte_range args_array;
      msgpack_errors +=
          map_lookup_array(element, ".args", &args_array, &argsSize);
      if (msgpack_errors != 0) {
        printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
               "kernel args metadata lookup in kernel metadata");
        return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
      }

      info.num_args = argsSize;

      for (size_t a = 0; a < argsSize; ++a) {
        KernelArgMD lcArg;

        msgpack::byte_range args_element;
        msgpack_errors += array_lookup_element(args_array, a, &args_element);
        if (msgpack_errors != 0) {
          printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "iterate args map in kernel args metadata");
          return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
        }

        msgpack_errors += populate_kernelArgMD(args_element, &lcArg);
        if (msgpack_errors != 0) {
          printf("[%s:%d] %s failed\n", __FILE__, __LINE__,
                 "populate arg metadata in kernel args metadata");
          return HSA_STATUS_ERROR_INVALID_CODE_OBJECT;
        }
        // populate info with sizes and offsets
        info.arg_sizes.push_back(lcArg.size_);
        // v3 metadata provides an offset field rather than an align field
        size_t new_offset = lcArg.offset_;
        size_t padding = new_offset - offset;
        offset = new_offset;
        info.arg_offsets.push_back(lcArg.offset_);
        DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", a, lcArg.name_.c_str(),
                    lcArg.size_, lcArg.offset_);
        offset += lcArg.size_;

        // Check whether the arg is a hidden/implicit arg; this logic assumes
        // that all hidden args are 8-byte aligned.
        if (!isImplicit(lcArg.valueKind_)) {
          kernel_explicit_args_size += lcArg.size_;
        } else {
          hasHiddenArgs = true;
        }
        kernel_explicit_args_size += padding;
      }
    }
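    // Worked example of the padding bookkeeping above (illustrative values):
    // a 4-byte explicit arg at offset 0 followed by an 8-byte arg at offset 8
    // leaves padding == 8 - 4 == 4, so kernel_explicit_args_size accumulates
    // 4 + 4 + 8 == 16 bytes, matching the aligned layout.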
    // Add the size of the implicit args ATMI itself appends
    // (atmi_implicit_args_t). If the compiler already emitted hidden args
    // (global offsets, printf buffer, etc.), count only the explicit portion
    // and replace the compiler's hidden block with ATMI's own.
    info.kernel_segment_size =
        (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) +
        sizeof(atmi_implicit_args_t);
    DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(),
                kernel_segment_size, info.kernel_segment_size);

    // kernel received, now add it to the kernel info table
    KernelInfoTable[kernelName] = info;
  }

  return HSA_STATUS_SUCCESS;
}

static hsa_status_t
populate_InfoTables(hsa_executable_symbol_t symbol, int gpu,
                    std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
                    std::map<std::string, atl_symbol_info_t> &SymbolInfoTable) {
  hsa_symbol_kind_t type;

  uint32_t name_length;
  hsa_status_t err;
  err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE,
                                       &type);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Symbol info extraction", get_error_string(err));
    return err;
  }
  DEBUG_PRINT("Exec Symbol type: %d\n", type);
  if (type == HSA_SYMBOL_KIND_KERNEL) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      return err;
    }
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      return err;
    }
    // remove the three-character suffix ".kd" from the symbol name
    name[name_length - 3] = 0;

    atl_kernel_info_t info;
    std::string kernelName(name);
    // by now, the kernel info table should already have an entry
    // because the non-ROCr custom code object parsing is called before
    // iterating over the code object symbols using ROCr
    if (KernelInfoTable.find(kernelName) == KernelInfoTable.end()) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Finding the entry kernel info table",
             get_error_string(HSA_STATUS_ERROR_INVALID_CODE_OBJECT));
      exit(1);
    }
    // found, so assign and update
    info = KernelInfoTable[kernelName];

    /* Extract dispatch information from the symbol */
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT,
        &(info.kernel_object));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the symbol from the executable",
             get_error_string(err));
      return err;
    }
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE,
        &(info.group_segment_size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the group segment size from the executable",
             get_error_string(err));
      return err;
    }
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE,
        &(info.private_segment_size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Extracting the private segment from the executable",
             get_error_string(err));
      return err;
    }

    DEBUG_PRINT(
        "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes "
        "kernarg\n",
        kernelName.c_str(), info.kernel_object, info.group_segment_size,
        info.private_segment_size, info.kernel_segment_size);

    // assign it back to the kernel info table
    KernelInfoTable[kernelName] = info;
    free(name);
  } else if (type == HSA_SYMBOL_KIND_VARIABLE) {
    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      return err;
    }
    char *name = reinterpret_cast<char *>(malloc(name_length + 1));
    err = hsa_executable_symbol_get_info(symbol,
                                         HSA_EXECUTABLE_SYMBOL_INFO_NAME, name);
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info extraction", get_error_string(err));
      return err;
    }
    name[name_length] = 0;

    atl_symbol_info_t info;

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info address extraction", get_error_string(err));
      return err;
    }

    err = hsa_executable_symbol_get_info(
        symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size));
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Symbol info size extraction", get_error_string(err));
      return err;
    }

    DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr,
                info.size);
    err = register_allocation(reinterpret_cast<void *>(info.addr),
                              (size_t)info.size, ATMI_DEVTYPE_GPU);
    if (err != HSA_STATUS_SUCCESS) {
      return err;
    }
    SymbolInfoTable[std::string(name)] = info;
    free(name);
  } else {
    DEBUG_PRINT("Symbol is an indirect function\n");
  }
  return HSA_STATUS_SUCCESS;
}

hsa_status_t RegisterModuleFromMemory(
    std::map<std::string, atl_kernel_info_t> &KernelInfoTable,
    std::map<std::string, atl_symbol_info_t> &SymbolInfoTable,
    void *module_bytes, size_t module_size, int gpu,
    hsa_status_t (*on_deserialized_data)(void *data, size_t size,
                                         void *cb_state),
    void *cb_state, std::vector<hsa_executable_t> &HSAExecutables) {
  hsa_status_t err;
  assert(gpu >= 0);

  DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu);
  ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(gpu);
  hsa_agent_t agent = proc.agent();
  hsa_executable_t executable = {0};
  hsa_profile_t agent_profile;

  err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Query the agent profile", get_error_string(err));
    return HSA_STATUS_ERROR;
  }
  // FIXME: Assume that every profile is FULL until we understand how to build
  // GCN with base profile
  agent_profile = HSA_PROFILE_FULL;
  /* Create the empty executable. */
  err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "",
                              &executable);
  if (err != HSA_STATUS_SUCCESS) {
    printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
           "Create the executable", get_error_string(err));
    return HSA_STATUS_ERROR;
  }

  bool module_load_success = false;
  do // Existing control flow used continue; preserve that for this patch
  {
    {
      // Some metadata is not available through the ROCr API, so use the
      // custom code object parser above to collect it first.
      err = get_code_object_custom_metadata(module_bytes, module_size, gpu,
                                            KernelInfoTable);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Getting custom code object metadata",
                    get_error_string(err));
        continue;
      }

      // Deserialize code object.
      hsa_code_object_t code_object = {0};
      err = hsa_code_object_deserialize(module_bytes, module_size, NULL,
                                        &code_object);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Code Object Deserialization", get_error_string(err));
        continue;
      }
      assert(0 != code_object.handle);

      // Mutating the device image here avoids another allocation & memcpy
      void *code_object_alloc_data =
          reinterpret_cast<void *>(code_object.handle);
      hsa_status_t atmi_err =
          on_deserialized_data(code_object_alloc_data, module_size, cb_state);
      if (atmi_err != HSA_STATUS_SUCCESS) {
        printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
               "Error in deserialized_data callback",
               get_atmi_error_string(atmi_err));
        return atmi_err;
      }

      /* Load the code object. */
      err =
          hsa_executable_load_code_object(executable, agent, code_object, NULL);
      if (err != HSA_STATUS_SUCCESS) {
        DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
                    "Loading the code object", get_error_string(err));
        continue;
      }

      // cannot iterate over symbols until the executable is frozen
    }
    module_load_success = true;
  } while (0);
  DEBUG_PRINT("Module loaded successfully? %d\n", module_load_success);
  if (module_load_success) {
    /* Freeze the executable; it can now be queried for symbols. */
    err = hsa_executable_freeze(executable, "");
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Freeze the executable", get_error_string(err));
      return HSA_STATUS_ERROR;
    }

    err = hsa::executable_iterate_symbols(
        executable,
        [&](hsa_executable_t, hsa_executable_symbol_t symbol) -> hsa_status_t {
          return populate_InfoTables(symbol, gpu, KernelInfoTable,
                                     SymbolInfoTable);
        });
    if (err != HSA_STATUS_SUCCESS) {
      printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__,
             "Iterating over symbols for executable", get_error_string(err));
      return HSA_STATUS_ERROR;
    }

    // save the executable and destroy during finalize
    HSAExecutables.push_back(executable);
    return HSA_STATUS_SUCCESS;
  } else {
    return HSA_STATUS_ERROR;
  }
}
} // namespace core
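// Hedged caller-side sketch of core::RegisterModuleFromMemory (image pointer,
// size, and table names are hypothetical; the callback here is a no-op):
//
//   std::map<std::string, atl_kernel_info_t> kernels;
//   std::map<std::string, atl_symbol_info_t> symbols;
//   std::vector<hsa_executable_t> executables;
//   hsa_status_t rc = core::RegisterModuleFromMemory(
//       kernels, symbols, image_bytes, image_size, /*gpu=*/0,
//       [](void *, size_t, void *) { return HSA_STATUS_SUCCESS; },
//       /*cb_state=*/nullptr, executables);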