1 /*===-------------------------------------------------------------------------- 2 * ATMI (Asynchronous Task and Memory Interface) 3 * 4 * This file is distributed under the MIT License. See LICENSE.txt for details. 5 *===------------------------------------------------------------------------*/ 6 #include <gelf.h> 7 #include <libelf.h> 8 9 #include <cassert> 10 #include <cstdarg> 11 #include <fstream> 12 #include <iomanip> 13 #include <iostream> 14 #include <set> 15 #include <string> 16 17 #include "internal.h" 18 #include "machine.h" 19 #include "rt.h" 20 21 #include "msgpack.h" 22 23 namespace hsa { 24 // Wrap HSA iterate API in a shim that allows passing general callables 25 template <typename C> 26 hsa_status_t executable_iterate_symbols(hsa_executable_t executable, C cb) { 27 auto L = [](hsa_executable_t executable, hsa_executable_symbol_t symbol, 28 void *data) -> hsa_status_t { 29 C *unwrapped = static_cast<C *>(data); 30 return (*unwrapped)(executable, symbol); 31 }; 32 return hsa_executable_iterate_symbols(executable, L, 33 static_cast<void *>(&cb)); 34 } 35 } // namespace hsa 36 37 typedef unsigned char *address; 38 /* 39 * Note descriptors. 40 */ 41 typedef struct { 42 uint32_t n_namesz; /* Length of note's name. */ 43 uint32_t n_descsz; /* Length of note's value. */ 44 uint32_t n_type; /* Type of note. */ 45 // then name 46 // then padding, optional 47 // then desc, at 4 byte alignment (not 8, despite being elf64) 48 } Elf_Note; 49 50 // The following include file and following structs/enums 51 // have been replicated on a per-use basis below. For example, 52 // llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, 53 // but we may care only about kernargSegmentSize_ for now, so 54 // we just include that field in our KernelMD implementation. We 55 // chose this approach to replicate in order to avoid forcing 56 // a dependency on LLVM_INCLUDE_DIR just to compile the runtime. 57 // #include "llvm/Support/AMDGPUMetadata.h" 58 // typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD; 59 // typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD; 60 // typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD; 61 // using llvm::AMDGPU::HSAMD::AccessQualifier; 62 // using llvm::AMDGPU::HSAMD::AddressSpaceQualifier; 63 // using llvm::AMDGPU::HSAMD::ValueKind; 64 // using llvm::AMDGPU::HSAMD::ValueType; 65 66 class KernelArgMD { 67 public: 68 enum class ValueKind { 69 HiddenGlobalOffsetX, 70 HiddenGlobalOffsetY, 71 HiddenGlobalOffsetZ, 72 HiddenNone, 73 HiddenPrintfBuffer, 74 HiddenDefaultQueue, 75 HiddenCompletionAction, 76 HiddenMultiGridSyncArg, 77 HiddenHostcallBuffer, 78 Unknown 79 }; 80 81 KernelArgMD() 82 : name_(std::string()), typeName_(std::string()), size_(0), offset_(0), 83 align_(0), valueKind_(ValueKind::Unknown) {} 84 85 // fields 86 std::string name_; 87 std::string typeName_; 88 uint32_t size_; 89 uint32_t offset_; 90 uint32_t align_; 91 ValueKind valueKind_; 92 }; 93 94 class KernelMD { 95 public: 96 KernelMD() : kernargSegmentSize_(0ull) {} 97 98 // fields 99 uint64_t kernargSegmentSize_; 100 }; 101 102 static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = { 103 // Including only those fields that are relevant to the runtime. 104 // {"ByValue", KernelArgMD::ValueKind::ByValue}, 105 // {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer}, 106 // {"DynamicSharedPointer", 107 // KernelArgMD::ValueKind::DynamicSharedPointer}, 108 // {"Sampler", KernelArgMD::ValueKind::Sampler}, 109 // {"Image", KernelArgMD::ValueKind::Image}, 110 // {"Pipe", KernelArgMD::ValueKind::Pipe}, 111 // {"Queue", KernelArgMD::ValueKind::Queue}, 112 {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX}, 113 {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY}, 114 {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ}, 115 {"HiddenNone", KernelArgMD::ValueKind::HiddenNone}, 116 {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer}, 117 {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue}, 118 {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction}, 119 {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg}, 120 {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer}, 121 // v3 122 // {"by_value", KernelArgMD::ValueKind::ByValue}, 123 // {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer}, 124 // {"dynamic_shared_pointer", 125 // KernelArgMD::ValueKind::DynamicSharedPointer}, 126 // {"sampler", KernelArgMD::ValueKind::Sampler}, 127 // {"image", KernelArgMD::ValueKind::Image}, 128 // {"pipe", KernelArgMD::ValueKind::Pipe}, 129 // {"queue", KernelArgMD::ValueKind::Queue}, 130 {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX}, 131 {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY}, 132 {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ}, 133 {"hidden_none", KernelArgMD::ValueKind::HiddenNone}, 134 {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer}, 135 {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue}, 136 {"hidden_completion_action", 137 KernelArgMD::ValueKind::HiddenCompletionAction}, 138 {"hidden_multigrid_sync_arg", 139 KernelArgMD::ValueKind::HiddenMultiGridSyncArg}, 140 {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer}, 141 }; 142 143 // global variables. TODO: Get rid of these 144 atmi_machine_t g_atmi_machine; 145 ATLMachine g_atl_machine; 146 147 std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools; 148 149 std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable; 150 std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable; 151 152 bool g_atmi_initialized = false; 153 bool g_atmi_hostcall_required = false; 154 155 /* 156 atlc is all internal global values. 157 The structure atl_context_t is defined in atl_internal.h 158 Most references will use the global structure prefix atlc. 159 */ 160 atl_context_t atlc = {.struct_initialized = false}; 161 162 namespace core { 163 /* Machine Info */ 164 atmi_machine_t *Runtime::GetMachineInfo() { 165 if (!atlc.g_hsa_initialized) 166 return NULL; 167 return &g_atmi_machine; 168 } 169 170 static void atl_set_atmi_initialized() { 171 // FIXME: thread safe? locks? 172 g_atmi_initialized = true; 173 } 174 175 static void atl_reset_atmi_initialized() { 176 // FIXME: thread safe? locks? 177 g_atmi_initialized = false; 178 } 179 180 bool atl_is_atmi_initialized() { return g_atmi_initialized; } 181 182 hsa_status_t allow_access_to_all_gpu_agents(void *ptr) { 183 std::vector<ATLGPUProcessor> &gpu_procs = 184 g_atl_machine.processors<ATLGPUProcessor>(); 185 std::vector<hsa_agent_t> agents; 186 for (uint32_t i = 0; i < gpu_procs.size(); i++) { 187 agents.push_back(gpu_procs[i].agent()); 188 } 189 return hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr); 190 } 191 192 atmi_status_t Runtime::Initialize() { 193 atmi_devtype_t devtype = ATMI_DEVTYPE_GPU; 194 if (atl_is_atmi_initialized()) 195 return ATMI_STATUS_SUCCESS; 196 197 if (devtype == ATMI_DEVTYPE_ALL || devtype & ATMI_DEVTYPE_GPU) { 198 atmi_status_t rc = atl_init_gpu_context(); 199 if (rc != ATMI_STATUS_SUCCESS) { 200 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "GPU context init", 201 get_atmi_error_string(atl_init_gpu_context())); 202 return rc; 203 } 204 } 205 206 atl_set_atmi_initialized(); 207 return ATMI_STATUS_SUCCESS; 208 } 209 210 atmi_status_t Runtime::Finalize() { 211 atmi_status_t rc = ATMI_STATUS_SUCCESS; 212 for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) { 213 SymbolInfoTable[i].clear(); 214 } 215 SymbolInfoTable.clear(); 216 for (uint32_t i = 0; i < KernelInfoTable.size(); i++) { 217 KernelInfoTable[i].clear(); 218 } 219 KernelInfoTable.clear(); 220 221 atl_reset_atmi_initialized(); 222 hsa_status_t err = hsa_shut_down(); 223 if (err != HSA_STATUS_SUCCESS) { 224 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Shutting down HSA", 225 get_error_string(err)); 226 rc = ATMI_STATUS_ERROR; 227 } 228 229 return rc; 230 } 231 232 static void atmi_init_context_structs() { 233 atlc.struct_initialized = true; /* This only gets called one time */ 234 atlc.g_hsa_initialized = false; 235 atlc.g_gpu_initialized = false; 236 atlc.g_tasks_initialized = false; 237 } 238 239 // Implement memory_pool iteration function 240 static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool, 241 void *data) { 242 ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data); 243 hsa_status_t err = HSA_STATUS_SUCCESS; 244 // Check if the memory_pool is allowed to allocate, i.e. do not return group 245 // memory 246 bool alloc_allowed = false; 247 err = hsa_amd_memory_pool_get_info( 248 memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED, 249 &alloc_allowed); 250 if (err != HSA_STATUS_SUCCESS) { 251 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 252 "Alloc allowed in memory pool check", get_error_string(err)); 253 return err; 254 } 255 if (alloc_allowed) { 256 uint32_t global_flag = 0; 257 err = hsa_amd_memory_pool_get_info( 258 memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag); 259 if (err != HSA_STATUS_SUCCESS) { 260 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 261 "Get memory pool info", get_error_string(err)); 262 return err; 263 } 264 if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) { 265 ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED); 266 proc->addMemory(new_mem); 267 if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) { 268 DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle); 269 atl_gpu_kernarg_pools.push_back(memory_pool); 270 } 271 } else { 272 ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED); 273 proc->addMemory(new_mem); 274 } 275 } 276 277 return err; 278 } 279 280 static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) { 281 hsa_status_t err = HSA_STATUS_SUCCESS; 282 hsa_device_type_t device_type; 283 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type); 284 if (err != HSA_STATUS_SUCCESS) { 285 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 286 "Get device type info", get_error_string(err)); 287 return err; 288 } 289 switch (device_type) { 290 case HSA_DEVICE_TYPE_CPU: { 291 ATLCPUProcessor new_proc(agent); 292 err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info, 293 &new_proc); 294 if (err != HSA_STATUS_SUCCESS) { 295 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 296 "Iterate all memory pools", get_error_string(err)); 297 return err; 298 } 299 g_atl_machine.addProcessor(new_proc); 300 } break; 301 case HSA_DEVICE_TYPE_GPU: { 302 hsa_profile_t profile; 303 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile); 304 if (err != HSA_STATUS_SUCCESS) { 305 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 306 "Query the agent profile", get_error_string(err)); 307 return err; 308 } 309 atmi_devtype_t gpu_type; 310 gpu_type = 311 (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU; 312 ATLGPUProcessor new_proc(agent, gpu_type); 313 err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info, 314 &new_proc); 315 if (err != HSA_STATUS_SUCCESS) { 316 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 317 "Iterate all memory pools", get_error_string(err)); 318 return err; 319 } 320 g_atl_machine.addProcessor(new_proc); 321 } break; 322 case HSA_DEVICE_TYPE_DSP: { 323 err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 324 } break; 325 } 326 327 return err; 328 } 329 330 hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) { 331 hsa_region_segment_t segment; 332 hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment); 333 if (segment != HSA_REGION_SEGMENT_GLOBAL) { 334 return HSA_STATUS_SUCCESS; 335 } 336 hsa_region_global_flag_t flags; 337 hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags); 338 if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) { 339 hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data); 340 *ret = region; 341 return HSA_STATUS_INFO_BREAK; 342 } 343 return HSA_STATUS_SUCCESS; 344 } 345 346 /* Determines if a memory region can be used for kernarg allocations. */ 347 static hsa_status_t get_kernarg_memory_region(hsa_region_t region, void *data) { 348 hsa_region_segment_t segment; 349 hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment); 350 if (HSA_REGION_SEGMENT_GLOBAL != segment) { 351 return HSA_STATUS_SUCCESS; 352 } 353 354 hsa_region_global_flag_t flags; 355 hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags); 356 if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) { 357 hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data); 358 *ret = region; 359 return HSA_STATUS_INFO_BREAK; 360 } 361 362 return HSA_STATUS_SUCCESS; 363 } 364 365 static hsa_status_t init_compute_and_memory() { 366 hsa_status_t err; 367 368 /* Iterate over the agents and pick the gpu agent */ 369 err = hsa_iterate_agents(get_agent_info, NULL); 370 if (err == HSA_STATUS_INFO_BREAK) { 371 err = HSA_STATUS_SUCCESS; 372 } 373 if (err != HSA_STATUS_SUCCESS) { 374 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Getting a gpu agent", 375 get_error_string(err)); 376 return err; 377 } 378 379 /* Init all devices or individual device types? */ 380 std::vector<ATLCPUProcessor> &cpu_procs = 381 g_atl_machine.processors<ATLCPUProcessor>(); 382 std::vector<ATLGPUProcessor> &gpu_procs = 383 g_atl_machine.processors<ATLGPUProcessor>(); 384 /* For CPU memory pools, add other devices that can access them directly 385 * or indirectly */ 386 for (auto &cpu_proc : cpu_procs) { 387 for (auto &cpu_mem : cpu_proc.memories()) { 388 hsa_amd_memory_pool_t pool = cpu_mem.memory(); 389 for (auto &gpu_proc : gpu_procs) { 390 hsa_agent_t agent = gpu_proc.agent(); 391 hsa_amd_memory_pool_access_t access; 392 hsa_amd_agent_memory_pool_get_info( 393 agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access); 394 if (access != 0) { 395 // this means not NEVER, but could be YES or NO 396 // add this memory pool to the proc 397 gpu_proc.addMemory(cpu_mem); 398 } 399 } 400 } 401 } 402 403 /* FIXME: are the below combinations of procs and memory pools needed? 404 * all to all compare procs with their memory pools and add those memory 405 * pools that are accessible by the target procs */ 406 for (auto &gpu_proc : gpu_procs) { 407 for (auto &gpu_mem : gpu_proc.memories()) { 408 hsa_amd_memory_pool_t pool = gpu_mem.memory(); 409 for (auto &cpu_proc : cpu_procs) { 410 hsa_agent_t agent = cpu_proc.agent(); 411 hsa_amd_memory_pool_access_t access; 412 hsa_amd_agent_memory_pool_get_info( 413 agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access); 414 if (access != 0) { 415 // this means not NEVER, but could be YES or NO 416 // add this memory pool to the proc 417 cpu_proc.addMemory(gpu_mem); 418 } 419 } 420 } 421 } 422 423 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size(); 424 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size(); 425 426 size_t num_procs = cpu_procs.size() + gpu_procs.size(); 427 // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs * 428 // sizeof(atmi_device_t)); 429 atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>( 430 malloc(num_procs * sizeof(atmi_device_t))); 431 int num_iGPUs = 0; 432 int num_dGPUs = 0; 433 for (uint32_t i = 0; i < gpu_procs.size(); i++) { 434 if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU) 435 num_iGPUs++; 436 else 437 num_dGPUs++; 438 } 439 assert(num_iGPUs + num_dGPUs == gpu_procs.size() && 440 "Number of dGPUs and iGPUs do not add up"); 441 DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size()); 442 DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs); 443 DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs); 444 DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size()); 445 446 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs; 447 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs; 448 449 int cpus_begin = 0; 450 int cpus_end = cpu_procs.size(); 451 int gpus_begin = cpu_procs.size(); 452 int gpus_end = cpu_procs.size() + gpu_procs.size(); 453 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin]; 454 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin]; 455 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin]; 456 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin]; 457 int proc_index = 0; 458 for (int i = cpus_begin; i < cpus_end; i++) { 459 all_devices[i].type = cpu_procs[proc_index].type(); 460 461 std::vector<ATLMemory> memories = cpu_procs[proc_index].memories(); 462 int fine_memories_size = 0; 463 int coarse_memories_size = 0; 464 DEBUG_PRINT("CPU memory types:\t"); 465 for (auto &memory : memories) { 466 atmi_memtype_t type = memory.type(); 467 if (type == ATMI_MEMTYPE_FINE_GRAINED) { 468 fine_memories_size++; 469 DEBUG_PRINT("Fine\t"); 470 } else { 471 coarse_memories_size++; 472 DEBUG_PRINT("Coarse\t"); 473 } 474 } 475 DEBUG_PRINT("\nFine Memories : %d", fine_memories_size); 476 DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size); 477 proc_index++; 478 } 479 proc_index = 0; 480 for (int i = gpus_begin; i < gpus_end; i++) { 481 all_devices[i].type = gpu_procs[proc_index].type(); 482 483 std::vector<ATLMemory> memories = gpu_procs[proc_index].memories(); 484 int fine_memories_size = 0; 485 int coarse_memories_size = 0; 486 DEBUG_PRINT("GPU memory types:\t"); 487 for (auto &memory : memories) { 488 atmi_memtype_t type = memory.type(); 489 if (type == ATMI_MEMTYPE_FINE_GRAINED) { 490 fine_memories_size++; 491 DEBUG_PRINT("Fine\t"); 492 } else { 493 coarse_memories_size++; 494 DEBUG_PRINT("Coarse\t"); 495 } 496 } 497 DEBUG_PRINT("\nFine Memories : %d", fine_memories_size); 498 DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size); 499 proc_index++; 500 } 501 proc_index = 0; 502 hsa_region_t atl_cpu_kernarg_region; 503 atl_cpu_kernarg_region.handle = (uint64_t)-1; 504 if (cpu_procs.size() > 0) { 505 err = hsa_agent_iterate_regions( 506 cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region); 507 if (err == HSA_STATUS_INFO_BREAK) { 508 err = HSA_STATUS_SUCCESS; 509 } 510 err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR 511 : HSA_STATUS_SUCCESS; 512 if (err != HSA_STATUS_SUCCESS) { 513 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 514 "Finding a CPU kernarg memory region handle", 515 get_error_string(err)); 516 return err; 517 } 518 } 519 hsa_region_t atl_gpu_kernarg_region; 520 /* Find a memory region that supports kernel arguments. */ 521 atl_gpu_kernarg_region.handle = (uint64_t)-1; 522 if (gpu_procs.size() > 0) { 523 hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region, 524 &atl_gpu_kernarg_region); 525 err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR 526 : HSA_STATUS_SUCCESS; 527 if (err != HSA_STATUS_SUCCESS) { 528 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 529 "Finding a kernarg memory region", get_error_string(err)); 530 return err; 531 } 532 } 533 if (num_procs > 0) 534 return HSA_STATUS_SUCCESS; 535 else 536 return HSA_STATUS_ERROR_NOT_INITIALIZED; 537 } 538 539 hsa_status_t init_hsa() { 540 if (atlc.g_hsa_initialized == false) { 541 DEBUG_PRINT("Initializing HSA..."); 542 hsa_status_t err = hsa_init(); 543 if (err != HSA_STATUS_SUCCESS) { 544 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 545 "Initializing the hsa runtime", get_error_string(err)); 546 return err; 547 } 548 if (err != HSA_STATUS_SUCCESS) 549 return err; 550 551 err = init_compute_and_memory(); 552 if (err != HSA_STATUS_SUCCESS) 553 return err; 554 if (err != HSA_STATUS_SUCCESS) { 555 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 556 "After initializing compute and memory", get_error_string(err)); 557 return err; 558 } 559 560 int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>(); 561 KernelInfoTable.resize(gpu_count); 562 SymbolInfoTable.resize(gpu_count); 563 for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) 564 SymbolInfoTable[i].clear(); 565 for (uint32_t i = 0; i < KernelInfoTable.size(); i++) 566 KernelInfoTable[i].clear(); 567 atlc.g_hsa_initialized = true; 568 DEBUG_PRINT("done\n"); 569 } 570 return HSA_STATUS_SUCCESS; 571 } 572 573 void init_tasks() { 574 if (atlc.g_tasks_initialized != false) 575 return; 576 std::vector<hsa_agent_t> gpu_agents; 577 int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>(); 578 for (int gpu = 0; gpu < gpu_count; gpu++) { 579 atmi_place_t place = ATMI_PLACE_GPU(0, gpu); 580 ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place); 581 gpu_agents.push_back(proc.agent()); 582 } 583 atlc.g_tasks_initialized = true; 584 } 585 586 hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) { 587 #if (ROCM_VERSION_MAJOR >= 3) || \ 588 (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3) 589 if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) { 590 #else 591 if (event->event_type == GPU_MEMORY_FAULT_EVENT) { 592 #endif 593 hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault; 594 // memory_fault.agent 595 // memory_fault.virtual_address 596 // memory_fault.fault_reason_mask 597 // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address); 598 std::stringstream stream; 599 stream << std::hex << (uintptr_t)memory_fault.virtual_address; 600 std::string addr("0x" + stream.str()); 601 602 std::string err_string = "[GPU Memory Error] Addr: " + addr; 603 err_string += " Reason: "; 604 if (!(memory_fault.fault_reason_mask & 0x00111111)) { 605 err_string += "No Idea! "; 606 } else { 607 if (memory_fault.fault_reason_mask & 0x00000001) 608 err_string += "Page not present or supervisor privilege. "; 609 if (memory_fault.fault_reason_mask & 0x00000010) 610 err_string += "Write access to a read-only page. "; 611 if (memory_fault.fault_reason_mask & 0x00000100) 612 err_string += "Execute access to a page marked NX. "; 613 if (memory_fault.fault_reason_mask & 0x00001000) 614 err_string += "Host access only. "; 615 if (memory_fault.fault_reason_mask & 0x00010000) 616 err_string += "ECC failure (if supported by HW). "; 617 if (memory_fault.fault_reason_mask & 0x00100000) 618 err_string += "Can't determine the exact fault address. "; 619 } 620 fprintf(stderr, "%s\n", err_string.c_str()); 621 return HSA_STATUS_ERROR; 622 } 623 return HSA_STATUS_SUCCESS; 624 } 625 626 atmi_status_t atl_init_gpu_context() { 627 if (atlc.struct_initialized == false) 628 atmi_init_context_structs(); 629 if (atlc.g_gpu_initialized != false) 630 return ATMI_STATUS_SUCCESS; 631 632 hsa_status_t err; 633 err = init_hsa(); 634 if (err != HSA_STATUS_SUCCESS) 635 return ATMI_STATUS_ERROR; 636 637 err = hsa_amd_register_system_event_handler(callbackEvent, NULL); 638 if (err != HSA_STATUS_SUCCESS) { 639 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 640 "Registering the system for memory faults", get_error_string(err)); 641 return ATMI_STATUS_ERROR; 642 } 643 644 init_tasks(); 645 atlc.g_gpu_initialized = true; 646 return ATMI_STATUS_SUCCESS; 647 } 648 649 static bool isImplicit(KernelArgMD::ValueKind value_kind) { 650 switch (value_kind) { 651 case KernelArgMD::ValueKind::HiddenGlobalOffsetX: 652 case KernelArgMD::ValueKind::HiddenGlobalOffsetY: 653 case KernelArgMD::ValueKind::HiddenGlobalOffsetZ: 654 case KernelArgMD::ValueKind::HiddenNone: 655 case KernelArgMD::ValueKind::HiddenPrintfBuffer: 656 case KernelArgMD::ValueKind::HiddenDefaultQueue: 657 case KernelArgMD::ValueKind::HiddenCompletionAction: 658 case KernelArgMD::ValueKind::HiddenMultiGridSyncArg: 659 case KernelArgMD::ValueKind::HiddenHostcallBuffer: 660 return true; 661 default: 662 return false; 663 } 664 } 665 666 static std::pair<unsigned char *, unsigned char *> 667 find_metadata(void *binary, size_t binSize) { 668 std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr}; 669 670 Elf *e = elf_memory(static_cast<char *>(binary), binSize); 671 if (elf_kind(e) != ELF_K_ELF) { 672 return failure; 673 } 674 675 size_t numpHdrs; 676 if (elf_getphdrnum(e, &numpHdrs) != 0) { 677 return failure; 678 } 679 680 for (size_t i = 0; i < numpHdrs; ++i) { 681 GElf_Phdr pHdr; 682 if (gelf_getphdr(e, i, &pHdr) != &pHdr) { 683 continue; 684 } 685 // Look for the runtime metadata note 686 if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) { 687 // Iterate over the notes in this segment 688 address ptr = (address)binary + pHdr.p_offset; 689 address segmentEnd = ptr + pHdr.p_filesz; 690 691 while (ptr < segmentEnd) { 692 Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr); 693 address name = (address)¬e[1]; 694 695 if (note->n_type == 7 || note->n_type == 8) { 696 return failure; 697 } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ && 698 note->n_namesz == sizeof "AMD" && 699 !memcmp(name, "AMD", note->n_namesz)) { 700 // code object v2 uses yaml metadata, no longer supported 701 return failure; 702 } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ && 703 note->n_namesz == sizeof "AMDGPU" && 704 !memcmp(name, "AMDGPU", note->n_namesz)) { 705 706 // n_descsz = 485 707 // value is padded to 4 byte alignment, may want to move end up to 708 // match 709 size_t offset = sizeof(uint32_t) * 3 /* fields */ 710 + sizeof("AMDGPU") /* name */ 711 + 1 /* padding to 4 byte alignment */; 712 713 // Including the trailing padding means both pointers are 4 bytes 714 // aligned, which may be useful later. 715 unsigned char *metadata_start = (unsigned char *)ptr + offset; 716 unsigned char *metadata_end = 717 metadata_start + core::alignUp(note->n_descsz, 4); 718 return {metadata_start, metadata_end}; 719 } 720 ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) + 721 core::alignUp(note->n_descsz, sizeof(int)); 722 } 723 } 724 } 725 726 return failure; 727 } 728 729 namespace { 730 int map_lookup_array(msgpack::byte_range message, const char *needle, 731 msgpack::byte_range *res, uint64_t *size) { 732 unsigned count = 0; 733 struct s : msgpack::functors_defaults<s> { 734 s(unsigned &count, uint64_t *size) : count(count), size(size) {} 735 unsigned &count; 736 uint64_t *size; 737 const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) { 738 count++; 739 *size = N; 740 return bytes.end; 741 } 742 }; 743 744 msgpack::foreach_map(message, 745 [&](msgpack::byte_range key, msgpack::byte_range value) { 746 if (msgpack::message_is_string(key, needle)) { 747 // If the message is an array, record number of 748 // elements in *size 749 msgpack::handle_msgpack<s>(value, {count, size}); 750 // return the whole array 751 *res = value; 752 } 753 }); 754 // Only claim success if exactly one key/array pair matched 755 return count != 1; 756 } 757 758 int map_lookup_string(msgpack::byte_range message, const char *needle, 759 std::string *res) { 760 unsigned count = 0; 761 struct s : public msgpack::functors_defaults<s> { 762 s(unsigned &count, std::string *res) : count(count), res(res) {} 763 unsigned &count; 764 std::string *res; 765 void handle_string(size_t N, const unsigned char *str) { 766 count++; 767 *res = std::string(str, str + N); 768 } 769 }; 770 msgpack::foreach_map(message, 771 [&](msgpack::byte_range key, msgpack::byte_range value) { 772 if (msgpack::message_is_string(key, needle)) { 773 msgpack::handle_msgpack<s>(value, {count, res}); 774 } 775 }); 776 return count != 1; 777 } 778 779 int map_lookup_uint64_t(msgpack::byte_range message, const char *needle, 780 uint64_t *res) { 781 unsigned count = 0; 782 msgpack::foreach_map(message, 783 [&](msgpack::byte_range key, msgpack::byte_range value) { 784 if (msgpack::message_is_string(key, needle)) { 785 msgpack::foronly_unsigned(value, [&](uint64_t x) { 786 count++; 787 *res = x; 788 }); 789 } 790 }); 791 return count != 1; 792 } 793 794 int array_lookup_element(msgpack::byte_range message, uint64_t elt, 795 msgpack::byte_range *res) { 796 int rc = 1; 797 uint64_t i = 0; 798 msgpack::foreach_array(message, [&](msgpack::byte_range value) { 799 if (i == elt) { 800 *res = value; 801 rc = 0; 802 } 803 i++; 804 }); 805 return rc; 806 } 807 808 int populate_kernelArgMD(msgpack::byte_range args_element, 809 KernelArgMD *kernelarg) { 810 using namespace msgpack; 811 int error = 0; 812 foreach_map(args_element, [&](byte_range key, byte_range value) -> void { 813 if (message_is_string(key, ".name")) { 814 foronly_string(value, [&](size_t N, const unsigned char *str) { 815 kernelarg->name_ = std::string(str, str + N); 816 }); 817 } else if (message_is_string(key, ".type_name")) { 818 foronly_string(value, [&](size_t N, const unsigned char *str) { 819 kernelarg->typeName_ = std::string(str, str + N); 820 }); 821 } else if (message_is_string(key, ".size")) { 822 foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; }); 823 } else if (message_is_string(key, ".offset")) { 824 foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; }); 825 } else if (message_is_string(key, ".value_kind")) { 826 foronly_string(value, [&](size_t N, const unsigned char *str) { 827 std::string s = std::string(str, str + N); 828 auto itValueKind = ArgValueKind.find(s); 829 if (itValueKind != ArgValueKind.end()) { 830 kernelarg->valueKind_ = itValueKind->second; 831 } 832 }); 833 } 834 }); 835 return error; 836 } 837 } // namespace 838 839 static hsa_status_t get_code_object_custom_metadata(void *binary, 840 size_t binSize, int gpu) { 841 // parse code object with different keys from v2 842 // also, the kernel name is not the same as the symbol name -- so a 843 // symbol->name map is needed 844 845 std::pair<unsigned char *, unsigned char *> metadata = 846 find_metadata(binary, binSize); 847 if (!metadata.first) { 848 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 849 } 850 851 uint64_t kernelsSize = 0; 852 int msgpack_errors = 0; 853 msgpack::byte_range kernel_array; 854 msgpack_errors = 855 map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels", 856 &kernel_array, &kernelsSize); 857 if (msgpack_errors != 0) { 858 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 859 "kernels lookup in program metadata"); 860 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 861 } 862 863 for (size_t i = 0; i < kernelsSize; i++) { 864 assert(msgpack_errors == 0); 865 std::string kernelName; 866 std::string symbolName; 867 868 msgpack::byte_range element; 869 msgpack_errors += array_lookup_element(kernel_array, i, &element); 870 if (msgpack_errors != 0) { 871 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 872 "element lookup in kernel metadata"); 873 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 874 } 875 876 msgpack_errors += map_lookup_string(element, ".name", &kernelName); 877 msgpack_errors += map_lookup_string(element, ".symbol", &symbolName); 878 if (msgpack_errors != 0) { 879 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 880 "strings lookup in kernel metadata"); 881 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 882 } 883 884 // Make sure that kernelName + ".kd" == symbolName 885 if ((kernelName + ".kd") != symbolName) { 886 printf("[%s:%d] Kernel name mismatching symbol: %s != %s + .kd\n", 887 __FILE__, __LINE__, symbolName.c_str(), kernelName.c_str()); 888 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 889 } 890 891 atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}}; 892 893 uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count; 894 msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count); 895 if (msgpack_errors != 0) { 896 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 897 "sgpr count metadata lookup in kernel metadata"); 898 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 899 } 900 901 info.sgpr_count = sgpr_count; 902 903 msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count); 904 if (msgpack_errors != 0) { 905 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 906 "vgpr count metadata lookup in kernel metadata"); 907 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 908 } 909 910 info.vgpr_count = vgpr_count; 911 912 msgpack_errors += 913 map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count); 914 if (msgpack_errors != 0) { 915 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 916 "sgpr spill count metadata lookup in kernel metadata"); 917 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 918 } 919 920 info.sgpr_spill_count = sgpr_spill_count; 921 922 msgpack_errors += 923 map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count); 924 if (msgpack_errors != 0) { 925 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 926 "vgpr spill count metadata lookup in kernel metadata"); 927 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 928 } 929 930 info.vgpr_spill_count = vgpr_spill_count; 931 932 size_t kernel_explicit_args_size = 0; 933 uint64_t kernel_segment_size; 934 msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size", 935 &kernel_segment_size); 936 if (msgpack_errors != 0) { 937 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 938 "kernarg segment size metadata lookup in kernel metadata"); 939 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 940 } 941 942 bool hasHiddenArgs = false; 943 if (kernel_segment_size > 0) { 944 uint64_t argsSize; 945 size_t offset = 0; 946 947 msgpack::byte_range args_array; 948 msgpack_errors += 949 map_lookup_array(element, ".args", &args_array, &argsSize); 950 if (msgpack_errors != 0) { 951 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 952 "kernel args metadata lookup in kernel metadata"); 953 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 954 } 955 956 info.num_args = argsSize; 957 958 for (size_t i = 0; i < argsSize; ++i) { 959 KernelArgMD lcArg; 960 961 msgpack::byte_range args_element; 962 msgpack_errors += array_lookup_element(args_array, i, &args_element); 963 if (msgpack_errors != 0) { 964 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 965 "iterate args map in kernel args metadata"); 966 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 967 } 968 969 msgpack_errors += populate_kernelArgMD(args_element, &lcArg); 970 if (msgpack_errors != 0) { 971 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 972 "iterate args map in kernel args metadata"); 973 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 974 } 975 // populate info with sizes and offsets 976 info.arg_sizes.push_back(lcArg.size_); 977 // v3 has offset field and not align field 978 size_t new_offset = lcArg.offset_; 979 size_t padding = new_offset - offset; 980 offset = new_offset; 981 info.arg_offsets.push_back(lcArg.offset_); 982 DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(), 983 lcArg.size_, lcArg.offset_); 984 offset += lcArg.size_; 985 986 // check if the arg is a hidden/implicit arg 987 // this logic assumes that all hidden args are 8-byte aligned 988 if (!isImplicit(lcArg.valueKind_)) { 989 kernel_explicit_args_size += lcArg.size_; 990 } else { 991 hasHiddenArgs = true; 992 } 993 kernel_explicit_args_size += padding; 994 } 995 } 996 997 // add size of implicit args, e.g.: offset x, y and z and pipe pointer, but 998 // in ATMI, do not count the compiler set implicit args, but set your own 999 // implicit args by discounting the compiler set implicit args 1000 info.kernel_segment_size = 1001 (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) + 1002 sizeof(atmi_implicit_args_t); 1003 DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(), 1004 kernel_segment_size, info.kernel_segment_size); 1005 1006 // kernel received, now add it to the kernel info table 1007 KernelInfoTable[gpu][kernelName] = info; 1008 } 1009 1010 return HSA_STATUS_SUCCESS; 1011 } 1012 1013 static hsa_status_t populate_InfoTables(hsa_executable_symbol_t symbol, 1014 int gpu) { 1015 hsa_symbol_kind_t type; 1016 1017 uint32_t name_length; 1018 hsa_status_t err; 1019 err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE, 1020 &type); 1021 if (err != HSA_STATUS_SUCCESS) { 1022 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1023 "Symbol info extraction", get_error_string(err)); 1024 return err; 1025 } 1026 DEBUG_PRINT("Exec Symbol type: %d\n", type); 1027 if (type == HSA_SYMBOL_KIND_KERNEL) { 1028 err = hsa_executable_symbol_get_info( 1029 symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length); 1030 if (err != HSA_STATUS_SUCCESS) { 1031 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1032 "Symbol info extraction", get_error_string(err)); 1033 return err; 1034 } 1035 char *name = reinterpret_cast<char *>(malloc(name_length + 1)); 1036 err = hsa_executable_symbol_get_info(symbol, 1037 HSA_EXECUTABLE_SYMBOL_INFO_NAME, name); 1038 if (err != HSA_STATUS_SUCCESS) { 1039 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1040 "Symbol info extraction", get_error_string(err)); 1041 return err; 1042 } 1043 // remove the suffix .kd from symbol name. 1044 name[name_length - 3] = 0; 1045 1046 atl_kernel_info_t info; 1047 std::string kernelName(name); 1048 // by now, the kernel info table should already have an entry 1049 // because the non-ROCr custom code object parsing is called before 1050 // iterating over the code object symbols using ROCr 1051 if (KernelInfoTable[gpu].find(kernelName) == KernelInfoTable[gpu].end()) { 1052 return HSA_STATUS_ERROR; 1053 } 1054 // found, so assign and update 1055 info = KernelInfoTable[gpu][kernelName]; 1056 1057 /* Extract dispatch information from the symbol */ 1058 err = hsa_executable_symbol_get_info( 1059 symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT, 1060 &(info.kernel_object)); 1061 if (err != HSA_STATUS_SUCCESS) { 1062 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1063 "Extracting the symbol from the executable", 1064 get_error_string(err)); 1065 return err; 1066 } 1067 err = hsa_executable_symbol_get_info( 1068 symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE, 1069 &(info.group_segment_size)); 1070 if (err != HSA_STATUS_SUCCESS) { 1071 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1072 "Extracting the group segment size from the executable", 1073 get_error_string(err)); 1074 return err; 1075 } 1076 err = hsa_executable_symbol_get_info( 1077 symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE, 1078 &(info.private_segment_size)); 1079 if (err != HSA_STATUS_SUCCESS) { 1080 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1081 "Extracting the private segment from the executable", 1082 get_error_string(err)); 1083 return err; 1084 } 1085 1086 DEBUG_PRINT( 1087 "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes " 1088 "kernarg\n", 1089 kernelName.c_str(), info.kernel_object, info.group_segment_size, 1090 info.private_segment_size, info.kernel_segment_size); 1091 1092 // assign it back to the kernel info table 1093 KernelInfoTable[gpu][kernelName] = info; 1094 free(name); 1095 } else if (type == HSA_SYMBOL_KIND_VARIABLE) { 1096 err = hsa_executable_symbol_get_info( 1097 symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length); 1098 if (err != HSA_STATUS_SUCCESS) { 1099 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1100 "Symbol info extraction", get_error_string(err)); 1101 return err; 1102 } 1103 char *name = reinterpret_cast<char *>(malloc(name_length + 1)); 1104 err = hsa_executable_symbol_get_info(symbol, 1105 HSA_EXECUTABLE_SYMBOL_INFO_NAME, name); 1106 if (err != HSA_STATUS_SUCCESS) { 1107 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1108 "Symbol info extraction", get_error_string(err)); 1109 return err; 1110 } 1111 name[name_length] = 0; 1112 1113 atl_symbol_info_t info; 1114 1115 err = hsa_executable_symbol_get_info( 1116 symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr)); 1117 if (err != HSA_STATUS_SUCCESS) { 1118 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1119 "Symbol info address extraction", get_error_string(err)); 1120 return err; 1121 } 1122 1123 err = hsa_executable_symbol_get_info( 1124 symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size)); 1125 if (err != HSA_STATUS_SUCCESS) { 1126 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1127 "Symbol info size extraction", get_error_string(err)); 1128 return err; 1129 } 1130 1131 atmi_mem_place_t place = ATMI_MEM_PLACE(ATMI_DEVTYPE_GPU, gpu, 0); 1132 DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr, 1133 info.size); 1134 err = register_allocation(reinterpret_cast<void *>(info.addr), 1135 (size_t)info.size, place); 1136 if (err != HSA_STATUS_SUCCESS) { 1137 return err; 1138 } 1139 SymbolInfoTable[gpu][std::string(name)] = info; 1140 if (strcmp(name, "needs_hostcall_buffer") == 0) 1141 g_atmi_hostcall_required = true; 1142 free(name); 1143 } else { 1144 DEBUG_PRINT("Symbol is an indirect function\n"); 1145 } 1146 return HSA_STATUS_SUCCESS; 1147 } 1148 1149 atmi_status_t Runtime::RegisterModuleFromMemory( 1150 void *module_bytes, size_t module_size, atmi_place_t place, 1151 atmi_status_t (*on_deserialized_data)(void *data, size_t size, 1152 void *cb_state), 1153 void *cb_state, std::vector<hsa_executable_t> &HSAExecutables) { 1154 hsa_status_t err; 1155 int gpu = place.device_id; 1156 assert(gpu >= 0); 1157 1158 DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu); 1159 ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place); 1160 hsa_agent_t agent = proc.agent(); 1161 hsa_executable_t executable = {0}; 1162 hsa_profile_t agent_profile; 1163 1164 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile); 1165 if (err != HSA_STATUS_SUCCESS) { 1166 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1167 "Query the agent profile", get_error_string(err)); 1168 return ATMI_STATUS_ERROR; 1169 } 1170 // FIXME: Assume that every profile is FULL until we understand how to build 1171 // GCN with base profile 1172 agent_profile = HSA_PROFILE_FULL; 1173 /* Create the empty executable. */ 1174 err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "", 1175 &executable); 1176 if (err != HSA_STATUS_SUCCESS) { 1177 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1178 "Create the executable", get_error_string(err)); 1179 return ATMI_STATUS_ERROR; 1180 } 1181 1182 bool module_load_success = false; 1183 do // Existing control flow used continue, preserve that for this patch 1184 { 1185 { 1186 // Some metadata info is not available through ROCr API, so use custom 1187 // code object metadata parsing to collect such metadata info 1188 1189 err = get_code_object_custom_metadata(module_bytes, module_size, gpu); 1190 if (err != HSA_STATUS_SUCCESS) { 1191 DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1192 "Getting custom code object metadata", 1193 get_error_string(err)); 1194 continue; 1195 } 1196 1197 // Deserialize code object. 1198 hsa_code_object_t code_object = {0}; 1199 err = hsa_code_object_deserialize(module_bytes, module_size, NULL, 1200 &code_object); 1201 if (err != HSA_STATUS_SUCCESS) { 1202 DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1203 "Code Object Deserialization", get_error_string(err)); 1204 continue; 1205 } 1206 assert(0 != code_object.handle); 1207 1208 // Mutating the device image here avoids another allocation & memcpy 1209 void *code_object_alloc_data = 1210 reinterpret_cast<void *>(code_object.handle); 1211 atmi_status_t atmi_err = 1212 on_deserialized_data(code_object_alloc_data, module_size, cb_state); 1213 if (atmi_err != ATMI_STATUS_SUCCESS) { 1214 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1215 "Error in deserialized_data callback", 1216 get_atmi_error_string(atmi_err)); 1217 return atmi_err; 1218 } 1219 1220 /* Load the code object. */ 1221 err = 1222 hsa_executable_load_code_object(executable, agent, code_object, NULL); 1223 if (err != HSA_STATUS_SUCCESS) { 1224 DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1225 "Loading the code object", get_error_string(err)); 1226 continue; 1227 } 1228 1229 // cannot iterate over symbols until executable is frozen 1230 } 1231 module_load_success = true; 1232 } while (0); 1233 DEBUG_PRINT("Modules loaded successful? %d\n", module_load_success); 1234 if (module_load_success) { 1235 /* Freeze the executable; it can now be queried for symbols. */ 1236 err = hsa_executable_freeze(executable, ""); 1237 if (err != HSA_STATUS_SUCCESS) { 1238 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1239 "Freeze the executable", get_error_string(err)); 1240 return ATMI_STATUS_ERROR; 1241 } 1242 1243 err = hsa::executable_iterate_symbols( 1244 executable, 1245 [&](hsa_executable_t, hsa_executable_symbol_t symbol) -> hsa_status_t { 1246 return populate_InfoTables(symbol, gpu); 1247 }); 1248 1249 if (err != HSA_STATUS_SUCCESS) { 1250 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1251 "Iterating over symbols for execuatable", get_error_string(err)); 1252 return ATMI_STATUS_ERROR; 1253 } 1254 1255 // save the executable and destroy during finalize 1256 HSAExecutables.push_back(executable); 1257 return ATMI_STATUS_SUCCESS; 1258 } else { 1259 return ATMI_STATUS_ERROR; 1260 } 1261 } 1262 1263 } // namespace core 1264