1 /*===-------------------------------------------------------------------------- 2 * ATMI (Asynchronous Task and Memory Interface) 3 * 4 * This file is distributed under the MIT License. See LICENSE.txt for details. 5 *===------------------------------------------------------------------------*/ 6 #include <gelf.h> 7 #include <libelf.h> 8 9 #include <cassert> 10 #include <cstdarg> 11 #include <fstream> 12 #include <iomanip> 13 #include <iostream> 14 #include <set> 15 #include <string> 16 17 #include "internal.h" 18 #include "machine.h" 19 #include "rt.h" 20 21 #include "msgpack.h" 22 23 typedef unsigned char *address; 24 /* 25 * Note descriptors. 26 */ 27 typedef struct { 28 uint32_t n_namesz; /* Length of note's name. */ 29 uint32_t n_descsz; /* Length of note's value. */ 30 uint32_t n_type; /* Type of note. */ 31 // then name 32 // then padding, optional 33 // then desc, at 4 byte alignment (not 8, despite being elf64) 34 } Elf_Note; 35 36 // The following include file and following structs/enums 37 // have been replicated on a per-use basis below. For example, 38 // llvm::AMDGPU::HSAMD::Kernel::Metadata has several fields, 39 // but we may care only about kernargSegmentSize_ for now, so 40 // we just include that field in our KernelMD implementation. We 41 // chose this approach to replicate in order to avoid forcing 42 // a dependency on LLVM_INCLUDE_DIR just to compile the runtime. 43 // #include "llvm/Support/AMDGPUMetadata.h" 44 // typedef llvm::AMDGPU::HSAMD::Metadata CodeObjectMD; 45 // typedef llvm::AMDGPU::HSAMD::Kernel::Metadata KernelMD; 46 // typedef llvm::AMDGPU::HSAMD::Kernel::Arg::Metadata KernelArgMD; 47 // using llvm::AMDGPU::HSAMD::AccessQualifier; 48 // using llvm::AMDGPU::HSAMD::AddressSpaceQualifier; 49 // using llvm::AMDGPU::HSAMD::ValueKind; 50 // using llvm::AMDGPU::HSAMD::ValueType; 51 52 class KernelArgMD { 53 public: 54 enum class ValueKind { 55 HiddenGlobalOffsetX, 56 HiddenGlobalOffsetY, 57 HiddenGlobalOffsetZ, 58 HiddenNone, 59 HiddenPrintfBuffer, 60 HiddenDefaultQueue, 61 HiddenCompletionAction, 62 HiddenMultiGridSyncArg, 63 HiddenHostcallBuffer, 64 Unknown 65 }; 66 67 KernelArgMD() 68 : name_(std::string()), typeName_(std::string()), size_(0), offset_(0), 69 align_(0), valueKind_(ValueKind::Unknown) {} 70 71 // fields 72 std::string name_; 73 std::string typeName_; 74 uint32_t size_; 75 uint32_t offset_; 76 uint32_t align_; 77 ValueKind valueKind_; 78 }; 79 80 class KernelMD { 81 public: 82 KernelMD() : kernargSegmentSize_(0ull) {} 83 84 // fields 85 uint64_t kernargSegmentSize_; 86 }; 87 88 static const std::map<std::string, KernelArgMD::ValueKind> ArgValueKind = { 89 // Including only those fields that are relevant to the runtime. 90 // {"ByValue", KernelArgMD::ValueKind::ByValue}, 91 // {"GlobalBuffer", KernelArgMD::ValueKind::GlobalBuffer}, 92 // {"DynamicSharedPointer", 93 // KernelArgMD::ValueKind::DynamicSharedPointer}, 94 // {"Sampler", KernelArgMD::ValueKind::Sampler}, 95 // {"Image", KernelArgMD::ValueKind::Image}, 96 // {"Pipe", KernelArgMD::ValueKind::Pipe}, 97 // {"Queue", KernelArgMD::ValueKind::Queue}, 98 {"HiddenGlobalOffsetX", KernelArgMD::ValueKind::HiddenGlobalOffsetX}, 99 {"HiddenGlobalOffsetY", KernelArgMD::ValueKind::HiddenGlobalOffsetY}, 100 {"HiddenGlobalOffsetZ", KernelArgMD::ValueKind::HiddenGlobalOffsetZ}, 101 {"HiddenNone", KernelArgMD::ValueKind::HiddenNone}, 102 {"HiddenPrintfBuffer", KernelArgMD::ValueKind::HiddenPrintfBuffer}, 103 {"HiddenDefaultQueue", KernelArgMD::ValueKind::HiddenDefaultQueue}, 104 {"HiddenCompletionAction", KernelArgMD::ValueKind::HiddenCompletionAction}, 105 {"HiddenMultiGridSyncArg", KernelArgMD::ValueKind::HiddenMultiGridSyncArg}, 106 {"HiddenHostcallBuffer", KernelArgMD::ValueKind::HiddenHostcallBuffer}, 107 // v3 108 // {"by_value", KernelArgMD::ValueKind::ByValue}, 109 // {"global_buffer", KernelArgMD::ValueKind::GlobalBuffer}, 110 // {"dynamic_shared_pointer", 111 // KernelArgMD::ValueKind::DynamicSharedPointer}, 112 // {"sampler", KernelArgMD::ValueKind::Sampler}, 113 // {"image", KernelArgMD::ValueKind::Image}, 114 // {"pipe", KernelArgMD::ValueKind::Pipe}, 115 // {"queue", KernelArgMD::ValueKind::Queue}, 116 {"hidden_global_offset_x", KernelArgMD::ValueKind::HiddenGlobalOffsetX}, 117 {"hidden_global_offset_y", KernelArgMD::ValueKind::HiddenGlobalOffsetY}, 118 {"hidden_global_offset_z", KernelArgMD::ValueKind::HiddenGlobalOffsetZ}, 119 {"hidden_none", KernelArgMD::ValueKind::HiddenNone}, 120 {"hidden_printf_buffer", KernelArgMD::ValueKind::HiddenPrintfBuffer}, 121 {"hidden_default_queue", KernelArgMD::ValueKind::HiddenDefaultQueue}, 122 {"hidden_completion_action", 123 KernelArgMD::ValueKind::HiddenCompletionAction}, 124 {"hidden_multigrid_sync_arg", 125 KernelArgMD::ValueKind::HiddenMultiGridSyncArg}, 126 {"hidden_hostcall_buffer", KernelArgMD::ValueKind::HiddenHostcallBuffer}, 127 }; 128 129 // global variables. TODO: Get rid of these 130 atmi_machine_t g_atmi_machine; 131 ATLMachine g_atl_machine; 132 133 std::vector<hsa_amd_memory_pool_t> atl_gpu_kernarg_pools; 134 135 std::map<std::string, std::string> KernelNameMap; 136 std::vector<std::map<std::string, atl_kernel_info_t>> KernelInfoTable; 137 std::vector<std::map<std::string, atl_symbol_info_t>> SymbolInfoTable; 138 139 bool g_atmi_initialized = false; 140 bool g_atmi_hostcall_required = false; 141 142 /* 143 atlc is all internal global values. 144 The structure atl_context_t is defined in atl_internal.h 145 Most references will use the global structure prefix atlc. 146 */ 147 atl_context_t atlc = {.struct_initialized = false}; 148 149 namespace core { 150 /* Machine Info */ 151 atmi_machine_t *Runtime::GetMachineInfo() { 152 if (!atlc.g_hsa_initialized) 153 return NULL; 154 return &g_atmi_machine; 155 } 156 157 static void atl_set_atmi_initialized() { 158 // FIXME: thread safe? locks? 159 g_atmi_initialized = true; 160 } 161 162 static void atl_reset_atmi_initialized() { 163 // FIXME: thread safe? locks? 164 g_atmi_initialized = false; 165 } 166 167 bool atl_is_atmi_initialized() { return g_atmi_initialized; } 168 169 hsa_status_t allow_access_to_all_gpu_agents(void *ptr) { 170 std::vector<ATLGPUProcessor> &gpu_procs = 171 g_atl_machine.processors<ATLGPUProcessor>(); 172 std::vector<hsa_agent_t> agents; 173 for (uint32_t i = 0; i < gpu_procs.size(); i++) { 174 agents.push_back(gpu_procs[i].agent()); 175 } 176 return hsa_amd_agents_allow_access(agents.size(), &agents[0], NULL, ptr); 177 } 178 179 atmi_status_t Runtime::Initialize() { 180 atmi_devtype_t devtype = ATMI_DEVTYPE_GPU; 181 if (atl_is_atmi_initialized()) 182 return ATMI_STATUS_SUCCESS; 183 184 if (devtype == ATMI_DEVTYPE_ALL || devtype & ATMI_DEVTYPE_GPU) { 185 atmi_status_t rc = atl_init_gpu_context(); 186 if (rc != ATMI_STATUS_SUCCESS) { 187 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "GPU context init", 188 get_atmi_error_string(atl_init_gpu_context())); 189 return rc; 190 } 191 } 192 193 atl_set_atmi_initialized(); 194 return ATMI_STATUS_SUCCESS; 195 } 196 197 atmi_status_t Runtime::Finalize() { 198 atmi_status_t rc = ATMI_STATUS_SUCCESS; 199 for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) { 200 SymbolInfoTable[i].clear(); 201 } 202 SymbolInfoTable.clear(); 203 for (uint32_t i = 0; i < KernelInfoTable.size(); i++) { 204 KernelInfoTable[i].clear(); 205 } 206 KernelInfoTable.clear(); 207 208 atl_reset_atmi_initialized(); 209 hsa_status_t err = hsa_shut_down(); 210 if (err != HSA_STATUS_SUCCESS) { 211 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Shutting down HSA", 212 get_error_string(err)); 213 rc = ATMI_STATUS_ERROR; 214 } 215 216 return rc; 217 } 218 219 static void atmi_init_context_structs() { 220 atlc.struct_initialized = true; /* This only gets called one time */ 221 atlc.g_hsa_initialized = false; 222 atlc.g_gpu_initialized = false; 223 atlc.g_tasks_initialized = false; 224 } 225 226 // Implement memory_pool iteration function 227 static hsa_status_t get_memory_pool_info(hsa_amd_memory_pool_t memory_pool, 228 void *data) { 229 ATLProcessor *proc = reinterpret_cast<ATLProcessor *>(data); 230 hsa_status_t err = HSA_STATUS_SUCCESS; 231 // Check if the memory_pool is allowed to allocate, i.e. do not return group 232 // memory 233 bool alloc_allowed = false; 234 err = hsa_amd_memory_pool_get_info( 235 memory_pool, HSA_AMD_MEMORY_POOL_INFO_RUNTIME_ALLOC_ALLOWED, 236 &alloc_allowed); 237 if (err != HSA_STATUS_SUCCESS) { 238 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 239 "Alloc allowed in memory pool check", get_error_string(err)); 240 return err; 241 } 242 if (alloc_allowed) { 243 uint32_t global_flag = 0; 244 err = hsa_amd_memory_pool_get_info( 245 memory_pool, HSA_AMD_MEMORY_POOL_INFO_GLOBAL_FLAGS, &global_flag); 246 if (err != HSA_STATUS_SUCCESS) { 247 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 248 "Get memory pool info", get_error_string(err)); 249 return err; 250 } 251 if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_FINE_GRAINED & global_flag) { 252 ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_FINE_GRAINED); 253 proc->addMemory(new_mem); 254 if (HSA_AMD_MEMORY_POOL_GLOBAL_FLAG_KERNARG_INIT & global_flag) { 255 DEBUG_PRINT("GPU kernel args pool handle: %lu\n", memory_pool.handle); 256 atl_gpu_kernarg_pools.push_back(memory_pool); 257 } 258 } else { 259 ATLMemory new_mem(memory_pool, *proc, ATMI_MEMTYPE_COARSE_GRAINED); 260 proc->addMemory(new_mem); 261 } 262 } 263 264 return err; 265 } 266 267 static hsa_status_t get_agent_info(hsa_agent_t agent, void *data) { 268 hsa_status_t err = HSA_STATUS_SUCCESS; 269 hsa_device_type_t device_type; 270 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_DEVICE, &device_type); 271 if (err != HSA_STATUS_SUCCESS) { 272 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 273 "Get device type info", get_error_string(err)); 274 return err; 275 } 276 switch (device_type) { 277 case HSA_DEVICE_TYPE_CPU: { 278 ATLCPUProcessor new_proc(agent); 279 err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info, 280 &new_proc); 281 if (err != HSA_STATUS_SUCCESS) { 282 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 283 "Iterate all memory pools", get_error_string(err)); 284 return err; 285 } 286 g_atl_machine.addProcessor(new_proc); 287 } break; 288 case HSA_DEVICE_TYPE_GPU: { 289 hsa_profile_t profile; 290 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &profile); 291 if (err != HSA_STATUS_SUCCESS) { 292 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 293 "Query the agent profile", get_error_string(err)); 294 return err; 295 } 296 atmi_devtype_t gpu_type; 297 gpu_type = 298 (profile == HSA_PROFILE_FULL) ? ATMI_DEVTYPE_iGPU : ATMI_DEVTYPE_dGPU; 299 ATLGPUProcessor new_proc(agent, gpu_type); 300 err = hsa_amd_agent_iterate_memory_pools(agent, get_memory_pool_info, 301 &new_proc); 302 if (err != HSA_STATUS_SUCCESS) { 303 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 304 "Iterate all memory pools", get_error_string(err)); 305 return err; 306 } 307 g_atl_machine.addProcessor(new_proc); 308 } break; 309 case HSA_DEVICE_TYPE_DSP: { 310 err = HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 311 } break; 312 } 313 314 return err; 315 } 316 317 hsa_status_t get_fine_grained_region(hsa_region_t region, void *data) { 318 hsa_region_segment_t segment; 319 hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment); 320 if (segment != HSA_REGION_SEGMENT_GLOBAL) { 321 return HSA_STATUS_SUCCESS; 322 } 323 hsa_region_global_flag_t flags; 324 hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags); 325 if (flags & HSA_REGION_GLOBAL_FLAG_FINE_GRAINED) { 326 hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data); 327 *ret = region; 328 return HSA_STATUS_INFO_BREAK; 329 } 330 return HSA_STATUS_SUCCESS; 331 } 332 333 /* Determines if a memory region can be used for kernarg allocations. */ 334 static hsa_status_t get_kernarg_memory_region(hsa_region_t region, void *data) { 335 hsa_region_segment_t segment; 336 hsa_region_get_info(region, HSA_REGION_INFO_SEGMENT, &segment); 337 if (HSA_REGION_SEGMENT_GLOBAL != segment) { 338 return HSA_STATUS_SUCCESS; 339 } 340 341 hsa_region_global_flag_t flags; 342 hsa_region_get_info(region, HSA_REGION_INFO_GLOBAL_FLAGS, &flags); 343 if (flags & HSA_REGION_GLOBAL_FLAG_KERNARG) { 344 hsa_region_t *ret = reinterpret_cast<hsa_region_t *>(data); 345 *ret = region; 346 return HSA_STATUS_INFO_BREAK; 347 } 348 349 return HSA_STATUS_SUCCESS; 350 } 351 352 static hsa_status_t init_compute_and_memory() { 353 hsa_status_t err; 354 355 /* Iterate over the agents and pick the gpu agent */ 356 err = hsa_iterate_agents(get_agent_info, NULL); 357 if (err == HSA_STATUS_INFO_BREAK) { 358 err = HSA_STATUS_SUCCESS; 359 } 360 if (err != HSA_STATUS_SUCCESS) { 361 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, "Getting a gpu agent", 362 get_error_string(err)); 363 return err; 364 } 365 366 /* Init all devices or individual device types? */ 367 std::vector<ATLCPUProcessor> &cpu_procs = 368 g_atl_machine.processors<ATLCPUProcessor>(); 369 std::vector<ATLGPUProcessor> &gpu_procs = 370 g_atl_machine.processors<ATLGPUProcessor>(); 371 /* For CPU memory pools, add other devices that can access them directly 372 * or indirectly */ 373 for (auto &cpu_proc : cpu_procs) { 374 for (auto &cpu_mem : cpu_proc.memories()) { 375 hsa_amd_memory_pool_t pool = cpu_mem.memory(); 376 for (auto &gpu_proc : gpu_procs) { 377 hsa_agent_t agent = gpu_proc.agent(); 378 hsa_amd_memory_pool_access_t access; 379 hsa_amd_agent_memory_pool_get_info( 380 agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access); 381 if (access != 0) { 382 // this means not NEVER, but could be YES or NO 383 // add this memory pool to the proc 384 gpu_proc.addMemory(cpu_mem); 385 } 386 } 387 } 388 } 389 390 /* FIXME: are the below combinations of procs and memory pools needed? 391 * all to all compare procs with their memory pools and add those memory 392 * pools that are accessible by the target procs */ 393 for (auto &gpu_proc : gpu_procs) { 394 for (auto &gpu_mem : gpu_proc.memories()) { 395 hsa_amd_memory_pool_t pool = gpu_mem.memory(); 396 for (auto &cpu_proc : cpu_procs) { 397 hsa_agent_t agent = cpu_proc.agent(); 398 hsa_amd_memory_pool_access_t access; 399 hsa_amd_agent_memory_pool_get_info( 400 agent, pool, HSA_AMD_AGENT_MEMORY_POOL_INFO_ACCESS, &access); 401 if (access != 0) { 402 // this means not NEVER, but could be YES or NO 403 // add this memory pool to the proc 404 cpu_proc.addMemory(gpu_mem); 405 } 406 } 407 } 408 } 409 410 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_CPU] = cpu_procs.size(); 411 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_GPU] = gpu_procs.size(); 412 413 size_t num_procs = cpu_procs.size() + gpu_procs.size(); 414 // g_atmi_machine.devices = (atmi_device_t *)malloc(num_procs * 415 // sizeof(atmi_device_t)); 416 atmi_device_t *all_devices = reinterpret_cast<atmi_device_t *>( 417 malloc(num_procs * sizeof(atmi_device_t))); 418 int num_iGPUs = 0; 419 int num_dGPUs = 0; 420 for (uint32_t i = 0; i < gpu_procs.size(); i++) { 421 if (gpu_procs[i].type() == ATMI_DEVTYPE_iGPU) 422 num_iGPUs++; 423 else 424 num_dGPUs++; 425 } 426 assert(num_iGPUs + num_dGPUs == gpu_procs.size() && 427 "Number of dGPUs and iGPUs do not add up"); 428 DEBUG_PRINT("CPU Agents: %lu\n", cpu_procs.size()); 429 DEBUG_PRINT("iGPU Agents: %d\n", num_iGPUs); 430 DEBUG_PRINT("dGPU Agents: %d\n", num_dGPUs); 431 DEBUG_PRINT("GPU Agents: %lu\n", gpu_procs.size()); 432 433 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_iGPU] = num_iGPUs; 434 g_atmi_machine.device_count_by_type[ATMI_DEVTYPE_dGPU] = num_dGPUs; 435 436 int cpus_begin = 0; 437 int cpus_end = cpu_procs.size(); 438 int gpus_begin = cpu_procs.size(); 439 int gpus_end = cpu_procs.size() + gpu_procs.size(); 440 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_CPU] = &all_devices[cpus_begin]; 441 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_GPU] = &all_devices[gpus_begin]; 442 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_iGPU] = &all_devices[gpus_begin]; 443 g_atmi_machine.devices_by_type[ATMI_DEVTYPE_dGPU] = &all_devices[gpus_begin]; 444 int proc_index = 0; 445 for (int i = cpus_begin; i < cpus_end; i++) { 446 all_devices[i].type = cpu_procs[proc_index].type(); 447 448 std::vector<ATLMemory> memories = cpu_procs[proc_index].memories(); 449 int fine_memories_size = 0; 450 int coarse_memories_size = 0; 451 DEBUG_PRINT("CPU memory types:\t"); 452 for (auto &memory : memories) { 453 atmi_memtype_t type = memory.type(); 454 if (type == ATMI_MEMTYPE_FINE_GRAINED) { 455 fine_memories_size++; 456 DEBUG_PRINT("Fine\t"); 457 } else { 458 coarse_memories_size++; 459 DEBUG_PRINT("Coarse\t"); 460 } 461 } 462 DEBUG_PRINT("\nFine Memories : %d", fine_memories_size); 463 DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size); 464 proc_index++; 465 } 466 proc_index = 0; 467 for (int i = gpus_begin; i < gpus_end; i++) { 468 all_devices[i].type = gpu_procs[proc_index].type(); 469 470 std::vector<ATLMemory> memories = gpu_procs[proc_index].memories(); 471 int fine_memories_size = 0; 472 int coarse_memories_size = 0; 473 DEBUG_PRINT("GPU memory types:\t"); 474 for (auto &memory : memories) { 475 atmi_memtype_t type = memory.type(); 476 if (type == ATMI_MEMTYPE_FINE_GRAINED) { 477 fine_memories_size++; 478 DEBUG_PRINT("Fine\t"); 479 } else { 480 coarse_memories_size++; 481 DEBUG_PRINT("Coarse\t"); 482 } 483 } 484 DEBUG_PRINT("\nFine Memories : %d", fine_memories_size); 485 DEBUG_PRINT("\tCoarse Memories : %d\n", coarse_memories_size); 486 proc_index++; 487 } 488 proc_index = 0; 489 hsa_region_t atl_cpu_kernarg_region; 490 atl_cpu_kernarg_region.handle = (uint64_t)-1; 491 if (cpu_procs.size() > 0) { 492 err = hsa_agent_iterate_regions( 493 cpu_procs[0].agent(), get_fine_grained_region, &atl_cpu_kernarg_region); 494 if (err == HSA_STATUS_INFO_BREAK) { 495 err = HSA_STATUS_SUCCESS; 496 } 497 err = (atl_cpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR 498 : HSA_STATUS_SUCCESS; 499 if (err != HSA_STATUS_SUCCESS) { 500 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 501 "Finding a CPU kernarg memory region handle", 502 get_error_string(err)); 503 return err; 504 } 505 } 506 hsa_region_t atl_gpu_kernarg_region; 507 /* Find a memory region that supports kernel arguments. */ 508 atl_gpu_kernarg_region.handle = (uint64_t)-1; 509 if (gpu_procs.size() > 0) { 510 hsa_agent_iterate_regions(gpu_procs[0].agent(), get_kernarg_memory_region, 511 &atl_gpu_kernarg_region); 512 err = (atl_gpu_kernarg_region.handle == (uint64_t)-1) ? HSA_STATUS_ERROR 513 : HSA_STATUS_SUCCESS; 514 if (err != HSA_STATUS_SUCCESS) { 515 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 516 "Finding a kernarg memory region", get_error_string(err)); 517 return err; 518 } 519 } 520 if (num_procs > 0) 521 return HSA_STATUS_SUCCESS; 522 else 523 return HSA_STATUS_ERROR_NOT_INITIALIZED; 524 } 525 526 hsa_status_t init_hsa() { 527 if (atlc.g_hsa_initialized == false) { 528 DEBUG_PRINT("Initializing HSA..."); 529 hsa_status_t err = hsa_init(); 530 if (err != HSA_STATUS_SUCCESS) { 531 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 532 "Initializing the hsa runtime", get_error_string(err)); 533 return err; 534 } 535 if (err != HSA_STATUS_SUCCESS) 536 return err; 537 538 err = init_compute_and_memory(); 539 if (err != HSA_STATUS_SUCCESS) 540 return err; 541 if (err != HSA_STATUS_SUCCESS) { 542 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 543 "After initializing compute and memory", get_error_string(err)); 544 return err; 545 } 546 547 int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>(); 548 KernelInfoTable.resize(gpu_count); 549 SymbolInfoTable.resize(gpu_count); 550 for (uint32_t i = 0; i < SymbolInfoTable.size(); i++) 551 SymbolInfoTable[i].clear(); 552 for (uint32_t i = 0; i < KernelInfoTable.size(); i++) 553 KernelInfoTable[i].clear(); 554 atlc.g_hsa_initialized = true; 555 DEBUG_PRINT("done\n"); 556 } 557 return HSA_STATUS_SUCCESS; 558 } 559 560 void init_tasks() { 561 if (atlc.g_tasks_initialized != false) 562 return; 563 std::vector<hsa_agent_t> gpu_agents; 564 int gpu_count = g_atl_machine.processorCount<ATLGPUProcessor>(); 565 for (int gpu = 0; gpu < gpu_count; gpu++) { 566 atmi_place_t place = ATMI_PLACE_GPU(0, gpu); 567 ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place); 568 gpu_agents.push_back(proc.agent()); 569 } 570 atlc.g_tasks_initialized = true; 571 } 572 573 hsa_status_t callbackEvent(const hsa_amd_event_t *event, void *data) { 574 #if (ROCM_VERSION_MAJOR >= 3) || \ 575 (ROCM_VERSION_MAJOR >= 2 && ROCM_VERSION_MINOR >= 3) 576 if (event->event_type == HSA_AMD_GPU_MEMORY_FAULT_EVENT) { 577 #else 578 if (event->event_type == GPU_MEMORY_FAULT_EVENT) { 579 #endif 580 hsa_amd_gpu_memory_fault_info_t memory_fault = event->memory_fault; 581 // memory_fault.agent 582 // memory_fault.virtual_address 583 // memory_fault.fault_reason_mask 584 // fprintf("[GPU Error at %p: Reason is ", memory_fault.virtual_address); 585 std::stringstream stream; 586 stream << std::hex << (uintptr_t)memory_fault.virtual_address; 587 std::string addr("0x" + stream.str()); 588 589 std::string err_string = "[GPU Memory Error] Addr: " + addr; 590 err_string += " Reason: "; 591 if (!(memory_fault.fault_reason_mask & 0x00111111)) { 592 err_string += "No Idea! "; 593 } else { 594 if (memory_fault.fault_reason_mask & 0x00000001) 595 err_string += "Page not present or supervisor privilege. "; 596 if (memory_fault.fault_reason_mask & 0x00000010) 597 err_string += "Write access to a read-only page. "; 598 if (memory_fault.fault_reason_mask & 0x00000100) 599 err_string += "Execute access to a page marked NX. "; 600 if (memory_fault.fault_reason_mask & 0x00001000) 601 err_string += "Host access only. "; 602 if (memory_fault.fault_reason_mask & 0x00010000) 603 err_string += "ECC failure (if supported by HW). "; 604 if (memory_fault.fault_reason_mask & 0x00100000) 605 err_string += "Can't determine the exact fault address. "; 606 } 607 fprintf(stderr, "%s\n", err_string.c_str()); 608 return HSA_STATUS_ERROR; 609 } 610 return HSA_STATUS_SUCCESS; 611 } 612 613 atmi_status_t atl_init_gpu_context() { 614 if (atlc.struct_initialized == false) 615 atmi_init_context_structs(); 616 if (atlc.g_gpu_initialized != false) 617 return ATMI_STATUS_SUCCESS; 618 619 hsa_status_t err; 620 err = init_hsa(); 621 if (err != HSA_STATUS_SUCCESS) 622 return ATMI_STATUS_ERROR; 623 624 err = hsa_amd_register_system_event_handler(callbackEvent, NULL); 625 if (err != HSA_STATUS_SUCCESS) { 626 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 627 "Registering the system for memory faults", get_error_string(err)); 628 return ATMI_STATUS_ERROR; 629 } 630 631 init_tasks(); 632 atlc.g_gpu_initialized = true; 633 return ATMI_STATUS_SUCCESS; 634 } 635 636 static bool isImplicit(KernelArgMD::ValueKind value_kind) { 637 switch (value_kind) { 638 case KernelArgMD::ValueKind::HiddenGlobalOffsetX: 639 case KernelArgMD::ValueKind::HiddenGlobalOffsetY: 640 case KernelArgMD::ValueKind::HiddenGlobalOffsetZ: 641 case KernelArgMD::ValueKind::HiddenNone: 642 case KernelArgMD::ValueKind::HiddenPrintfBuffer: 643 case KernelArgMD::ValueKind::HiddenDefaultQueue: 644 case KernelArgMD::ValueKind::HiddenCompletionAction: 645 case KernelArgMD::ValueKind::HiddenMultiGridSyncArg: 646 case KernelArgMD::ValueKind::HiddenHostcallBuffer: 647 return true; 648 default: 649 return false; 650 } 651 } 652 653 static std::pair<unsigned char *, unsigned char *> 654 find_metadata(void *binary, size_t binSize) { 655 std::pair<unsigned char *, unsigned char *> failure = {nullptr, nullptr}; 656 657 Elf *e = elf_memory(static_cast<char *>(binary), binSize); 658 if (elf_kind(e) != ELF_K_ELF) { 659 return failure; 660 } 661 662 size_t numpHdrs; 663 if (elf_getphdrnum(e, &numpHdrs) != 0) { 664 return failure; 665 } 666 667 for (size_t i = 0; i < numpHdrs; ++i) { 668 GElf_Phdr pHdr; 669 if (gelf_getphdr(e, i, &pHdr) != &pHdr) { 670 continue; 671 } 672 // Look for the runtime metadata note 673 if (pHdr.p_type == PT_NOTE && pHdr.p_align >= sizeof(int)) { 674 // Iterate over the notes in this segment 675 address ptr = (address)binary + pHdr.p_offset; 676 address segmentEnd = ptr + pHdr.p_filesz; 677 678 while (ptr < segmentEnd) { 679 Elf_Note *note = reinterpret_cast<Elf_Note *>(ptr); 680 address name = (address)¬e[1]; 681 682 if (note->n_type == 7 || note->n_type == 8) { 683 return failure; 684 } else if (note->n_type == 10 /* NT_AMD_AMDGPU_HSA_METADATA */ && 685 note->n_namesz == sizeof "AMD" && 686 !memcmp(name, "AMD", note->n_namesz)) { 687 // code object v2 uses yaml metadata, no longer supported 688 return failure; 689 } else if (note->n_type == 32 /* NT_AMDGPU_METADATA */ && 690 note->n_namesz == sizeof "AMDGPU" && 691 !memcmp(name, "AMDGPU", note->n_namesz)) { 692 693 // n_descsz = 485 694 // value is padded to 4 byte alignment, may want to move end up to 695 // match 696 size_t offset = sizeof(uint32_t) * 3 /* fields */ 697 + sizeof("AMDGPU") /* name */ 698 + 1 /* padding to 4 byte alignment */; 699 700 // Including the trailing padding means both pointers are 4 bytes 701 // aligned, which may be useful later. 702 unsigned char *metadata_start = (unsigned char *)ptr + offset; 703 unsigned char *metadata_end = 704 metadata_start + core::alignUp(note->n_descsz, 4); 705 return {metadata_start, metadata_end}; 706 } 707 ptr += sizeof(*note) + core::alignUp(note->n_namesz, sizeof(int)) + 708 core::alignUp(note->n_descsz, sizeof(int)); 709 } 710 } 711 } 712 713 return failure; 714 } 715 716 namespace { 717 int map_lookup_array(msgpack::byte_range message, const char *needle, 718 msgpack::byte_range *res, uint64_t *size) { 719 unsigned count = 0; 720 struct s : msgpack::functors_defaults<s> { 721 s(unsigned &count, uint64_t *size) : count(count), size(size) {} 722 unsigned &count; 723 uint64_t *size; 724 const unsigned char *handle_array(uint64_t N, msgpack::byte_range bytes) { 725 count++; 726 *size = N; 727 return bytes.end; 728 } 729 }; 730 731 msgpack::foreach_map(message, 732 [&](msgpack::byte_range key, msgpack::byte_range value) { 733 if (msgpack::message_is_string(key, needle)) { 734 // If the message is an array, record number of 735 // elements in *size 736 msgpack::handle_msgpack<s>(value, {count, size}); 737 // return the whole array 738 *res = value; 739 } 740 }); 741 // Only claim success if exactly one key/array pair matched 742 return count != 1; 743 } 744 745 int map_lookup_string(msgpack::byte_range message, const char *needle, 746 std::string *res) { 747 unsigned count = 0; 748 struct s : public msgpack::functors_defaults<s> { 749 s(unsigned &count, std::string *res) : count(count), res(res) {} 750 unsigned &count; 751 std::string *res; 752 void handle_string(size_t N, const unsigned char *str) { 753 count++; 754 *res = std::string(str, str + N); 755 } 756 }; 757 msgpack::foreach_map(message, 758 [&](msgpack::byte_range key, msgpack::byte_range value) { 759 if (msgpack::message_is_string(key, needle)) { 760 msgpack::handle_msgpack<s>(value, {count, res}); 761 } 762 }); 763 return count != 1; 764 } 765 766 int map_lookup_uint64_t(msgpack::byte_range message, const char *needle, 767 uint64_t *res) { 768 unsigned count = 0; 769 msgpack::foreach_map(message, 770 [&](msgpack::byte_range key, msgpack::byte_range value) { 771 if (msgpack::message_is_string(key, needle)) { 772 msgpack::foronly_unsigned(value, [&](uint64_t x) { 773 count++; 774 *res = x; 775 }); 776 } 777 }); 778 return count != 1; 779 } 780 781 int array_lookup_element(msgpack::byte_range message, uint64_t elt, 782 msgpack::byte_range *res) { 783 int rc = 1; 784 uint64_t i = 0; 785 msgpack::foreach_array(message, [&](msgpack::byte_range value) { 786 if (i == elt) { 787 *res = value; 788 rc = 0; 789 } 790 i++; 791 }); 792 return rc; 793 } 794 795 int populate_kernelArgMD(msgpack::byte_range args_element, 796 KernelArgMD *kernelarg) { 797 using namespace msgpack; 798 int error = 0; 799 foreach_map(args_element, [&](byte_range key, byte_range value) -> void { 800 if (message_is_string(key, ".name")) { 801 foronly_string(value, [&](size_t N, const unsigned char *str) { 802 kernelarg->name_ = std::string(str, str + N); 803 }); 804 } else if (message_is_string(key, ".type_name")) { 805 foronly_string(value, [&](size_t N, const unsigned char *str) { 806 kernelarg->typeName_ = std::string(str, str + N); 807 }); 808 } else if (message_is_string(key, ".size")) { 809 foronly_unsigned(value, [&](uint64_t x) { kernelarg->size_ = x; }); 810 } else if (message_is_string(key, ".offset")) { 811 foronly_unsigned(value, [&](uint64_t x) { kernelarg->offset_ = x; }); 812 } else if (message_is_string(key, ".value_kind")) { 813 foronly_string(value, [&](size_t N, const unsigned char *str) { 814 std::string s = std::string(str, str + N); 815 auto itValueKind = ArgValueKind.find(s); 816 if (itValueKind != ArgValueKind.end()) { 817 kernelarg->valueKind_ = itValueKind->second; 818 } 819 }); 820 } 821 }); 822 return error; 823 } 824 } // namespace 825 826 static hsa_status_t get_code_object_custom_metadata(void *binary, 827 size_t binSize, int gpu) { 828 // parse code object with different keys from v2 829 // also, the kernel name is not the same as the symbol name -- so a 830 // symbol->name map is needed 831 832 std::pair<unsigned char *, unsigned char *> metadata = 833 find_metadata(binary, binSize); 834 if (!metadata.first) { 835 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 836 } 837 838 uint64_t kernelsSize = 0; 839 int msgpack_errors = 0; 840 msgpack::byte_range kernel_array; 841 msgpack_errors = 842 map_lookup_array({metadata.first, metadata.second}, "amdhsa.kernels", 843 &kernel_array, &kernelsSize); 844 if (msgpack_errors != 0) { 845 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 846 "kernels lookup in program metadata"); 847 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 848 } 849 850 for (size_t i = 0; i < kernelsSize; i++) { 851 assert(msgpack_errors == 0); 852 std::string kernelName; 853 std::string symbolName; 854 855 msgpack::byte_range element; 856 msgpack_errors += array_lookup_element(kernel_array, i, &element); 857 if (msgpack_errors != 0) { 858 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 859 "element lookup in kernel metadata"); 860 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 861 } 862 863 msgpack_errors += map_lookup_string(element, ".name", &kernelName); 864 msgpack_errors += map_lookup_string(element, ".symbol", &symbolName); 865 if (msgpack_errors != 0) { 866 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 867 "strings lookup in kernel metadata"); 868 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 869 } 870 871 atl_kernel_info_t info = {0, 0, 0, 0, 0, 0, 0, 0, 0, {}, {}, {}}; 872 873 uint64_t sgpr_count, vgpr_count, sgpr_spill_count, vgpr_spill_count; 874 msgpack_errors += map_lookup_uint64_t(element, ".sgpr_count", &sgpr_count); 875 if (msgpack_errors != 0) { 876 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 877 "sgpr count metadata lookup in kernel metadata"); 878 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 879 } 880 881 info.sgpr_count = sgpr_count; 882 883 msgpack_errors += map_lookup_uint64_t(element, ".vgpr_count", &vgpr_count); 884 if (msgpack_errors != 0) { 885 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 886 "vgpr count metadata lookup in kernel metadata"); 887 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 888 } 889 890 info.vgpr_count = vgpr_count; 891 892 msgpack_errors += 893 map_lookup_uint64_t(element, ".sgpr_spill_count", &sgpr_spill_count); 894 if (msgpack_errors != 0) { 895 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 896 "sgpr spill count metadata lookup in kernel metadata"); 897 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 898 } 899 900 info.sgpr_spill_count = sgpr_spill_count; 901 902 msgpack_errors += 903 map_lookup_uint64_t(element, ".vgpr_spill_count", &vgpr_spill_count); 904 if (msgpack_errors != 0) { 905 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 906 "vgpr spill count metadata lookup in kernel metadata"); 907 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 908 } 909 910 info.vgpr_spill_count = vgpr_spill_count; 911 912 size_t kernel_explicit_args_size = 0; 913 uint64_t kernel_segment_size; 914 msgpack_errors += map_lookup_uint64_t(element, ".kernarg_segment_size", 915 &kernel_segment_size); 916 if (msgpack_errors != 0) { 917 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 918 "kernarg segment size metadata lookup in kernel metadata"); 919 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 920 } 921 922 // create a map from symbol to name 923 DEBUG_PRINT("Kernel symbol %s; Name: %s; Size: %lu\n", symbolName.c_str(), 924 kernelName.c_str(), kernel_segment_size); 925 KernelNameMap[symbolName] = kernelName; 926 927 bool hasHiddenArgs = false; 928 if (kernel_segment_size > 0) { 929 uint64_t argsSize; 930 size_t offset = 0; 931 932 msgpack::byte_range args_array; 933 msgpack_errors += 934 map_lookup_array(element, ".args", &args_array, &argsSize); 935 if (msgpack_errors != 0) { 936 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 937 "kernel args metadata lookup in kernel metadata"); 938 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 939 } 940 941 info.num_args = argsSize; 942 943 for (size_t i = 0; i < argsSize; ++i) { 944 KernelArgMD lcArg; 945 946 msgpack::byte_range args_element; 947 msgpack_errors += array_lookup_element(args_array, i, &args_element); 948 if (msgpack_errors != 0) { 949 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 950 "iterate args map in kernel args metadata"); 951 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 952 } 953 954 msgpack_errors += populate_kernelArgMD(args_element, &lcArg); 955 if (msgpack_errors != 0) { 956 printf("[%s:%d] %s failed\n", __FILE__, __LINE__, 957 "iterate args map in kernel args metadata"); 958 return HSA_STATUS_ERROR_INVALID_CODE_OBJECT; 959 } 960 // populate info with sizes and offsets 961 info.arg_sizes.push_back(lcArg.size_); 962 // v3 has offset field and not align field 963 size_t new_offset = lcArg.offset_; 964 size_t padding = new_offset - offset; 965 offset = new_offset; 966 info.arg_offsets.push_back(lcArg.offset_); 967 DEBUG_PRINT("Arg[%lu] \"%s\" (%u, %u)\n", i, lcArg.name_.c_str(), 968 lcArg.size_, lcArg.offset_); 969 offset += lcArg.size_; 970 971 // check if the arg is a hidden/implicit arg 972 // this logic assumes that all hidden args are 8-byte aligned 973 if (!isImplicit(lcArg.valueKind_)) { 974 kernel_explicit_args_size += lcArg.size_; 975 } else { 976 hasHiddenArgs = true; 977 } 978 kernel_explicit_args_size += padding; 979 } 980 } 981 982 // add size of implicit args, e.g.: offset x, y and z and pipe pointer, but 983 // in ATMI, do not count the compiler set implicit args, but set your own 984 // implicit args by discounting the compiler set implicit args 985 info.kernel_segment_size = 986 (hasHiddenArgs ? kernel_explicit_args_size : kernel_segment_size) + 987 sizeof(atmi_implicit_args_t); 988 DEBUG_PRINT("[%s: kernarg seg size] (%lu --> %u)\n", kernelName.c_str(), 989 kernel_segment_size, info.kernel_segment_size); 990 991 // kernel received, now add it to the kernel info table 992 KernelInfoTable[gpu][kernelName] = info; 993 } 994 995 return HSA_STATUS_SUCCESS; 996 } 997 998 static hsa_status_t populate_InfoTables(hsa_executable_t executable, 999 hsa_executable_symbol_t symbol, 1000 void *data) { 1001 int gpu = *static_cast<int *>(data); 1002 hsa_symbol_kind_t type; 1003 1004 uint32_t name_length; 1005 hsa_status_t err; 1006 err = hsa_executable_symbol_get_info(symbol, HSA_EXECUTABLE_SYMBOL_INFO_TYPE, 1007 &type); 1008 if (err != HSA_STATUS_SUCCESS) { 1009 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1010 "Symbol info extraction", get_error_string(err)); 1011 return err; 1012 } 1013 DEBUG_PRINT("Exec Symbol type: %d\n", type); 1014 if (type == HSA_SYMBOL_KIND_KERNEL) { 1015 err = hsa_executable_symbol_get_info( 1016 symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length); 1017 if (err != HSA_STATUS_SUCCESS) { 1018 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1019 "Symbol info extraction", get_error_string(err)); 1020 return err; 1021 } 1022 char *name = reinterpret_cast<char *>(malloc(name_length + 1)); 1023 err = hsa_executable_symbol_get_info(symbol, 1024 HSA_EXECUTABLE_SYMBOL_INFO_NAME, name); 1025 if (err != HSA_STATUS_SUCCESS) { 1026 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1027 "Symbol info extraction", get_error_string(err)); 1028 return err; 1029 } 1030 name[name_length] = 0; 1031 1032 if (KernelNameMap.find(std::string(name)) == KernelNameMap.end()) { 1033 // did not find kernel name in the kernel map; this can happen only 1034 // if the ROCr API for getting symbol info (name) is different from 1035 // the comgr method of getting symbol info 1036 return HSA_STATUS_ERROR; 1037 } 1038 atl_kernel_info_t info; 1039 std::string kernelName = KernelNameMap[std::string(name)]; 1040 // by now, the kernel info table should already have an entry 1041 // because the non-ROCr custom code object parsing is called before 1042 // iterating over the code object symbols using ROCr 1043 if (KernelInfoTable[gpu].find(kernelName) == KernelInfoTable[gpu].end()) { 1044 return HSA_STATUS_ERROR; 1045 } 1046 // found, so assign and update 1047 info = KernelInfoTable[gpu][kernelName]; 1048 1049 /* Extract dispatch information from the symbol */ 1050 err = hsa_executable_symbol_get_info( 1051 symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_OBJECT, 1052 &(info.kernel_object)); 1053 if (err != HSA_STATUS_SUCCESS) { 1054 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1055 "Extracting the symbol from the executable", 1056 get_error_string(err)); 1057 return err; 1058 } 1059 err = hsa_executable_symbol_get_info( 1060 symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_GROUP_SEGMENT_SIZE, 1061 &(info.group_segment_size)); 1062 if (err != HSA_STATUS_SUCCESS) { 1063 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1064 "Extracting the group segment size from the executable", 1065 get_error_string(err)); 1066 return err; 1067 } 1068 err = hsa_executable_symbol_get_info( 1069 symbol, HSA_EXECUTABLE_SYMBOL_INFO_KERNEL_PRIVATE_SEGMENT_SIZE, 1070 &(info.private_segment_size)); 1071 if (err != HSA_STATUS_SUCCESS) { 1072 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1073 "Extracting the private segment from the executable", 1074 get_error_string(err)); 1075 return err; 1076 } 1077 1078 DEBUG_PRINT( 1079 "Kernel %s --> %lx symbol %u group segsize %u pvt segsize %u bytes " 1080 "kernarg\n", 1081 kernelName.c_str(), info.kernel_object, info.group_segment_size, 1082 info.private_segment_size, info.kernel_segment_size); 1083 1084 // assign it back to the kernel info table 1085 KernelInfoTable[gpu][kernelName] = info; 1086 free(name); 1087 } else if (type == HSA_SYMBOL_KIND_VARIABLE) { 1088 err = hsa_executable_symbol_get_info( 1089 symbol, HSA_EXECUTABLE_SYMBOL_INFO_NAME_LENGTH, &name_length); 1090 if (err != HSA_STATUS_SUCCESS) { 1091 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1092 "Symbol info extraction", get_error_string(err)); 1093 return err; 1094 } 1095 char *name = reinterpret_cast<char *>(malloc(name_length + 1)); 1096 err = hsa_executable_symbol_get_info(symbol, 1097 HSA_EXECUTABLE_SYMBOL_INFO_NAME, name); 1098 if (err != HSA_STATUS_SUCCESS) { 1099 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1100 "Symbol info extraction", get_error_string(err)); 1101 return err; 1102 } 1103 name[name_length] = 0; 1104 1105 atl_symbol_info_t info; 1106 1107 err = hsa_executable_symbol_get_info( 1108 symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_ADDRESS, &(info.addr)); 1109 if (err != HSA_STATUS_SUCCESS) { 1110 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1111 "Symbol info address extraction", get_error_string(err)); 1112 return err; 1113 } 1114 1115 err = hsa_executable_symbol_get_info( 1116 symbol, HSA_EXECUTABLE_SYMBOL_INFO_VARIABLE_SIZE, &(info.size)); 1117 if (err != HSA_STATUS_SUCCESS) { 1118 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1119 "Symbol info size extraction", get_error_string(err)); 1120 return err; 1121 } 1122 1123 atmi_mem_place_t place = ATMI_MEM_PLACE(ATMI_DEVTYPE_GPU, gpu, 0); 1124 DEBUG_PRINT("Symbol %s = %p (%u bytes)\n", name, (void *)info.addr, 1125 info.size); 1126 err = register_allocation(reinterpret_cast<void *>(info.addr), 1127 (size_t)info.size, place); 1128 if (err != HSA_STATUS_SUCCESS) { 1129 return err; 1130 } 1131 SymbolInfoTable[gpu][std::string(name)] = info; 1132 if (strcmp(name, "needs_hostcall_buffer") == 0) 1133 g_atmi_hostcall_required = true; 1134 free(name); 1135 } else { 1136 DEBUG_PRINT("Symbol is an indirect function\n"); 1137 } 1138 return HSA_STATUS_SUCCESS; 1139 } 1140 1141 atmi_status_t Runtime::RegisterModuleFromMemory( 1142 void *module_bytes, size_t module_size, atmi_place_t place, 1143 atmi_status_t (*on_deserialized_data)(void *data, size_t size, 1144 void *cb_state), 1145 void *cb_state, std::vector<hsa_executable_t> &HSAExecutables) { 1146 hsa_status_t err; 1147 int gpu = place.device_id; 1148 assert(gpu >= 0); 1149 1150 DEBUG_PRINT("Trying to load module to GPU-%d\n", gpu); 1151 ATLGPUProcessor &proc = get_processor<ATLGPUProcessor>(place); 1152 hsa_agent_t agent = proc.agent(); 1153 hsa_executable_t executable = {0}; 1154 hsa_profile_t agent_profile; 1155 1156 err = hsa_agent_get_info(agent, HSA_AGENT_INFO_PROFILE, &agent_profile); 1157 if (err != HSA_STATUS_SUCCESS) { 1158 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1159 "Query the agent profile", get_error_string(err)); 1160 return ATMI_STATUS_ERROR; 1161 } 1162 // FIXME: Assume that every profile is FULL until we understand how to build 1163 // GCN with base profile 1164 agent_profile = HSA_PROFILE_FULL; 1165 /* Create the empty executable. */ 1166 err = hsa_executable_create(agent_profile, HSA_EXECUTABLE_STATE_UNFROZEN, "", 1167 &executable); 1168 if (err != HSA_STATUS_SUCCESS) { 1169 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1170 "Create the executable", get_error_string(err)); 1171 return ATMI_STATUS_ERROR; 1172 } 1173 1174 bool module_load_success = false; 1175 do // Existing control flow used continue, preserve that for this patch 1176 { 1177 { 1178 // Some metadata info is not available through ROCr API, so use custom 1179 // code object metadata parsing to collect such metadata info 1180 1181 err = get_code_object_custom_metadata(module_bytes, module_size, gpu); 1182 if (err != HSA_STATUS_SUCCESS) { 1183 DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1184 "Getting custom code object metadata", 1185 get_error_string(err)); 1186 continue; 1187 } 1188 1189 // Deserialize code object. 1190 hsa_code_object_t code_object = {0}; 1191 err = hsa_code_object_deserialize(module_bytes, module_size, NULL, 1192 &code_object); 1193 if (err != HSA_STATUS_SUCCESS) { 1194 DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1195 "Code Object Deserialization", get_error_string(err)); 1196 continue; 1197 } 1198 assert(0 != code_object.handle); 1199 1200 // Mutating the device image here avoids another allocation & memcpy 1201 void *code_object_alloc_data = 1202 reinterpret_cast<void *>(code_object.handle); 1203 atmi_status_t atmi_err = 1204 on_deserialized_data(code_object_alloc_data, module_size, cb_state); 1205 if (atmi_err != ATMI_STATUS_SUCCESS) { 1206 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1207 "Error in deserialized_data callback", 1208 get_atmi_error_string(atmi_err)); 1209 return atmi_err; 1210 } 1211 1212 /* Load the code object. */ 1213 err = 1214 hsa_executable_load_code_object(executable, agent, code_object, NULL); 1215 if (err != HSA_STATUS_SUCCESS) { 1216 DEBUG_PRINT("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1217 "Loading the code object", get_error_string(err)); 1218 continue; 1219 } 1220 1221 // cannot iterate over symbols until executable is frozen 1222 } 1223 module_load_success = true; 1224 } while (0); 1225 DEBUG_PRINT("Modules loaded successful? %d\n", module_load_success); 1226 if (module_load_success) { 1227 /* Freeze the executable; it can now be queried for symbols. */ 1228 err = hsa_executable_freeze(executable, ""); 1229 if (err != HSA_STATUS_SUCCESS) { 1230 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1231 "Freeze the executable", get_error_string(err)); 1232 return ATMI_STATUS_ERROR; 1233 } 1234 1235 err = hsa_executable_iterate_symbols(executable, populate_InfoTables, 1236 static_cast<void *>(&gpu)); 1237 if (err != HSA_STATUS_SUCCESS) { 1238 printf("[%s:%d] %s failed: %s\n", __FILE__, __LINE__, 1239 "Iterating over symbols for execuatable", get_error_string(err)); 1240 return ATMI_STATUS_ERROR; 1241 } 1242 1243 // save the executable and destroy during finalize 1244 HSAExecutables.push_back(executable); 1245 return ATMI_STATUS_SUCCESS; 1246 } else { 1247 return ATMI_STATUS_ERROR; 1248 } 1249 } 1250 1251 } // namespace core 1252