//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "DNBLog.h"
#include "MachVMRegion.h"
#include <dlfcn.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

static const vm_size_t kInvalidPageSize = ~0;

MachVMMemory::MachVMMemory() : m_page_size(kInvalidPageSize), m_err(0) {}

MachVMMemory::~MachVMMemory() {}

nub_size_t MachVMMemory::PageSize(task_t task) {
  if (m_page_size == kInvalidPageSize) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22
    if (task != TASK_NULL) {
      kern_return_t kr;
      mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
      task_vm_info_data_t vm_info;
      kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
      if (kr == KERN_SUCCESS) {
        DNBLogThreadedIf(
            LOG_TASK,
            "MachVMMemory::PageSize task_info returned page size of 0x%x",
            (int)vm_info.page_size);
        m_page_size = vm_info.page_size;
        return m_page_size;
      } else {
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call "
                                   "failed to get page size, TASK_VM_INFO %d, "
                                   "TASK_VM_INFO_COUNT %d, kern return %d",
                         TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
      }
    }
#endif
    m_err = ::host_page_size(::mach_host_self(), &m_page_size);
    if (m_err.Fail())
      m_page_size = 0;
  }
  return m_page_size;
}

nub_size_t MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr,
                                            nub_size_t count) {
  const nub_size_t page_size = PageSize(task);
  if (page_size > 0) {
    nub_size_t page_offset = (addr % page_size);
    nub_size_t bytes_left_in_page = page_size - page_offset;
    if (count > bytes_left_in_page)
      count = bytes_left_in_page;
  }
  return count;
}

nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
                                             DNBRegionInfo *region_info) {
  MachVMRegion vmRegion(task);

  if (vmRegion.GetRegionForAddress(address)) {
    region_info->addr = vmRegion.StartAddress();
    region_info->size = vmRegion.GetByteSize();
    region_info->permissions = vmRegion.GetDNBPermissions();
  } else {
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success()) {
      // vmRegion.GetRegionForAddress() returned false, indicating that
      // "address" wasn't in a valid region, but the "vmRegion" info was
      // successfully read from the task, which means the info describes the
      // next valid region, from which we can infer the size of this invalid
      // region.
      mach_vm_address_t start_addr = vmRegion.StartAddress();
      if (address < start_addr)
        region_info->size = start_addr - address;
    }
    // If we can't get any info about the size from the next region, it means
    // we asked about an address that was past all mappings, so the size of
    // this region will take up all remaining address space.
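    // (INVALID_NUB_ADDRESS is the all-ones address, so the subtraction below
    // yields everything from "address" to the top of the address space.)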
    if (region_info->size == 0)
      region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

    // Not readable, writable or executable
    region_info->permissions = 0;
  }
  return true;
}

// For an integrated graphics chip, this makes the accounting info for 'wired'
// memory more like top.
uint64_t MachVMMemory::GetStolenPages(task_t task) {
  static uint64_t stolenPages = 0;
  static bool calculated = false;
  if (calculated)
    return stolenPages;

  static int mib_reserved[CTL_MAXNAME];
  static int mib_unusable[CTL_MAXNAME];
  static int mib_other[CTL_MAXNAME];
  static size_t mib_reserved_len = 0;
  static size_t mib_unusable_len = 0;
  static size_t mib_other_len = 0;
  int r;

  /* This can be used for testing: */
  // tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

  if (0 == mib_reserved_len) {
    mib_reserved_len = CTL_MAXNAME;

    r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                        &mib_reserved_len);

    if (-1 == r) {
      mib_reserved_len = 0;
      return 0;
    }

    mib_unusable_len = CTL_MAXNAME;

    r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                        &mib_unusable_len);

    if (-1 == r) {
      mib_reserved_len = 0;
      return 0;
    }

    mib_other_len = CTL_MAXNAME;

    r = sysctlnametomib("machdep.memmap.Other", mib_other, &mib_other_len);

    if (-1 == r) {
      mib_reserved_len = 0;
      return 0;
    }
  }

  if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0) {
    uint64_t reserved = 0, unusable = 0, other = 0;
    size_t reserved_len;
    size_t unusable_len;
    size_t other_len;

    reserved_len = sizeof(reserved);
    unusable_len = sizeof(unusable);
    other_len = sizeof(other);

    /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

    if (sysctl(mib_reserved, static_cast<u_int>(mib_reserved_len), &reserved,
               &reserved_len, NULL, 0)) {
      return 0;
    }

    if (sysctl(mib_unusable, static_cast<u_int>(mib_unusable_len), &unusable,
               &unusable_len, NULL, 0)) {
      return 0;
    }

    if (sysctl(mib_other, static_cast<u_int>(mib_other_len), &other, &other_len,
               NULL, 0)) {
      return 0;
    }

    if (reserved_len == sizeof(reserved) && unusable_len == sizeof(unusable) &&
        other_len == sizeof(other)) {
      uint64_t stolen = reserved + unusable + other;
      uint64_t mb128 = 128 * 1024 * 1024ULL;

      if (stolen >= mb128) {
        stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
        stolenPages = stolen / PageSize(task);
      }
    }
  }

  calculated = true;
  return stolenPages;
}

static uint64_t GetPhysicalMemory() {
  // This doesn't change often at all. No need to poll each time.
  static uint64_t physical_memory = 0;
  static bool calculated = false;
  if (calculated)
    return physical_memory;

  size_t len = sizeof(physical_memory);
  sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);

  calculated = true;
  return physical_memory;
}

// rsize and dirty_size are not adjusted for the dyld shared cache and multiple
// __LINKEDIT segments, as they are in vmmap. In practice, dirty_size doesn't
// differ much, but rsize may. There is a performance penalty for the
// adjustment. Right now, only use the dirty_size.
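// Note: as written, this only fills in dirty_size (from the task's "internal"
// page counter); rsize is left unmodified.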
void MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize,
                                  mach_vm_size_t &dirty_size) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22

  task_vm_info_data_t vm_info;
  mach_msg_type_number_t info_count;
  kern_return_t kr;

  info_count = TASK_VM_INFO_COUNT;
  kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                 &info_count);
  if (kr == KERN_SUCCESS)
    dirty_size = vm_info.internal;
#endif
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
  mach_vm_address_t base = 0, size = 0;

  switch (type) {
#if defined(CPU_TYPE_ARM64) && defined(SHARED_REGION_BASE_ARM64)
  case CPU_TYPE_ARM64:
    base = SHARED_REGION_BASE_ARM64;
    size = SHARED_REGION_SIZE_ARM64;
    break;
#endif

  case CPU_TYPE_ARM:
    base = SHARED_REGION_BASE_ARM;
    size = SHARED_REGION_SIZE_ARM;
    break;

  case CPU_TYPE_X86_64:
    base = SHARED_REGION_BASE_X86_64;
    size = SHARED_REGION_SIZE_X86_64;
    break;

  case CPU_TYPE_I386:
    base = SHARED_REGION_BASE_I386;
    size = SHARED_REGION_SIZE_I386;
    break;

  default: {
    // Log error about unknown CPU type
    break;
  }
  }

  return (addr >= base && addr < (base + size));
}

void MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype,
                                  nub_process_t pid, mach_vm_size_t &rprvt,
                                  mach_vm_size_t &vprvt) {
  // Collecting some other info cheaply but not reporting for now.
  mach_vm_size_t empty = 0;
  mach_vm_size_t fw_private = 0;

  mach_vm_size_t aliased = 0;
  bool global_shared_text_data_mapped = false;
  vm_size_t pagesize = PageSize(task);

  for (mach_vm_address_t addr = 0, size = 0;; addr += size) {
    vm_region_top_info_data_t info;
    mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
    mach_port_t object_name;

    kern_return_t kr =
        mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO,
                       (vm_region_info_t)&info, &count, &object_name);
    if (kr != KERN_SUCCESS)
      break;

    if (InSharedRegion(addr, cputype)) {
      // Private Shared
      fw_private += info.private_pages_resident * pagesize;

      // Check if this process has the globally shared text and data regions
      // mapped in. If so, set global_shared_text_data_mapped to TRUE and avoid
      // checking again.
      if (global_shared_text_data_mapped == FALSE &&
          info.share_mode == SM_EMPTY) {
        vm_region_basic_info_data_64_t b_info;
        mach_vm_address_t b_addr = addr;
        mach_vm_size_t b_size = size;
        count = VM_REGION_BASIC_INFO_COUNT_64;

        kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO,
                            (vm_region_info_t)&b_info, &count, &object_name);
        if (kr != KERN_SUCCESS)
          break;

        if (b_info.reserved) {
          global_shared_text_data_mapped = TRUE;
        }
      }

      // Short-circuit the loop if this isn't a shared private region, since
      // that's the only region type we care about within the current address
      // range.
      if (info.share_mode != SM_PRIVATE) {
        continue;
      }
    }

    // Update counters according to the region type.
    if (info.share_mode == SM_COW && info.ref_count == 1) {
      // Treat single reference SM_COW as SM_PRIVATE
      info.share_mode = SM_PRIVATE;
    }

    switch (info.share_mode) {
    case SM_LARGE_PAGE:
      // Treat SM_LARGE_PAGE the same as SM_PRIVATE
      // since they are not shareable and are wired.
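      // (Deliberate fall-through into the SM_PRIVATE case below.)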
    case SM_PRIVATE:
      rprvt += info.private_pages_resident * pagesize;
      rprvt += info.shared_pages_resident * pagesize;
      vprvt += size;
      break;

    case SM_EMPTY:
      empty += size;
      break;

    case SM_COW:
    case SM_SHARED: {
      if (pid == 0) {
        // Treat kernel_task specially
        if (info.share_mode == SM_COW) {
          rprvt += info.private_pages_resident * pagesize;
          vprvt += size;
        }
        break;
      }

      if (info.share_mode == SM_COW) {
        rprvt += info.private_pages_resident * pagesize;
        vprvt += info.private_pages_resident * pagesize;
      }
      break;
    }
    default:
      // Log that something is really bad.
      break;
    }
  }

  rprvt += aliased;
}

static void GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable,
                                     uint64_t &anonymous) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22

  kern_return_t kr;
  mach_msg_type_number_t info_count;
  task_vm_info_data_t vm_info;

  info_count = TASK_VM_INFO_COUNT;
  kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                 &info_count);
  if (kr == KERN_SUCCESS) {
    purgeable = vm_info.purgeable_volatile_resident;
    anonymous =
        vm_info.internal + vm_info.compressed - vm_info.purgeable_volatile_pmap;
  }

#endif
}

#if defined(HOST_VM_INFO64_COUNT)
nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo,
    uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize,
    mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size,
    mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
#else
nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vminfo,
    uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize,
    mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size,
    mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
#endif
{
  if (scanType & eProfileHostMemory)
    physical_memory = GetPhysicalMemory();

  if (scanType & eProfileMemory) {
    static mach_port_t localHost = mach_host_self();
#if defined(HOST_VM_INFO64_COUNT)
    mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
    host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo,
                      &count);
#else
    mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
    host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vminfo, &count);
    vminfo.wire_count += GetStolenPages(task);
#endif

    /* We are no longer reporting these. Let's not waste time.

    GetMemorySizes(task, cputype, pid, rprvt, vprvt);
    rsize = ti.resident_size;
    vsize = ti.virtual_size;

    if (scanType & eProfileMemoryDirtyPage)
    {
      // This uses the vmmap strategy. We don't use the returned rsize for now.
      // We prefer to match top's version since that's what we do for the rest
      // of the metrics.
      GetRegionSizes(task, rsize, dirty_size);
    }
    */

    if (scanType & eProfileMemoryAnonymous) {
      GetPurgeableAndAnonymous(task, purgeable, anonymous);
    }
  }

  return true;
}

nub_size_t MachVMMemory::Read(task_t task, nub_addr_t address, void *data,
                              nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_read = 0;
  nub_addr_t curr_addr = address;
  uint8_t *curr_data = (uint8_t *)data;
  while (total_bytes_read < data_count) {
    mach_vm_size_t curr_size =
        MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
    mach_msg_type_number_t curr_bytes_read = 0;
    vm_offset_t vm_memory = 0;
    m_err = ::mach_vm_read(task, curr_addr, curr_size, &vm_memory,
                           &curr_bytes_read);

    if (DNBLogCheckLogBit(LOG_MEMORY))
      m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, data => %8.8p, dataCnt => %i )",
                        task, (uint64_t)curr_addr, (uint64_t)curr_size,
                        vm_memory, curr_bytes_read);

    if (m_err.Success()) {
      if (curr_bytes_read != curr_size) {
        if (DNBLogCheckLogBit(LOG_MEMORY))
          m_err.LogThreaded(
              "::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, "
              "data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes",
              task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory,
              curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
      }
      ::memcpy(curr_data, (void *)vm_memory, curr_bytes_read);
      ::vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
      total_bytes_read += curr_bytes_read;
      curr_addr += curr_bytes_read;
      curr_data += curr_bytes_read;
    } else {
      break;
    }
  }
  return total_bytes_read;
}

nub_size_t MachVMMemory::Write(task_t task, nub_addr_t address,
                               const void *data, nub_size_t data_count) {
  MachVMRegion vmRegion(task);

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;

  while (total_bytes_written < data_count) {
    if (vmRegion.GetRegionForAddress(curr_addr)) {
      mach_vm_size_t curr_data_count = data_count - total_bytes_written;
      mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
      if (region_bytes_left == 0) {
        break;
      }
      if (curr_data_count > region_bytes_left)
        curr_data_count = region_bytes_left;

      if (vmRegion.SetProtections(curr_addr, curr_data_count,
                                  VM_PROT_READ | VM_PROT_WRITE)) {
        nub_size_t bytes_written =
            WriteRegion(task, curr_addr, curr_data, curr_data_count);
        if (bytes_written <= 0) {
          // Error should have already been posted by WriteRegion...
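          // Give up now; the caller sees the short total_bytes_written count.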
          break;
        } else {
          total_bytes_written += bytes_written;
          curr_addr += bytes_written;
          curr_data += bytes_written;
        }
      } else {
        DNBLogThreadedIf(
            LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on "
                                    "region for address: [0x%8.8llx-0x%8.8llx)",
            (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
        break;
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS,
                       "Failed to get region for address: 0x%8.8llx",
                       (uint64_t)address);
      break;
    }
  }

  return total_bytes_written;
}

nub_size_t MachVMMemory::WriteRegion(task_t task, const nub_addr_t address,
                                     const void *data,
                                     const nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;
  while (total_bytes_written < data_count) {
    mach_msg_type_number_t curr_data_count =
        static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(
            task, curr_addr, data_count - total_bytes_written));
    m_err =
        ::mach_vm_write(task, curr_addr, (pointer_t)curr_data, curr_data_count);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "data = %8.8p, dataCnt = %u )",
                        task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined(__i386__) && !defined(__x86_64__)
    vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

    m_err = ::vm_machine_attribute(task, curr_addr, curr_data_count,
                                   MATTR_CACHE, &mattr_value);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = "
                        "0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value "
                        "=> MATTR_VAL_CACHE_FLUSH )",
                        task, (uint64_t)curr_addr, curr_data_count);
#endif

    if (m_err.Success()) {
      total_bytes_written += curr_data_count;
      curr_addr += curr_data_count;
      curr_data += curr_data_count;
    } else {
      break;
    }
  }
  return total_bytes_written;
}