//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <dlfcn.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that
            // "address" wasn't in a valid region, but the "vmRegion" info was
            // successfully read from the task, which means it describes the
            // next valid region, from which we can infer the size of this
            // invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any size info from the next region, it means we
        // asked about an address that was past all mappings, so the size of
        // this region takes up all remaining address space.
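        // For example, assuming INVALID_NUB_ADDRESS is the all-ones address
        // (UINT64_MAX on 64-bit targets), a query beyond the last mapping
        // yields a size spanning from the queried address to the end of the
        // address space.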
        if (region_info->size == 0)
            region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}

// For an integrated graphics chip, this makes the accounting info for 'wired'
// memory more like top's.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (sysctl (mib_reserved,
                    static_cast<u_int>(mib_reserved_len),
                    &reserved,
                    &reserved_len,
                    NULL,
                    0))
        {
            return 0;
        }

        if (sysctl (mib_unusable,
                    static_cast<u_int>(mib_unusable_len),
                    &unusable,
                    &unusable_len,
                    NULL,
                    0))
        {
            return 0;
        }

        if (sysctl (mib_other,
                    static_cast<u_int>(mib_other_len),
                    &other,
                    &other_len,
                    NULL,
                    0))
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved) &&
            unusable_len == sizeof(unusable) &&
            other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen &= ~(mb128 - 1); // round down to a 128 MB boundary
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}

static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    calculated = true;
    return physical_memory;
}

// rsize and dirty_size are not adjusted for the dyld shared cache or for
// multiple __LINKEDIT segments, as they are in vmmap. In practice, dirty_size
// doesn't differ much, but rsize may. There is a performance penalty for the
// adjustment. Right now, only dirty_size is used.
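// Note: when TASK_VM_INFO is unavailable (or older than version 22), the body
// below compiles to nothing and neither output parameter is written, so
// callers should pre-initialize rsize and dirty_size.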
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;
#endif
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch(type) {
#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
        case CPU_TYPE_ARM64:
            base = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            break;
#endif

        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default:
            // Log an error about the unknown CPU type.
            break;
    }

    return (addr >= base && addr < (base + size));
}

void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting it for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data
            // regions mapped in. If so, set global_shared_text_data_mapped
            // to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short-circuit the loop if this isn't a shared private region,
            // since that's the only region type we care about within the
            // current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single-reference SM_COW as SM_PRIVATE.
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
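                // Deliberate fall-through into SM_PRIVATE below.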
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially.
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // Log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
    {
        purgeable = vm_info.purgeable_volatile_resident;
        anonymous = vm_info.internal + vm_info.compressed - vm_info.purgeable_volatile_pmap;
    }

#endif
}

nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap's strategy. We don't use the returned rsize for
            // now; we prefer to match top's version, since that's what we do
            // for the rest of the metrics.
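            // dirty_size comes from TASK_VM_INFO_PURGEABLE's internal byte
            // count, while rsize keeps the top-style ti.resident_size value
            // assigned above.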
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}

nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}

nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written == 0)
                {
                    // The error should already have been posted by WriteRegion...
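                    // Give up on the remaining bytes; total_bytes_written
                    // reflects what has landed so far.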
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}

nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written));
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        // Non-x86 targets may have incoherent instruction and data caches, so
        // flush the cache lines covering the bytes we just wrote.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}