//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "DNBLog.h"
#include "MachVMRegion.h"
#include <dlfcn.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

static const vm_size_t kInvalidPageSize = ~0;

MachVMMemory::MachVMMemory() : m_page_size(kInvalidPageSize), m_err(0) {}

MachVMMemory::~MachVMMemory() {}

nub_size_t MachVMMemory::PageSize(task_t task) {
  if (m_page_size == kInvalidPageSize) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22
    if (task != TASK_NULL) {
      kern_return_t kr;
      mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
      task_vm_info_data_t vm_info;
      kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
      if (kr == KERN_SUCCESS) {
        DNBLogThreadedIf(
            LOG_TASK,
            "MachVMMemory::PageSize task_info returned page size of 0x%x",
            (int)vm_info.page_size);
        m_page_size = vm_info.page_size;
        return m_page_size;
      } else {
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call "
                                   "failed to get page size, TASK_VM_INFO %d, "
                                   "TASK_VM_INFO_COUNT %d, kern return %d",
                         TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
      }
    }
#endif
    m_err = ::host_page_size(::mach_host_self(), &m_page_size);
    if (m_err.Fail())
      m_page_size = 0;
  }
  return m_page_size;
}

nub_size_t MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr,
                                            nub_size_t count) {
  const nub_size_t page_size = PageSize(task);
  if (page_size > 0) {
    nub_size_t page_offset = (addr % page_size);
    nub_size_t bytes_left_in_page = page_size - page_offset;
    if (count > bytes_left_in_page)
      count = bytes_left_in_page;
  }
  return count;
}

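// Fill in "region_info" for the memory region containing "address" in "task".
// If "address" does not fall inside a mapped region, the entry is synthesized:
// the size is inferred from the start of the next valid region (or from the
// remaining address space when no further mappings exist) and the permissions
// are cleared. Always returns true so callers can describe the unmapped gap.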
nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
                                             DNBRegionInfo *region_info) {
  MachVMRegion vmRegion(task);

  if (vmRegion.GetRegionForAddress(address)) {
    region_info->addr = vmRegion.StartAddress();
    region_info->size = vmRegion.GetByteSize();
    region_info->permissions = vmRegion.GetDNBPermissions();
  } else {
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success()) {
      // vmRegion.GetRegionForAddress() returned false, indicating that
      // "address" wasn't in a valid region, but the "vmRegion" info was
      // successfully read from the task, which means it describes the next
      // valid region, from which we can infer the size of this invalid region.
      mach_vm_address_t start_addr = vmRegion.StartAddress();
      if (address < start_addr)
        region_info->size = start_addr - address;
    }
    // If we can't get any info about the size from the next region it means
    // we asked about an address that was past all mappings, so the size
    // of this region will take up all remaining address space.
    if (region_info->size == 0)
      region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

    // Not readable, writeable or executable
    region_info->permissions = 0;
  }
  return true;
}

static uint64_t GetPhysicalMemory() {
  // This doesn't change often at all. No need to poll each time.
  static uint64_t physical_memory = 0;
  static bool calculated = false;
  if (calculated)
    return physical_memory;

  size_t len = sizeof(physical_memory);
  sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);

  calculated = true;
  return physical_memory;
}

nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo,
    uint64_t &physical_memory, mach_vm_size_t &anonymous,
    mach_vm_size_t &phys_footprint) {
  if (scanType & eProfileHostMemory)
    physical_memory = GetPhysicalMemory();

  if (scanType & eProfileMemory) {
    static mach_port_t localHost = mach_host_self();
    mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
    host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo,
                      &count);

    kern_return_t kr;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                   &info_count);
    if (kr == KERN_SUCCESS) {
      if (scanType & eProfileMemoryAnonymous) {
        anonymous = vm_info.internal + vm_info.compressed -
                    vm_info.purgeable_volatile_pmap;
      }

      phys_footprint = vm_info.phys_footprint;
    }
  }

  return true;
}

nub_size_t MachVMMemory::Read(task_t task, nub_addr_t address, void *data,
                              nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_read = 0;
  nub_addr_t curr_addr = address;
  uint8_t *curr_data = (uint8_t *)data;
  while (total_bytes_read < data_count) {
    mach_vm_size_t curr_size =
        MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
    mach_msg_type_number_t curr_bytes_read = 0;
    vm_offset_t vm_memory = 0;
    m_err = ::mach_vm_read(task, curr_addr, curr_size, &vm_memory,
                           &curr_bytes_read);

    if (DNBLogCheckLogBit(LOG_MEMORY))
      m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, data => %8.8p, dataCnt => %i )",
                        task, (uint64_t)curr_addr, (uint64_t)curr_size,
                        vm_memory, curr_bytes_read);

    if (m_err.Success()) {
      if (curr_bytes_read != curr_size) {
        if (DNBLogCheckLogBit(LOG_MEMORY))
          m_err.LogThreaded(
              "::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, "
              "data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes",
              task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory,
              curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
      }
      ::memcpy(curr_data, (void *)vm_memory, curr_bytes_read);
      ::vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
      total_bytes_read += curr_bytes_read;
      curr_addr += curr_bytes_read;
      curr_data += curr_bytes_read;
    } else {
      break;
    }
  }
  return total_bytes_read;
}

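// Write "data_count" bytes from "data" into the task's memory at "address".
// The write proceeds region by region: each region is first given read/write
// protections via SetProtections() before its portion is written with
// WriteRegion(). Returns the number of bytes actually written, which may be
// short if a region lookup, protection change, or write fails.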
nub_size_t MachVMMemory::Write(task_t task, nub_addr_t address,
                               const void *data, nub_size_t data_count) {
  MachVMRegion vmRegion(task);

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;

  while (total_bytes_written < data_count) {
    if (vmRegion.GetRegionForAddress(curr_addr)) {
      mach_vm_size_t curr_data_count = data_count - total_bytes_written;
      mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
      if (region_bytes_left == 0) {
        break;
      }
      if (curr_data_count > region_bytes_left)
        curr_data_count = region_bytes_left;

      if (vmRegion.SetProtections(curr_addr, curr_data_count,
                                  VM_PROT_READ | VM_PROT_WRITE)) {
        nub_size_t bytes_written =
            WriteRegion(task, curr_addr, curr_data, curr_data_count);
        if (bytes_written <= 0) {
          // Status should have already been posted by WriteRegion...
          break;
        } else {
          total_bytes_written += bytes_written;
          curr_addr += bytes_written;
          curr_data += bytes_written;
        }
      } else {
        DNBLogThreadedIf(
            LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on "
                                    "region for address: [0x%8.8llx-0x%8.8llx)",
            (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
        break;
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS,
                       "Failed to get region for address: 0x%8.8llx",
                       (uint64_t)address);
      break;
    }
  }

  return total_bytes_written;
}

nub_size_t MachVMMemory::WriteRegion(task_t task, const nub_addr_t address,
                                     const void *data,
                                     const nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;
  while (total_bytes_written < data_count) {
    mach_msg_type_number_t curr_data_count =
        static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(
            task, curr_addr, data_count - total_bytes_written));
    m_err = ::mach_vm_write(task, curr_addr, (pointer_t)curr_data,
                            curr_data_count);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "data = %8.8p, dataCnt = %u )",
                        task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined(__i386__) && !defined(__x86_64__)
    vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

    m_err = ::vm_machine_attribute(task, curr_addr, curr_data_count,
                                   MATTR_CACHE, &mattr_value);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = "
                        "0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value "
                        "=> MATTR_VAL_CACHE_FLUSH )",
                        task, (uint64_t)curr_addr, curr_data_count);
#endif

    if (m_err.Success()) {
      total_bytes_written += curr_data_count;
      curr_addr += curr_data_count;
      curr_data += curr_data_count;
    } else {
      break;
    }
  }
  return total_bytes_written;
}