//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "DNBLog.h"
#include "MachVMRegion.h"
#include <dlfcn.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>

static const vm_size_t kInvalidPageSize = ~0;

MachVMMemory::MachVMMemory() : m_page_size(kInvalidPageSize), m_err(0) {}

MachVMMemory::~MachVMMemory() {}

nub_size_t MachVMMemory::PageSize(task_t task) {
  if (m_page_size == kInvalidPageSize) {
#if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22
    if (task != TASK_NULL) {
      kern_return_t kr;
      mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
      task_vm_info_data_t vm_info;
      kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
      if (kr == KERN_SUCCESS) {
        DNBLogThreadedIf(
            LOG_TASK,
            "MachVMMemory::PageSize task_info returned page size of 0x%x",
            (int)vm_info.page_size);
        m_page_size = vm_info.page_size;
        return m_page_size;
      } else {
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call "
                                   "failed to get page size, TASK_VM_INFO %d, "
                                   "TASK_VM_INFO_COUNT %d, kern return %d",
                         TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
      }
    }
#endif
    m_err = ::host_page_size(::mach_host_self(), &m_page_size);
    if (m_err.Fail())
      m_page_size = 0;
  }
  return m_page_size;
}

nub_size_t MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr,
                                            nub_size_t count) {
  const nub_size_t page_size = PageSize(task);
  if (page_size > 0) {
    nub_size_t page_offset = (addr % page_size);
    nub_size_t bytes_left_in_page = page_size - page_offset;
    if (count > bytes_left_in_page)
      count = bytes_left_in_page;
  }
  return count;
}

nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address,
                                             DNBRegionInfo *region_info) {
  MachVMRegion vmRegion(task);

  if (vmRegion.GetRegionForAddress(address)) {
    region_info->addr = vmRegion.StartAddress();
    region_info->size = vmRegion.GetByteSize();
    region_info->permissions = vmRegion.GetDNBPermissions();
  } else {
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success()) {
      // vmRegion.GetRegionForAddress() returned false, indicating that
      // "address" wasn't in a valid region, but the "vmRegion" info was
      // successfully read from the task, which means the info describes the
      // next valid region, from which we can infer the size of this invalid
      // region.
      mach_vm_address_t start_addr = vmRegion.StartAddress();
      if (address < start_addr)
        region_info->size = start_addr - address;
    }
    // If we can't get any info about the size from the next region it means
    // we asked about an address that was past all mappings, so the size
    // of this region will take up all remaining address space.
    if (region_info->size == 0)
      region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

    // Not readable, writeable or executable
    region_info->permissions = 0;
  }
  return true;
}
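// Usage sketch for GetMemoryRegionInfo() (illustrative only; `task` and
// `addr` are assumed to come from the active debug session). The call answers
// both "which mapping contains addr?" and, when addr is unmapped, "how big is
// the hole?": if addr == 0x2000 lies in a gap and the next valid region
// starts at 0x5000, the result is { addr = 0x2000, size = 0x3000,
// permissions = 0 }.
//
//   DNBRegionInfo info;
//   MachVMMemory vm_memory;
//   vm_memory.GetMemoryRegionInfo(task, addr, &info);
//   // For an unmapped addr, info.size spans the gap and info.permissions
//   // is 0.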
static uint64_t GetPhysicalMemory() {
  // This doesn't change often at all. No need to poll each time.
  static uint64_t physical_memory = 0;
  static bool calculated = false;
  if (calculated)
    return physical_memory;

  size_t len = sizeof(physical_memory);
  sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);

  calculated = true;
  return physical_memory;
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
  mach_vm_address_t base = 0, size = 0;

  switch (type) {
#if defined(CPU_TYPE_ARM64) && defined(SHARED_REGION_BASE_ARM64)
  case CPU_TYPE_ARM64:
    base = SHARED_REGION_BASE_ARM64;
    size = SHARED_REGION_SIZE_ARM64;
    break;
#endif

  case CPU_TYPE_ARM:
    base = SHARED_REGION_BASE_ARM;
    size = SHARED_REGION_SIZE_ARM;
    break;

  case CPU_TYPE_X86_64:
    base = SHARED_REGION_BASE_X86_64;
    size = SHARED_REGION_SIZE_X86_64;
    break;

  case CPU_TYPE_I386:
    base = SHARED_REGION_BASE_I386;
    size = SHARED_REGION_SIZE_I386;
    break;

  default: {
    // Log error about unknown CPU type
    break;
  }
  }

  return (addr >= base && addr < (base + size));
}

nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo,
    uint64_t &physical_memory, mach_vm_size_t &anonymous,
    mach_vm_size_t &phys_footprint) {
  if (scanType & eProfileHostMemory)
    physical_memory = GetPhysicalMemory();

  if (scanType & eProfileMemory) {
    static mach_port_t localHost = mach_host_self();
    mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
    host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo,
                      &count);

    kern_return_t kr;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                   &info_count);
    if (kr == KERN_SUCCESS) {
      if (scanType & eProfileMemoryAnonymous) {
        anonymous = vm_info.internal + vm_info.compressed -
                    vm_info.purgeable_volatile_pmap;
      }

      phys_footprint = vm_info.phys_footprint;
    }
  }

  return true;
}
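// Profiling sketch (illustrative caller; the `ti` argument is not consumed by
// the code above, so a zero-initialized struct is shown):
//
//   vm_statistics64_data_t vminfo;
//   uint64_t physical_memory = 0;
//   mach_vm_size_t anonymous = 0, phys_footprint = 0;
//   struct task_basic_info ti = {};
//   vm_memory.GetMemoryProfile(eProfileHostMemory | eProfileMemory |
//                                  eProfileMemoryAnonymous,
//                              task, ti, cputype, pid, vminfo,
//                              physical_memory, anonymous, phys_footprint);
//
// The "anonymous" figure is derived from TASK_VM_INFO_PURGEABLE as
// internal + compressed - purgeable_volatile_pmap, i.e. anonymous pages plus
// pages held by the compressor, excluding volatile purgeable memory that the
// kernel can reclaim on its own.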
nub_size_t MachVMMemory::Read(task_t task, nub_addr_t address, void *data,
                              nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_read = 0;
  nub_addr_t curr_addr = address;
  uint8_t *curr_data = (uint8_t *)data;
  while (total_bytes_read < data_count) {
    mach_vm_size_t curr_size =
        MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
    mach_msg_type_number_t curr_bytes_read = 0;
    vm_offset_t vm_memory = 0;
    m_err = ::mach_vm_read(task, curr_addr, curr_size, &vm_memory,
                           &curr_bytes_read);

    if (DNBLogCheckLogBit(LOG_MEMORY))
      m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, data => %8.8p, dataCnt => %i )",
                        task, (uint64_t)curr_addr, (uint64_t)curr_size,
                        vm_memory, curr_bytes_read);

    if (m_err.Success()) {
      if (curr_bytes_read != curr_size) {
        if (DNBLogCheckLogBit(LOG_MEMORY))
          m_err.LogThreaded(
              "::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, "
              "data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes",
              task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory,
              curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
      }
      ::memcpy(curr_data, (void *)vm_memory, curr_bytes_read);
      ::vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read);
      total_bytes_read += curr_bytes_read;
      curr_addr += curr_bytes_read;
      curr_data += curr_bytes_read;
    } else {
      break;
    }
  }
  return total_bytes_read;
}

nub_size_t MachVMMemory::Write(task_t task, nub_addr_t address,
                               const void *data, nub_size_t data_count) {
  MachVMRegion vmRegion(task);

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;

  while (total_bytes_written < data_count) {
    if (vmRegion.GetRegionForAddress(curr_addr)) {
      mach_vm_size_t curr_data_count = data_count - total_bytes_written;
      mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
      if (region_bytes_left == 0) {
        break;
      }
      if (curr_data_count > region_bytes_left)
        curr_data_count = region_bytes_left;

      if (vmRegion.SetProtections(curr_addr, curr_data_count,
                                  VM_PROT_READ | VM_PROT_WRITE)) {
        nub_size_t bytes_written =
            WriteRegion(task, curr_addr, curr_data, curr_data_count);
        if (bytes_written == 0) {
          // Status should have already been posted by WriteRegion...
          break;
        } else {
          total_bytes_written += bytes_written;
          curr_addr += bytes_written;
          curr_data += bytes_written;
        }
      } else {
        DNBLogThreadedIf(
            LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on "
                                    "region for address: [0x%8.8llx-0x%8.8llx)",
            (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
        break;
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS,
                       "Failed to get region for address: 0x%8.8llx",
                       (uint64_t)address);
      break;
    }
  }

  return total_bytes_written;
}
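// Write() above works region by region on purpose: a debugger frequently has
// to patch pages that are currently mapped read-only (for example when
// inserting breakpoint opcodes), so each region is switched to
// VM_PROT_READ | VM_PROT_WRITE via MachVMRegion::SetProtections() before the
// actual copy is handed to WriteRegion() below. A minimal caller-side sketch
// (the address and opcode are illustrative):
//
//   const uint8_t trap = 0xCC; // INT3 on x86; purely an example payload
//   if (vm_memory.Write(task, breakpoint_addr, &trap, sizeof(trap)) !=
//       sizeof(trap)) {
//     // The write failed or was truncated.
//   }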
nub_size_t MachVMMemory::WriteRegion(task_t task, const nub_addr_t address,
                                     const void *data,
                                     const nub_size_t data_count) {
  if (data == NULL || data_count == 0)
    return 0;

  nub_size_t total_bytes_written = 0;
  nub_addr_t curr_addr = address;
  const uint8_t *curr_data = (const uint8_t *)data;
  while (total_bytes_written < data_count) {
    mach_msg_type_number_t curr_data_count =
        static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(
            task, curr_addr, data_count - total_bytes_written));
    m_err =
        ::mach_vm_write(task, curr_addr, (pointer_t)curr_data, curr_data_count);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "data = %8.8p, dataCnt = %u )",
                        task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined(__i386__) && !defined(__x86_64__)
    vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

    m_err = ::vm_machine_attribute(task, curr_addr, curr_data_count,
                                   MATTR_CACHE, &mattr_value);
    if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
      m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = "
                        "0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value "
                        "=> MATTR_VAL_CACHE_FLUSH )",
                        task, (uint64_t)curr_addr, curr_data_count);
#endif

    if (m_err.Success()) {
      total_bytes_written += curr_data_count;
      curr_addr += curr_data_count;
      curr_data += curr_data_count;
    } else {
      break;
    }
  }
  return total_bytes_written;
}
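// Note on the write path as a whole: WriteRegion() never lets a single
// ::mach_vm_write() span a page boundary (MaxBytesLeftInPage() clamps each
// chunk), and on non-x86 targets it flushes the caches for the written range
// with MATTR_VAL_CACHE_FLUSH so the target does not keep executing stale
// instructions. Reads use the same per-page chunking; a minimal read-back
// sketch, assuming `task` and `addr` come from the active session:
//
//   uint32_t value = 0;
//   if (vm_memory.Read(task, addr, &value, sizeof(value)) == sizeof(value)) {
//     // `value` now holds four bytes copied out of the inferior at addr.
//   }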