1 //===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 //  Created by Greg Clayton on 6/26/07.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "MachVMMemory.h"
15 #include "MachVMRegion.h"
16 #include "DNBLog.h"
17 #include <mach/mach_vm.h>
18 #include <mach/shared_region.h>
19 #include <sys/sysctl.h>
20 
21 MachVMMemory::MachVMMemory() :
22     m_page_size    (kInvalidPageSize),
23     m_err        (0)
24 {
25 }
26 
27 MachVMMemory::~MachVMMemory()
28 {
29 }
30 
31 nub_size_t
32 MachVMMemory::PageSize()
33 {
34     if (m_page_size == kInvalidPageSize)
35     {
36         m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
37         if (m_err.Fail())
38             m_page_size = 0;
39     }
40     return m_page_size;
41 }
42 
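// Clamp "count" so that a transfer starting at "addr" never crosses a page
// boundary. Worked example (hypothetical values): with a 4096-byte page and
// addr = 0x1FF0, the page offset is 0xFF0, leaving 16 bytes in the page, so
// a requested count of 64 is clamped to 16.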
43 nub_size_t
44 MachVMMemory::MaxBytesLeftInPage(nub_addr_t addr, nub_size_t count)
45 {
46     const nub_size_t page_size = PageSize();
47     if (page_size > 0)
48     {
49         nub_size_t page_offset = (addr % page_size);
50         nub_size_t bytes_left_in_page = page_size - page_offset;
51         if (count > bytes_left_in_page)
52             count = bytes_left_in_page;
53     }
54     return count;
55 }
56 
57 nub_bool_t
58 MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
59 {
60     MachVMRegion vmRegion(task);
61 
62     if (vmRegion.GetRegionForAddress(address))
63     {
64         region_info->addr = vmRegion.StartAddress();
65         region_info->size = vmRegion.GetByteSize();
66         region_info->permissions = vmRegion.GetDNBPermissions();
67     }
68     else
69     {
70         region_info->addr = address;
71         region_info->size = 0;
72         if (vmRegion.GetError().Success())
73         {
            // vmRegion.GetRegionForAddress() returned false, indicating that
            // "address" wasn't in a valid region, but the "vmRegion" info was
            // successfully read from the task, which means the info describes
            // the next valid region, from which we can infer the size of this
            // invalid region.
78             mach_vm_address_t start_addr = vmRegion.StartAddress();
79             if (address < start_addr)
80                 region_info->size = start_addr - address;
81         }
        // If we can't get any info about the size from the next region, just
        // fill 1 in as the byte size.
84         if (region_info->size == 0)
85             region_info->size = 1;
86 
        // Not readable, writable, or executable
88         region_info->permissions = 0;
89     }
90     return true;
91 }
92 
// For integrated graphics chips, this makes the accounting info for 'wired'
// memory more closely match what top reports.
94 static uint64_t GetStolenPages()
95 {
96     static uint64_t stolenPages = 0;
97     static bool calculated = false;
98     if (calculated) return stolenPages;
99 
    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    // For testing, a fixed value can be substituted here, e.g.:
    // stolenPages = (256 * 1024 * 1024ULL) / vm_page_size;

    if (mib_reserved_len == 0)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }
147 
    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        // These are all declared as QUAD/uint64_t sysctls in the kernel.

        if (sysctl(mib_reserved, mib_reserved_len, &reserved,
                   &reserved_len, NULL, 0) == -1)
        {
            return 0;
        }

        if (sysctl(mib_unusable, mib_unusable_len, &unusable,
                   &unusable_len, NULL, 0) == -1)
        {
            return 0;
        }

        if (sysctl(mib_other, mib_other_len, &other,
                   &other_len, NULL, 0) == -1)
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                // Round down to the nearest 128 MB boundary. For example,
                // 192 MB of stolen memory rounds down to 128 MB, which is
                // 32768 pages at a 4 KiB page size.
                stolen = stolen & ~(mb128 - 1);
                stolenPages = stolen / vm_page_size;
            }
        }
    }
193 
194     calculated = true;
195     return stolenPages;
196 }
197 
198 static uint64_t GetPhysicalMemory()
199 {
200     // This doesn't change often at all. No need to poll each time.
201     static uint64_t physical_memory = 0;
202     static bool calculated = false;
203     if (calculated) return physical_memory;
204 
205     int mib[2];
206     mib[0] = CTL_HW;
207     mib[1] = HW_MEMSIZE;
208     size_t len = sizeof(physical_memory);
    if (sysctl(mib, 2, &physical_memory, &len, NULL, 0) == 0)
        calculated = true;
    return physical_memory;
211 }
212 
// rsize and dirty_size are not adjusted for the dyld shared cache and for
// multiple __LINKEDIT segments, as they are in vmmap. In practice dirty_size
// doesn't differ much, but rsize may; the adjustment also carries a
// performance penalty. Right now, only dirty_size is used.
214 static void GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
215 {
216     mach_vm_address_t address = 0;
217     mach_vm_size_t size;
218     kern_return_t err = 0;
219     unsigned nestingDepth = 0;
220     mach_vm_size_t pages_resident = 0;
221     mach_vm_size_t pages_dirtied = 0;
222 
223     while (1)
224     {
225         mach_msg_type_number_t  count;
226         struct vm_region_submap_info_64 info;
227 
228         count = VM_REGION_SUBMAP_INFO_COUNT_64;
229         err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // We walked past the last region in the task's address space;
            // this is the normal way out of the loop.
            break;
        }
        else if (err)
        {
            mach_error("mach_vm_region_recurse", err);
            break;
        }
240 
241         bool should_count = true;
        if (info.is_submap)
        {
            // This region is a submap; descend into it without advancing the
            // address so that its contents get visited next.
            nestingDepth++;
            should_count = false;
        }
247         else
248         {
249             // Don't count malloc stack logging data in the TOTAL VM usage lines.
250             if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
251                 should_count = false;
252             // Don't count system shared library region not used by this process.
253             if (address >= SHARED_REGION_BASE && address < (SHARED_REGION_BASE + SHARED_REGION_SIZE))
254                 should_count = false;
255 
256             address = address+size;
257         }
258 
259         if (should_count)
260         {
261             pages_resident += info.pages_resident;
262             pages_dirtied += info.pages_dirtied;
263         }
264     }
265 
266     rsize = pages_resident * vm_page_size;
267     dirty_size = pages_dirtied * vm_page_size;
268 }
269 
270 // Test whether the virtual address is within the architecture's shared region.
271 static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
272 {
273     mach_vm_address_t base = 0, size = 0;
274 
275     switch(type) {
276         case CPU_TYPE_ARM:
277             base = SHARED_REGION_BASE_ARM;
278             size = SHARED_REGION_SIZE_ARM;
279             break;
280 
281         case CPU_TYPE_X86_64:
282             base = SHARED_REGION_BASE_X86_64;
283             size = SHARED_REGION_SIZE_X86_64;
284             break;
285 
286         case CPU_TYPE_I386:
287             base = SHARED_REGION_BASE_I386;
288             size = SHARED_REGION_SIZE_I386;
289             break;
290 
        default: {
            // Unknown CPU type: base and size stay zero, so the range check
            // below always returns false.
            // TODO: log an error about the unknown CPU type.
            break;
        }
    }

    return (addr >= base && addr < (base + size));
299 }
300 
301 static void GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
302 {
    // Zero the outputs; they are only ever accumulated into below.
    rprvt = 0;
    vprvt = 0;

    // Collect some other info cheaply, but don't report it for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    mach_vm_size_t pagesize = vm_page_size;
    bool global_shared_text_data_mapped = false;
310 
311     for (mach_vm_address_t addr=0, size=0; ; addr += size)
312     {
313         vm_region_top_info_data_t info;
314         mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
315         mach_port_t object_name;
316 
317         kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
318         if (kr != KERN_SUCCESS) break;
319 
320         if (InSharedRegion(addr, cputype))
321         {
322             // Private Shared
323             fw_private += info.private_pages_resident * pagesize;
324 
            // Check if this process has the globally shared text and data
            // regions mapped in. If so, set global_shared_text_data_mapped
            // to true and avoid checking again.
            if (!global_shared_text_data_mapped && info.share_mode == SM_EMPTY) {
327                 vm_region_basic_info_data_64_t  b_info;
328                 mach_vm_address_t b_addr = addr;
329                 mach_vm_size_t b_size = size;
330                 count = VM_REGION_BASIC_INFO_COUNT_64;
331 
332                 kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
333                 if (kr != KERN_SUCCESS) break;
334 
335                 if (b_info.reserved) {
                    global_shared_text_data_mapped = true;
337                 }
338             }
339 
            // Short-circuit the loop if this isn't a shared private region,
            // since that's the only region type we care about within the
            // current address range.
341             if (info.share_mode != SM_PRIVATE)
342             {
343                 continue;
344             }
345         }
346 
347         // Update counters according to the region type.
348         if (info.share_mode == SM_COW && info.ref_count == 1)
349         {
350             // Treat single reference SM_COW as SM_PRIVATE
351             info.share_mode = SM_PRIVATE;
352         }
353 
354         switch (info.share_mode)
355         {
356             case SM_LARGE_PAGE:
357                 // Treat SM_LARGE_PAGE the same as SM_PRIVATE
358                 // since they are not shareable and are wired.
359             case SM_PRIVATE:
360                 rprvt += info.private_pages_resident * pagesize;
361                 rprvt += info.shared_pages_resident * pagesize;
362                 vprvt += size;
363                 break;
364 
365             case SM_EMPTY:
366                 empty += size;
367                 break;
368 
369             case SM_COW:
370             case SM_SHARED:
371             {
372                 if (pid == 0)
373                 {
374                     // Treat kernel_task specially
375                     if (info.share_mode == SM_COW)
376                     {
377                         rprvt += info.private_pages_resident * pagesize;
378                         vprvt += size;
379                     }
380                     break;
381                 }
382 
383                 if (info.share_mode == SM_COW)
384                 {
385                     rprvt += info.private_pages_resident * pagesize;
386                     vprvt += info.private_pages_resident * pagesize;
387                 }
388                 break;
389             }
390             default:
                // TODO: log an error; hitting an unknown share mode here is
                // really bad.
392                 break;
393         }
394     }
395 
396     rprvt += aliased;
397 }
398 
399 nub_bool_t
400 MachVMMemory::GetMemoryProfile(task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
401 {
402     static mach_port_t localHost = mach_host_self();
403     mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
404     host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
405     vm_stats.wire_count += GetStolenPages();
406     physical_memory = GetPhysicalMemory();
407 
    // This uses vmmap's strategy. We don't use the returned rsize for now;
    // we prefer to match top's version, since that's what we do for the rest
    // of the metrics.
409     GetRegionSizes(task, rsize, dirty_size);
410 
411     GetMemorySizes(task, cputype, pid, rprvt, vprvt);
412 
413     rsize = ti.resident_size;
414     vsize = ti.virtual_size;
415 
416     return true;
417 }
418 
419 nub_size_t
420 MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
421 {
422     if (data == NULL || data_count == 0)
423         return 0;
424 
425     nub_size_t total_bytes_read = 0;
426     nub_addr_t curr_addr = address;
427     uint8_t *curr_data = (uint8_t*)data;
428     while (total_bytes_read < data_count)
429     {
430         mach_vm_size_t curr_size = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_read);
431         mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0; // vm_offset_t is an integer type, so 0, not NULL
433         m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
434 
435         if (DNBLogCheckLogBit(LOG_MEMORY))
436             m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
437 
438         if (m_err.Success())
439         {
440             if (curr_bytes_read != curr_size)
441             {
442                 if (DNBLogCheckLogBit(LOG_MEMORY))
443                     m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
444             }
445             ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
446             ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
447             total_bytes_read += curr_bytes_read;
448             curr_addr += curr_bytes_read;
449             curr_data += curr_bytes_read;
450         }
451         else
452         {
453             break;
454         }
455     }
456     return total_bytes_read;
457 }
458 
459 
460 nub_size_t
461 MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
462 {
463     MachVMRegion vmRegion(task);
464 
465     nub_size_t total_bytes_written = 0;
466     nub_addr_t curr_addr = address;
467     const uint8_t *curr_data = (const uint8_t*)data;
468 
469 
470     while (total_bytes_written < data_count)
471     {
472         if (vmRegion.GetRegionForAddress(curr_addr))
473         {
474             mach_vm_size_t curr_data_count = data_count - total_bytes_written;
475             mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
476             if (region_bytes_left == 0)
477             {
478                 break;
479             }
480             if (curr_data_count > region_bytes_left)
481                 curr_data_count = region_bytes_left;
482 
483             if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
484             {
485                 nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                // nub_size_t is unsigned, so a zero return from WriteRegion
                // indicates failure.
                if (bytes_written == 0)
                {
                    // The error should have already been posted by WriteRegion...
                    break;
490                 }
491                 else
492                 {
493                     total_bytes_written += bytes_written;
494                     curr_addr += bytes_written;
495                     curr_data += bytes_written;
496                 }
497             }
498             else
499             {
500                 DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
501                 break;
502             }
503         }
504         else
505         {
506             DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
507             break;
508         }
509     }
510 
511     return total_bytes_written;
512 }
513 
514 
515 nub_size_t
516 MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
517 {
518     if (data == NULL || data_count == 0)
519         return 0;
520 
521     nub_size_t total_bytes_written = 0;
522     nub_addr_t curr_addr = address;
523     const uint8_t *curr_data = (const uint8_t*)data;
524     while (total_bytes_written < data_count)
525     {
526         mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(curr_addr, data_count - total_bytes_written);
527         m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
528         if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
529             m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);
530 
531 #if !defined (__i386__) && !defined (__x86_64__)
532         vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;
533 
534         m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
535         if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
536             m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
537 #endif
538 
539         if (m_err.Success())
540         {
541             total_bytes_written += curr_data_count;
542             curr_addr += curr_data_count;
543             curr_data += curr_data_count;
544         }
545         else
546         {
547             break;
548         }
549     }
550     return total_bytes_written;
551 }
552