//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <dlfcn.h>

MachVMMemory::MachVMMemory() :
    m_page_size    (kInvalidPageSize),
    m_err          (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

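// Return the VM page size for "task", computing it once and caching the
// result. When TASK_VM_INFO is available we prefer the page size the kernel
// reports for the task itself, since it can differ from the host's page
// size; otherwise we fall back to host_page_size().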
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

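// Clamp "count" so that a transfer starting at "addr" does not cross a page
// boundary. For example, with a 0x1000 byte page, addr == 0x1ff0 and
// count == 0x100 is clamped to 0x10, the number of bytes left in that page.
// If the page size can't be determined, "count" is returned unchanged.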
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

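// Fill in "region_info" for the region containing "address". If "address"
// is not in any valid region, the size of the unmapped gap is inferred from
// the start of the next valid region when the kernel provides one, and the
// permissions are reported as none.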
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task, which means the info describes the next valid
            // region, from which we can infer the size of this invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just
        // fill 1 in as the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writable, or executable
        region_info->permissions = 0;
    }
    return true;
}

// For integrated graphics chips, this makes the accounting info for 'wired' memory more like top's.
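// The machdep.memmap.{Reserved,Unusable,Other} sysctls (resolved to MIBs
// once below) report physical memory that the kernel never hands to the VM
// system. Following top's heuristic, totals of at least 128 MB are rounded
// down to a 128 MB multiple and counted, in pages, as stolen (e.g. by
// integrated graphics).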
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                         &reserved_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                         &unusable_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_other, mib_other_len, &other,
                         &other_len, NULL, 0))
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen = (stolen & ~((128 * 1024 * 1024ULL) - 1)); // rounding down
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}

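// Return the host's physical memory size in bytes via the CTL_HW/HW_MEMSIZE
// sysctl, computed once and memoized.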
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    sysctl(mib, 2, &physical_memory, &len, NULL, 0);
    calculated = true;
    return physical_memory;
}

// rsize and dirty_size are not adjusted for the dyld shared cache or for
// multiple __LINKEDIT segments, as they are in vmmap. In practice dirty_size
// doesn't differ much, but rsize may; since the adjustment carries a
// performance penalty, only dirty_size is used for now.
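// The loop below walks every region with mach_vm_region_recurse(). When a
// submap is reported, the nesting depth is incremented and the same address
// is queried again one level deeper instead of advancing past the submap.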
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // We have walked past the last region.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break;
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    vm_size_t pagesize = PageSize (task);
    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}

// Test whether the virtual address is within the architecture's shared region.
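// The SHARED_REGION_BASE_* and SHARED_REGION_SIZE_* constants come from
// <mach/shared_region.h> and describe the fixed per-architecture address
// window into which the shared cache is mapped.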
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch(type) {
        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log error about unknown CPU type
            break;
        }
    }

    return(addr >= base && addr < (base + size));
}

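// Compute the resident private (rprvt) and virtual private (vprvt) sizes of
// "task" by walking its regions with VM_REGION_TOP_INFO, classifying share
// modes roughly the way top does: SM_PRIVATE and SM_LARGE_PAGE regions count
// in full, while SM_COW regions contribute only their private resident pages.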
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data
            // regions mapped in. If so, set global_shared_text_data_mapped
            // to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region,
            // since that's the only region type we care about within the
            // current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

// cribbed from sysmond
static uint64_t
SumVMPurgeableInfo(const vm_purgeable_info_t info)
{
    uint64_t sum = 0;
    int i;

    // vm_purgeable_info holds 8 FIFO and 8 LIFO volatile queues plus one
    // obsolete queue; sum the byte sizes of all of them.
    for (i = 0; i < 8; i++)
    {
        sum += info->fifo_data[i].size;
    }
    sum += info->obsolete_data.size;
    for (i = 0; i < 8; i++)
    {
        sum += info->lifo_data[i].size;
    }

    return sum;
}

#endif

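// Report the task's purgeable memory (the byte total of its volatile
// queues) and its anonymous memory, computed as the TASK_VM_INFO "internal"
// size minus the purgeable total. task_purgable_info() is probed with
// dlsym() before being called, guarding against OS versions where it is
// unavailable.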
static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
    task_purgable_info_t purgeable_info;
    uint64_t purgeable_sum = 0;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    if (dlsym(RTLD_NEXT, "task_purgable_info") != NULL)
    {
        kr = task_purgable_info(task, &purgeable_info);
        if (kr == KERN_SUCCESS) {
            purgeable_sum = SumVMPurgeableInfo(&purgeable_info);
            purgeable = purgeable_sum;
        }
    }

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
    {
        if (purgeable_sum < vm_info.internal)
        {
            anonymous = vm_info.internal - purgeable_sum;
        }
        else
        {
            anonymous = 0;
        }
    }
#endif
}

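// Gather a memory profile for "task" according to "scanType":
// eProfileHostMemory fills in the host's physical memory size,
// eProfileMemory gathers host VM statistics plus the task's private,
// resident, and virtual sizes, eProfileMemoryDirtyPage adds the (more
// expensive) dirty page accounting, and eProfileMemoryAnonymous adds the
// purgeable and anonymous totals.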
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap's strategy. We don't use the returned rsize
            // for now; we prefer to match top's version since that's what
            // we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}

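// Read up to "data_count" bytes at "address" in "task" into "data",
// splitting the request so that no single ::mach_vm_read crosses a page
// boundary. Returns the number of bytes actually read, which may be short
// if an unmapped or unreadable page is hit.
//
// A minimal usage sketch (hypothetical caller; assumes "task" is a valid
// task port for the inferior and "addr" is a readable address in it):
//
//     MachVMMemory vm;
//     uint32_t value = 0;
//     if (vm.Read(task, addr, &value, sizeof(value)) == sizeof(value))
//     {
//         // "value" now holds four bytes copied from the inferior.
//     }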
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0;  // vm_offset_t is an integer type, not a pointer
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            // mach_vm_read allocates the returned buffer in our address
            // space; release it once the bytes have been copied out.
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}


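// Write "data_count" bytes from "data" to "address" in "task", temporarily
// adding read/write protections to each region touched so that normally
// read-only pages (e.g. __TEXT, when inserting breakpoint opcodes) can be
// written. Returns the number of bytes actually written.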
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // The error should already have been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}

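// Write to a single region one page-sized chunk at a time with
// ::mach_vm_write. On non-x86 targets each chunk is followed by a
// MATTR_VAL_CACHE_FLUSH so that stale instruction cache lines (e.g. around
// freshly written breakpoint opcodes) are invalidated.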
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}