1 //===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 //  Created by Greg Clayton on 6/26/07.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "MachVMMemory.h"
15 #include "MachVMRegion.h"
16 #include "DNBLog.h"
17 #include <mach/mach_vm.h>
18 #include <mach/shared_region.h>
19 #include <sys/sysctl.h>
20 
21 MachVMMemory::MachVMMemory() :
22     m_page_size    (kInvalidPageSize),
23     m_err        (0)
24 {
25 }
26 
27 MachVMMemory::~MachVMMemory()
28 {
29 }
30 
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    // Return the VM page size in bytes for "task", caching the result in
    // m_page_size. On failure, 0 is returned and m_err holds the error.
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        // Prefer asking the task itself when the TASK_VM_INFO flavor is
        // available: task_info() reports the page size in use by that task,
        // which may differ from the host's.
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        // Fall back to the host's page size when the task couldn't tell us.
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}
61 
62 nub_size_t
63 MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
64 {
65     const nub_size_t page_size = PageSize(task);
66     if (page_size > 0)
67     {
68         nub_size_t page_offset = (addr % page_size);
69         nub_size_t bytes_left_in_page = page_size - page_offset;
70         if (count > bytes_left_in_page)
71             count = bytes_left_in_page;
72     }
73     return count;
74 }
75 
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    // Fill "region_info" with the VM region containing "address" in "task".
    // If "address" is not inside any mapped region, describe the unmapped
    // gap instead: zero permissions, with the size inferred from the start
    // of the next valid region when possible. Always returns true.
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that "address"
            // wasn't in a valid region, but the "vmRegion" info was successfully
            // read from the task which means the info describes the next valid
            // region from which we can infer the size of this invalid region
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, just fill
        // 1 in as the byte size
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writeable or executable
        region_info->permissions = 0;
    }
    return true;
}
111 
// For integrated graphics chip, this makes the accounting info for 'wired' memory more like top.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    // Return the number of physical pages "stolen" from the usable-memory
    // pool (e.g. memory reserved for an integrated graphics chip), summed
    // from the machdep.memmap.{Reserved,Unusable,Other} sysctls. The result
    // is computed once and cached; sysctl failures return 0 WITHOUT caching,
    // so the next call retries.
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    // Resolve the three sysctl names to MIBs once. On any failure,
    // mib_reserved_len is reset to 0 so resolution is retried on the
    // next call.
    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (-1 == r)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len = sizeof(reserved);
        size_t unusable_len = sizeof(unusable);
        size_t other_len = sizeof(other);

        if (-1 == sysctl(mib_reserved, mib_reserved_len, &reserved,
                         &reserved_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_unusable, mib_unusable_len, &unusable,
                         &unusable_len, NULL, 0))
        {
            return 0;
        }

        if (-1 == sysctl(mib_other, mib_other_len, &other,
                         &other_len, NULL, 0))
        {
            return 0;
        }

        // Only trust the values if the kernel wrote full 64-bit quantities.
        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen = (stolen & ~(mb128 - 1)); // round down to a 128 MB multiple
                // Fix: the original initialized a local from the global
                // vm_page_size and immediately overwrote it with
                // PageSize(task) (dead store). Also guard against a zero
                // page size — PageSize() explicitly returns 0 on failure —
                // to avoid a division by zero.
                const vm_size_t pagesize = PageSize (task);
                if (pagesize > 0)
                    stolenPages = stolen / pagesize;
            }
        }
    }

    calculated = true;
    return stolenPages;
}
219 
// Return the total physical memory of the host in bytes (hw.memsize), or 0
// if the sysctl fails.
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    // Fix: 'calculated' was never set to true, so the cache never engaged
    // and the sysctl ran on every call despite the comment above. Cache the
    // value only when the sysctl actually succeeds, so a transient failure
    // is retried on the next call.
    if (sysctl(mib, 2, &physical_memory, &len, NULL, 0) == 0)
        calculated = true;
    return physical_memory;
}
234 
// rsize and dirty_size is not adjusted for dyld shared cache and multiple __LINKEDIT segment, as in vmmap. In practice, dirty_size doesn't differ much but rsize may. There is performance penalty for the adjustment. Right now, only use the dirty_size.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
    // Walk every VM region of "task" (recursing into submaps), total the
    // resident and dirtied page counts, and return both totals converted to
    // byte sizes via the task's page size.
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // It seems like this is a good break too.
            break;
        }
        else if (err)
        {
            mach_error("vm_region",err);
            break; // reached last region
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            // Descend into the submap: bump the depth and re-query the same
            // address, without counting the submap container itself.
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            // Advance past this region for the next iteration.
            address = address+size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    // NOTE(review): the page size is computed once and cached for ALL tasks;
    // if tasks with differing page sizes are ever profiled, the cached value
    // would be wrong for later tasks — confirm this is acceptable.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;
}
297 
298 // Test whether the virtual address is within the architecture's shared region.
299 static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
300 {
301     mach_vm_address_t base = 0, size = 0;
302 
303     switch(type) {
304         case CPU_TYPE_ARM:
305             base = SHARED_REGION_BASE_ARM;
306             size = SHARED_REGION_SIZE_ARM;
307             break;
308 
309         case CPU_TYPE_X86_64:
310             base = SHARED_REGION_BASE_X86_64;
311             size = SHARED_REGION_SIZE_X86_64;
312             break;
313 
314         case CPU_TYPE_I386:
315             base = SHARED_REGION_BASE_I386;
316             size = SHARED_REGION_SIZE_I386;
317             break;
318 
319         default: {
320             // Log error abut unknown CPU type
321             break;
322         }
323     }
324 
325 
326     return(addr >= base && addr < (base + size));
327 }
328 
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Accumulate the task's resident-private (rprvt) and virtual-private
    // (vprvt) byte counts by walking all regions with VM_REGION_TOP_INFO.
    // NOTE: rprvt/vprvt are only added to (+=), never zeroed here — callers
    // must initialize them before calling.

    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;

    // NOTE(review): page size is cached once for all tasks — see the same
    // pattern in GetRegionSizes(); confirm all profiled tasks share a page
    // size.
    static vm_size_t pagesize;
    static bool calculated = false;
    if (!calculated)
    {
        calculated = true;
        pagesize = PageSize (task);
    }

    // Walk regions from address 0 upward; mach_vm_region() rounds addr up
    // to the next region and fills in its size.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in.  If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    // "aliased" is never incremented above, so this is currently a no-op;
    // kept for parity with the accounting this code was derived from.
    rprvt += aliased;
}
434 
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size)
{
    // Gather a memory profile for "task", filling only the output
    // parameters selected by the bits set in "scanType". Always returns
    // true.
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        // Look up the host port once and reuse it across calls.
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        // Fold "stolen" pages (e.g. graphics-reserved memory) into the wired
        // count so the numbers line up with what 'top' reports.
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap strategy. We don't use the returned rsize for now. We prefer to match top's version since that's what we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
    }

    return true;
}
462 
463 nub_size_t
464 MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
465 {
466     if (data == NULL || data_count == 0)
467         return 0;
468 
469     nub_size_t total_bytes_read = 0;
470     nub_addr_t curr_addr = address;
471     uint8_t *curr_data = (uint8_t*)data;
472     while (total_bytes_read < data_count)
473     {
474         mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
475         mach_msg_type_number_t curr_bytes_read = 0;
476         vm_offset_t vm_memory = NULL;
477         m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);
478 
479         if (DNBLogCheckLogBit(LOG_MEMORY))
480             m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);
481 
482         if (m_err.Success())
483         {
484             if (curr_bytes_read != curr_size)
485             {
486                 if (DNBLogCheckLogBit(LOG_MEMORY))
487                     m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
488             }
489             ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
490             ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
491             total_bytes_read += curr_bytes_read;
492             curr_addr += curr_bytes_read;
493             curr_data += curr_bytes_read;
494         }
495         else
496         {
497             break;
498         }
499     }
500     return total_bytes_read;
501 }
502 
503 
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    // Write "data_count" bytes from "data" into "task" memory at "address".
    // The write proceeds region by region: each region is first granted
    // read/write protections, then its portion is written via WriteRegion().
    // Returns the number of bytes successfully written.
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            // Clamp this chunk to the bytes remaining in the current region.
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written <= 0)
                {
                    // Error should have already be posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            // NOTE(review): this logs the original "address", not curr_addr;
            // confirm whether the current address was intended here.
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)address);
            break;
        }
    }

    return total_bytes_written;
}
557 
558 
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    // Write "data_count" bytes to "address" in "task", one page-bounded
    // chunk at a time via mach_vm_write(). The caller (Write) is expected
    // to have made the destination writable. Returns the number of bytes
    // written; stops at the first failure, leaving the error in m_err.
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        // Never let a single write cross a page boundary.
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

#if !defined (__i386__) && !defined (__x86_64__)
        // On non-x86 targets, flush the caches for the range just written
        // (MATTR_VAL_CACHE_FLUSH) so the target observes the new bytes.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}
596