1 //===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 //  Created by Greg Clayton on 6/26/07.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "MachVMMemory.h"
15 #include "MachVMRegion.h"
16 #include "DNBLog.h"
17 #include <mach/mach_vm.h>
18 #include <mach/shared_region.h>
19 #include <sys/sysctl.h>
20 #include <dlfcn.h>
21 
22 MachVMMemory::MachVMMemory() :
23     m_page_size    (kInvalidPageSize),
24     m_err        (0)
25 {
26 }
27 
28 MachVMMemory::~MachVMMemory()
29 {
30 }
31 
// Return the VM page size for "task", caching the result in m_page_size.
// Prefers the task's own page size (which can differ from debugserver's own,
// e.g. when debugging across page-size boundaries); falls back to the host
// page size, and returns 0 if even that query fails.
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    // Fast path: the page size has already been determined.
    if (m_page_size != kInvalidPageSize)
        return m_page_size;

#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
    // Preferred path: ask the kernel for the task's page size via
    // TASK_VM_INFO (available when the SDK is new enough).
    if (task != TASK_NULL)
    {
        task_vm_info_data_t vm_info;
        mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
        const kern_return_t kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
        if (kr == KERN_SUCCESS)
        {
            DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
            m_page_size = vm_info.page_size;
            return m_page_size;
        }
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
    }
#endif
    // Fallback: use the host's page size; report 0 on failure.
    m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
    if (m_err.Fail())
        m_page_size = 0;
    return m_page_size;
}
62 
63 nub_size_t
64 MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
65 {
66     const nub_size_t page_size = PageSize(task);
67     if (page_size > 0)
68     {
69         nub_size_t page_offset = (addr % page_size);
70         nub_size_t bytes_left_in_page = page_size - page_offset;
71         if (count > bytes_left_in_page)
72             count = bytes_left_in_page;
73     }
74     return count;
75 }
76 
// Fill in "region_info" for the region containing "address". If "address" is
// not in any mapped region, synthesize an entry describing the unmapped gap
// (zero permissions, size extending to the next region or to the end of the
// address space). Always returns true.
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        // "address" falls inside a mapped region: report it directly.
        region_info->addr        = vmRegion.StartAddress();
        region_info->size        = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
        return true;
    }

    // "address" is not inside any mapped region.
    region_info->addr = address;
    region_info->size = 0;
    if (vmRegion.GetError().Success())
    {
        // The kernel query itself succeeded even though "address" wasn't in a
        // valid region, which means "vmRegion" describes the next valid
        // region above "address" — the gap up to it is this region's size.
        const mach_vm_address_t next_region_start = vmRegion.StartAddress();
        if (address < next_region_start)
            region_info->size = next_region_start - address;
    }
    // If the next region gave us no size, "address" is past all mappings and
    // this region takes up all remaining address space.
    if (region_info->size == 0)
        region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

    // Unmapped memory is neither readable, writeable nor executable.
    region_info->permissions = 0;
    return true;
}
113 
// For integrated graphics chips this makes the accounting info for 'wired'
// memory more like top: memory the kernel "steals" at boot (e.g. for the GPU)
// is exposed via the machdep.memmap.* sysctls and folded into the wired count.
// Returns the number of stolen pages, or 0 when the sysctls are unavailable.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    // The stolen amount cannot change after boot; compute once and cache.
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated)
        return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    // Resolve the three sysctl names to MIBs once. If any lookup fails (e.g.
    // on hardware with no stolen memory) report zero now; mib_reserved_len is
    // reset so the lookup is retried on the next call.
    if (0 == mib_reserved_len)
    {
        mib_reserved_len = CTL_MAXNAME;
        if (-1 == ::sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                                    &mib_reserved_len))
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;
        if (-1 == ::sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                                    &mib_unusable_len))
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;
        if (-1 == ::sysctlnametomib("machdep.memmap.Other", mib_other,
                                    &mib_other_len))
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len = sizeof(reserved);
        size_t unusable_len = sizeof(unusable);
        size_t other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (-1 == ::sysctl(mib_reserved, mib_reserved_len, &reserved,
                           &reserved_len, NULL, 0))
            return 0;

        if (-1 == ::sysctl(mib_unusable, mib_unusable_len, &unusable,
                           &unusable_len, NULL, 0))
            return 0;

        if (-1 == ::sysctl(mib_other, mib_other_len, &other,
                           &other_len, NULL, 0))
            return 0;

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            const uint64_t mb128 = 128 * 1024 * 1024ULL;
            const nub_size_t page_size = PageSize (task);

            // BUG FIX: guard against PageSize() returning 0 (it does when
            // host_page_size() fails), which previously divided by zero.
            if (stolen >= mb128 && page_size > 0)
            {
                stolen = stolen & ~(mb128 - 1);  // round down to a 128 MB boundary
                stolenPages = stolen / page_size;
            }
        }
    }

    calculated = true;
    return stolenPages;
}
219 
// Return the machine's physical memory size in bytes (hw.memsize), or 0 if
// the sysctl query fails.
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    // Check the return value so a failed query reports 0 instead of garbage.
    if (::sysctl(mib, 2, &physical_memory, &len, NULL, 0) != 0)
        physical_memory = 0;
    // BUG FIX: "calculated" was never set, so the cache never took effect and
    // the sysctl was re-issued on every call.
    calculated = true;
    return physical_memory;
}
234 
// rsize and dirty_size are not adjusted for the dyld shared cache or multiple
// __LINKEDIT segments, as vmmap does. In practice dirty_size doesn't differ
// much but rsize may, and the adjustment carries a performance penalty, so
// right now only dirty_size is used.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    // NOTE(review): "rsize" is intentionally left untouched; only the dirty
    // (internal) size is reported. On failure neither output is modified.
    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
    const kern_return_t kr = ::task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;
#endif
}
251 
// Test whether the virtual address is within the architecture's shared region.
// Unknown CPU types leave base == size == 0, so the test is always false.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0;
    mach_vm_address_t size = 0;

    switch (type)
    {
#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
        case CPU_TYPE_ARM64:
            base = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            break;
#endif

        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default:
            // Log error about unknown CPU type here if needed.
            break;
    }

    return addr >= base && addr < (base + size);
}
289 
// Walk every VM region of "task" accumulating the resident-private (rprvt)
// and virtual-private (vprvt) byte counts, mirroring top(1)'s accounting.
// NOTE(review): rprvt/vprvt are accumulated into, not reset here — assumes
// callers pass in zeroed values; confirm at call sites.
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    // mach_vm_region() rounds "addr" up to the start of the containing/next
    // region, so "addr += size" advances past the region just examined.
    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break; // past the last mapped region

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data regions mapped in.  If so, set global_shared_text_data_mapped to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                // Re-query the same region with BASIC_INFO; the "reserved"
                // flag indicates the globally shared submap is mapped here.
                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short circuit the loop if this isn't a shared private region, since that's the only region type we care about within the current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // log that something is really bad.
                break;
        }
    }

    // "empty", "fw_private", "aliased" and "global_shared_text_data_mapped"
    // are computed but intentionally unreported (see comment at the top).
    rprvt += aliased;
}
388 
// Query purgeable and anonymous memory totals for "task" via
// TASK_VM_INFO_PURGEABLE. On failure (or when the SDK predates
// TASK_VM_INFO >= 22) the outputs are left untouched.
static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
    const kern_return_t kr = ::task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
    {
        purgeable = vm_info.purgeable_volatile_resident;
        // Anonymous = internal footprint minus the purgeable-volatile pages
        // already accounted for in "purgeable".
        anonymous = vm_info.internal - vm_info.purgeable_volatile_pmap;
    }

#endif
}
408 
// Gather the memory statistics requested by "scanType" into the many output
// references. Host-wide stats (physical memory, VM statistics) and per-task
// stats (private/resident/virtual sizes, dirty pages, purgeable/anonymous)
// are each gated by their own scan-type flag. Always returns true.
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        // The host port never changes, so fetch it only once.
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        // Fold boot-time "stolen" pages into the wired count to match top.
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses the vmmap strategy. We don't use the returned rsize
            // for now; we prefer to match top's version since that's what we
            // do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
    }

    return true;
}
441 
// Read up to "data_count" bytes from "address" in "task" into "data",
// page by page (mach_vm_read can fail when a request spans regions with
// different protections). Returns the number of bytes actually read; m_err
// holds the error from the last mach_vm_read on a short read.
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        // BUG FIX: vm_offset_t is an integer type, not a pointer — use 0, not NULL.
        vm_offset_t vm_memory = 0;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Fail())
            break;

        if (curr_bytes_read != curr_size)
        {
            if (DNBLogCheckLogBit(LOG_MEMORY))
                m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
        }
        // Copy out of, then release, the buffer the kernel allocated in our
        // address space for the read.
        ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
        ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
        total_bytes_read += curr_bytes_read;
        curr_addr += curr_bytes_read;
        curr_data += curr_bytes_read;

        // BUG FIX: if the kernel reports success but reads zero bytes, the
        // counters above don't advance — bail out instead of looping forever.
        if (curr_bytes_read == 0)
            break;
    }
    return total_bytes_read;
}
481 
482 
// Write "data_count" bytes from "data" to "address" in "task", region by
// region, temporarily setting each region read/write first. Returns the
// number of bytes actually written.
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (!vmRegion.GetRegionForAddress(curr_addr))
        {
            // BUG FIX: log the address whose region lookup failed (curr_addr),
            // not the original starting address.
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)curr_addr);
            break;
        }

        // Clamp this chunk to the bytes remaining in the current region.
        mach_vm_size_t curr_data_count = data_count - total_bytes_written;
        const mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
        if (region_bytes_left == 0)
            break;
        if (curr_data_count > region_bytes_left)
            curr_data_count = region_bytes_left;

        if (!vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
            break;
        }

        const nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
        // BUG FIX: nub_size_t is unsigned, so the old "bytes_written <= 0"
        // test was really "== 0"; make the zero test explicit.
        if (bytes_written == 0)
        {
            // Error should have already been posted by WriteRegion...
            break;
        }
        total_bytes_written += bytes_written;
        curr_addr += bytes_written;
        curr_data += bytes_written;
    }

    return total_bytes_written;
}
536 
537 
// Write "data_count" bytes to "address" in "task", page by page, flushing the
// instruction cache on architectures that need it. Returns the number of
// bytes written; m_err holds the last error on a short write.
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

        // BUG FIX: stop on a failed write *before* the cache flush below.
        // Previously a successful vm_machine_attribute() call overwrote
        // m_err, so a failed write could be counted as written and the loop
        // would advance past it.
        if (m_err.Fail())
            break;

#if !defined (__i386__) && !defined (__x86_64__)
        // Architectures with split I/D caches need an explicit flush so newly
        // written code becomes visible to the instruction stream.
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
        if (m_err.Fail())
            break;
#endif

        total_bytes_written += curr_data_count;
        curr_addr += curr_data_count;
        curr_data += curr_data_count;
    }
    return total_bytes_written;
}
575