//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <dlfcn.h>

static const vm_size_t kInvalidPageSize = ~0;

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

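// Return how many of the requested "count" bytes can be transferred starting
// at "addr" without crossing a page boundary. For example, with 4 KiB pages,
// addr = 0x1ffe and count = 16 leaves only 2 bytes in the page, so 2 is
// returned.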
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

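// Fill in "region_info" for the region containing "address". If "address" is
// not in any mapped region, report an unmapped region covering the gap up to
// the next mapping (or the rest of the address space) with no permissions.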
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that
            // "address" wasn't in a valid region, but the "vmRegion" info was
            // successfully read from the task, which means it describes the
            // next valid region, from which we can infer the size of this
            // invalid region.
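            // For example, if "address" is 0x1000 and the next valid region
            // starts at 0x5000, the invalid region is reported as 0x4000
            // bytes long.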
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't get any info about the size from the next region, it
        // means we asked about an address that was past all mappings, so the
        // size of this region will take up all remaining address space.
        if (region_info->size == 0)
            region_info->size = INVALID_NUB_ADDRESS - region_info->addr;

        // Not readable, writable or executable
        region_info->permissions = 0;
    }
    return true;
}

// For integrated graphics chips, this makes the accounting info for 'wired'
// memory more like top's.
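// The kernel exposes the stolen amount through the
// machdep.memmap.{Reserved,Unusable,Other} sysctls. For example, if those
// values sum to 540 MB, the total is rounded down to a 128 MB boundary
// (512 MB) and divided by the page size, giving 131072 stolen pages with
// 4 KiB pages.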
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (mib_reserved_len == 0)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (sysctl (mib_reserved,
                    static_cast<u_int>(mib_reserved_len),
                    &reserved,
                    &reserved_len,
                    NULL,
                    0))
        {
            return 0;
        }

        if (sysctl (mib_unusable,
                    static_cast<u_int>(mib_unusable_len),
                    &unusable,
                    &unusable_len,
                    NULL,
                    0))
        {
            return 0;
        }

        if (sysctl (mib_other,
                    static_cast<u_int>(mib_other_len),
                    &other,
                    &other_len,
                    NULL,
                    0))
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved) &&
            unusable_len == sizeof(unusable) &&
            other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen &= ~(mb128 - 1); // round down to a 128 MB boundary
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}

static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    size_t len = sizeof(physical_memory);
    sysctlbyname("hw.memsize", &physical_memory, &len, NULL, 0);

    calculated = true;
    return physical_memory;
}

// rsize and dirty_size are not adjusted for the dyld shared cache or multiple
// __LINKEDIT segments, as they are in vmmap. In practice, dirty_size doesn't
// differ much, but rsize may. There is a performance penalty for the
// adjustment. Right now, only dirty_size is used.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;
#endif
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch (type) {
#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
        case CPU_TYPE_ARM64:
            base = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            break;
#endif

        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log error about unknown CPU type
            break;
        }
    }

    return (addr >= base && addr < (base + size));
}

void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting it for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data
            // regions mapped in. If so, set global_shared_text_data_mapped
            // to true and avoid checking again.
            if (!global_shared_text_data_mapped && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = true;
                }
            }

            // Short-circuit the loop if this isn't a shared private region,
            // since that's the only region type we care about within the
            // current address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single-reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // Log that something is really bad.
                break;
        }
    }

    rprvt += aliased;
}

static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

    info_count = TASK_VM_INFO_COUNT;
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
    if (kr == KERN_SUCCESS)
    {
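        // "internal" plus "compressed" approximates the anonymous footprint;
        // purgeable_volatile_pmap is subtracted so volatile purgeable memory
        // (reported separately as "purgeable") isn't also counted as
        // anonymous.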
        purgeable = vm_info.purgeable_volatile_resident;
        anonymous = vm_info.internal + vm_info.compressed - vm_info.purgeable_volatile_pmap;
    }

#endif
}

#if defined (HOST_VM_INFO64_COUNT)
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
#else
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vminfo, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
#endif
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
#if defined (HOST_VM_INFO64_COUNT)
        mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
        host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo, &count);
#else
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vminfo, &count);
        vminfo.wire_count += GetStolenPages(task);
#endif

        /* We are no longer reporting these. Let's not waste time.
        GetMemorySizes(task, cputype, pid, rprvt, vprvt);
        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap's strategy. We don't use the returned rsize for
            // now. We prefer to match top's version since that's what we do
            // for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }
        */

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}

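// Read "data_count" bytes from "address" in the given task into "data",
// transferring at most one page per mach_vm_read call. Returns the number of
// bytes actually read. Typical use (where "m_vm_memory" is an instance of
// this class):
//
//   uint64_t value = 0;
//   nub_size_t n = m_vm_memory.Read(task, addr, &value, sizeof(value));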
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0;
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, (void *)vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, (void *)vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}

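// Write "data_count" bytes from "data" to "address" in the given task,
// proceeding region by region and adding read/write protections to each
// region as needed. Returns the number of bytes actually written.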
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;

    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written == 0)
                {
                    // The error should have already been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)curr_addr);
            break;
        }
    }

    return total_bytes_written;
}

nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written));
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

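        // On non-x86 targets, flush the caches after writing so the CPU
        // doesn't fetch stale data or instructions, e.g. after inserting
        // breakpoint opcodes.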
#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}