//===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//  Created by Greg Clayton on 6/26/07.
//
//===----------------------------------------------------------------------===//

#include "MachVMMemory.h"
#include "MachVMRegion.h"
#include "DNBLog.h"
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <sys/sysctl.h>
#include <dlfcn.h>

MachVMMemory::MachVMMemory() :
    m_page_size (kInvalidPageSize),
    m_err (0)
{
}

MachVMMemory::~MachVMMemory()
{
}

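// Return the target task's VM page size in bytes, caching the result.
// When available, TASK_VM_INFO is preferred because it reports the page
// size of the target task itself, which may differ from the debugger's
// own page size; otherwise we fall back to the host page size via
// host_page_size().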
nub_size_t
MachVMMemory::PageSize(task_t task)
{
    if (m_page_size == kInvalidPageSize)
    {
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
        if (task != TASK_NULL)
        {
            kern_return_t kr;
            mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT;
            task_vm_info_data_t vm_info;
            kr = task_info (task, TASK_VM_INFO, (task_info_t) &vm_info, &info_count);
            if (kr == KERN_SUCCESS)
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info returned page size of 0x%x", (int) vm_info.page_size);
                m_page_size = vm_info.page_size;
                return m_page_size;
            }
            else
            {
                DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call failed to get page size, TASK_VM_INFO %d, TASK_VM_INFO_COUNT %d, kern return %d", TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
            }
        }
#endif
        m_err = ::host_page_size( ::mach_host_self(), &m_page_size);
        if (m_err.Fail())
            m_page_size = 0;
    }
    return m_page_size;
}

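// Clamp "count" so that a transfer starting at "addr" never crosses a page
// boundary. For example, with a 4096-byte page size, a request for 4096
// bytes starting at address 0x1ff0 is clamped to 16 bytes -- the distance
// from 0x1ff0 to the end of its page.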
nub_size_t
MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, nub_size_t count)
{
    const nub_size_t page_size = PageSize(task);
    if (page_size > 0)
    {
        nub_size_t page_offset = (addr % page_size);
        nub_size_t bytes_left_in_page = page_size - page_offset;
        if (count > bytes_left_in_page)
            count = bytes_left_in_page;
    }
    return count;
}

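// Fill in "region_info" for the region containing "address". If the address
// isn't in any mapped region, describe the invalid gap instead: its size is
// inferred from the start of the next valid region when possible, and its
// permissions are reported as none.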
nub_bool_t
MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, DNBRegionInfo *region_info)
{
    MachVMRegion vmRegion(task);

    if (vmRegion.GetRegionForAddress(address))
    {
        region_info->addr = vmRegion.StartAddress();
        region_info->size = vmRegion.GetByteSize();
        region_info->permissions = vmRegion.GetDNBPermissions();
    }
    else
    {
        region_info->addr = address;
        region_info->size = 0;
        if (vmRegion.GetError().Success())
        {
            // vmRegion.GetRegionForAddress() returned false, indicating that
            // "address" wasn't in a valid region, but the "vmRegion" info was
            // successfully read from the task, which means the info describes
            // the next valid region, from which we can infer the size of this
            // invalid region.
            mach_vm_address_t start_addr = vmRegion.StartAddress();
            if (address < start_addr)
                region_info->size = start_addr - address;
        }
        // If we can't infer the size from the next region, just fill in 1 as
        // the byte size.
        if (region_info->size == 0)
            region_info->size = 1;

        // Not readable, writable or executable
        region_info->permissions = 0;
    }
    return true;
}

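// "Stolen" pages are physical pages the system reserves before the VM takes
// over (e.g. memory claimed by an integrated GPU), exposed through the
// machdep.memmap.{Reserved,Unusable,Other} sysctls. Callers add them to the
// wired count (see GetMemoryProfile) so the numbers line up with top's.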
// For integrated graphics chips, this makes the accounting info for 'wired'
// memory more like top's.
uint64_t
MachVMMemory::GetStolenPages(task_t task)
{
    static uint64_t stolenPages = 0;
    static bool calculated = false;
    if (calculated) return stolenPages;

    static int mib_reserved[CTL_MAXNAME];
    static int mib_unusable[CTL_MAXNAME];
    static int mib_other[CTL_MAXNAME];
    static size_t mib_reserved_len = 0;
    static size_t mib_unusable_len = 0;
    static size_t mib_other_len = 0;
    int r;

    /* This can be used for testing: */
    //tsamp->pages_stolen = (256 * 1024 * 1024ULL) / tsamp->pagesize;

    if (mib_reserved_len == 0)
    {
        mib_reserved_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Reserved", mib_reserved,
                            &mib_reserved_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_unusable_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Unusable", mib_unusable,
                            &mib_unusable_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }

        mib_other_len = CTL_MAXNAME;

        r = sysctlnametomib("machdep.memmap.Other", mib_other,
                            &mib_other_len);

        if (r == -1)
        {
            mib_reserved_len = 0;
            return 0;
        }
    }

    if (mib_reserved_len > 0 && mib_unusable_len > 0 && mib_other_len > 0)
    {
        uint64_t reserved = 0, unusable = 0, other = 0;
        size_t reserved_len;
        size_t unusable_len;
        size_t other_len;

        reserved_len = sizeof(reserved);
        unusable_len = sizeof(unusable);
        other_len = sizeof(other);

        /* These are all declared as QUAD/uint64_t sysctls in the kernel. */

        if (sysctl(mib_reserved, mib_reserved_len, &reserved,
                   &reserved_len, NULL, 0) == -1)
        {
            return 0;
        }

        if (sysctl(mib_unusable, mib_unusable_len, &unusable,
                   &unusable_len, NULL, 0) == -1)
        {
            return 0;
        }

        if (sysctl(mib_other, mib_other_len, &other,
                   &other_len, NULL, 0) == -1)
        {
            return 0;
        }

        if (reserved_len == sizeof(reserved)
            && unusable_len == sizeof(unusable)
            && other_len == sizeof(other))
        {
            uint64_t stolen = reserved + unusable + other;
            uint64_t mb128 = 128 * 1024 * 1024ULL;

            if (stolen >= mb128)
            {
                stolen = (stolen & ~(mb128 - 1)); // round down to a 128 MB boundary
                stolenPages = stolen / PageSize (task);
            }
        }
    }

    calculated = true;
    return stolenPages;
}

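// Return the machine's physical memory size in bytes (the hw.memsize
// sysctl), cached after the first successful query since it doesn't change
// while we are running.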
static uint64_t GetPhysicalMemory()
{
    // This doesn't change often at all. No need to poll each time.
    static uint64_t physical_memory = 0;
    static bool calculated = false;
    if (calculated) return physical_memory;

    int mib[2];
    mib[0] = CTL_HW;
    mib[1] = HW_MEMSIZE;
    size_t len = sizeof(physical_memory);
    if (sysctl(mib, 2, &physical_memory, &len, NULL, 0) == 0)
        calculated = true; // only cache a value we actually obtained
    return physical_memory;
}

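// Two strategies are used below: on systems whose headers define a new
// enough TASK_VM_INFO, a single task_info() call yields the dirty
// ("internal") size cheaply; otherwise we walk every region with
// mach_vm_region_recurse() and total up the resident and dirtied page
// counts ourselves.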
// rsize and dirty_size are not adjusted for the dyld shared cache or for
// multiple __LINKEDIT segments, as they are in vmmap. In practice dirty_size
// doesn't differ much, but rsize may; since the adjustment carries a
// performance penalty, only dirty_size is used right now.
void
MachVMMemory::GetRegionSizes(task_t task, mach_vm_size_t &rsize, mach_vm_size_t &dirty_size)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    task_vm_info_data_t vm_info;
    mach_msg_type_number_t info_count;
    kern_return_t kr;

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
        dirty_size = vm_info.internal;

#else
    mach_vm_address_t address = 0;
    mach_vm_size_t size;
    kern_return_t err = 0;
    unsigned nestingDepth = 0;
    mach_vm_size_t pages_resident = 0;
    mach_vm_size_t pages_dirtied = 0;

    while (1)
    {
        mach_msg_type_number_t count;
        struct vm_region_submap_info_64 info;

        count = VM_REGION_SUBMAP_INFO_COUNT_64;
        err = mach_vm_region_recurse(task, &address, &size, &nestingDepth, (vm_region_info_t)&info, &count);
        if (err == KERN_INVALID_ADDRESS)
        {
            // KERN_INVALID_ADDRESS means we walked past the last region.
            break;
        }
        else if (err)
        {
            mach_error("vm_region", err);
            break; // unexpected error; stop iterating
        }

        bool should_count = true;
        if (info.is_submap)
        { // is it a submap?
            nestingDepth++;
            should_count = false;
        }
        else
        {
            // Don't count malloc stack logging data in the TOTAL VM usage lines.
            if (info.user_tag == VM_MEMORY_ANALYSIS_TOOL)
                should_count = false;

            address = address + size;
        }

        if (should_count)
        {
            pages_resident += info.pages_resident;
            pages_dirtied += info.pages_dirtied;
        }
    }

    vm_size_t pagesize = PageSize (task);
    rsize = pages_resident * pagesize;
    dirty_size = pages_dirtied * pagesize;

#endif
}

// Test whether the virtual address is within the architecture's shared region.
static bool InSharedRegion(mach_vm_address_t addr, cpu_type_t type)
{
    mach_vm_address_t base = 0, size = 0;

    switch (type) {
#if defined (CPU_TYPE_ARM64) && defined (SHARED_REGION_BASE_ARM64)
        case CPU_TYPE_ARM64:
            base = SHARED_REGION_BASE_ARM64;
            size = SHARED_REGION_SIZE_ARM64;
            break;
#endif

        case CPU_TYPE_ARM:
            base = SHARED_REGION_BASE_ARM;
            size = SHARED_REGION_SIZE_ARM;
            break;

        case CPU_TYPE_X86_64:
            base = SHARED_REGION_BASE_X86_64;
            size = SHARED_REGION_SIZE_X86_64;
            break;

        case CPU_TYPE_I386:
            base = SHARED_REGION_BASE_I386;
            size = SHARED_REGION_SIZE_I386;
            break;

        default: {
            // Log an error about the unknown CPU type
            break;
        }
    }

    return (addr >= base && addr < (base + size));
}

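// Compute top-style memory metrics for the task by walking its regions with
// VM_REGION_TOP_INFO: "rprvt" accumulates resident private pages and "vprvt"
// the private virtual sizes, with shared-region and copy-on-write mappings
// classified by their share mode the same way top classifies them.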
void
MachVMMemory::GetMemorySizes(task_t task, cpu_type_t cputype, nub_process_t pid, mach_vm_size_t &rprvt, mach_vm_size_t &vprvt)
{
    // Collecting some other info cheaply but not reporting it for now.
    mach_vm_size_t empty = 0;
    mach_vm_size_t fw_private = 0;

    mach_vm_size_t aliased = 0;
    bool global_shared_text_data_mapped = false;
    vm_size_t pagesize = PageSize (task);

    for (mach_vm_address_t addr=0, size=0; ; addr += size)
    {
        vm_region_top_info_data_t info;
        mach_msg_type_number_t count = VM_REGION_TOP_INFO_COUNT;
        mach_port_t object_name;

        kern_return_t kr = mach_vm_region(task, &addr, &size, VM_REGION_TOP_INFO, (vm_region_info_t)&info, &count, &object_name);
        if (kr != KERN_SUCCESS) break;

        if (InSharedRegion(addr, cputype))
        {
            // Private Shared
            fw_private += info.private_pages_resident * pagesize;

            // Check if this process has the globally shared text and data
            // regions mapped in. If so, set global_shared_text_data_mapped
            // to TRUE and avoid checking again.
            if (global_shared_text_data_mapped == FALSE && info.share_mode == SM_EMPTY) {
                vm_region_basic_info_data_64_t b_info;
                mach_vm_address_t b_addr = addr;
                mach_vm_size_t b_size = size;
                count = VM_REGION_BASIC_INFO_COUNT_64;

                kr = mach_vm_region(task, &b_addr, &b_size, VM_REGION_BASIC_INFO, (vm_region_info_t)&b_info, &count, &object_name);
                if (kr != KERN_SUCCESS) break;

                if (b_info.reserved) {
                    global_shared_text_data_mapped = TRUE;
                }
            }

            // Short-circuit the loop if this isn't a shared private region,
            // since that's the only region type we care about within the
            // shared region's address range.
            if (info.share_mode != SM_PRIVATE)
            {
                continue;
            }
        }

        // Update counters according to the region type.
        if (info.share_mode == SM_COW && info.ref_count == 1)
        {
            // Treat single-reference SM_COW as SM_PRIVATE
            info.share_mode = SM_PRIVATE;
        }

        switch (info.share_mode)
        {
            case SM_LARGE_PAGE:
                // Treat SM_LARGE_PAGE the same as SM_PRIVATE
                // since they are not shareable and are wired.
            case SM_PRIVATE:
                rprvt += info.private_pages_resident * pagesize;
                rprvt += info.shared_pages_resident * pagesize;
                vprvt += size;
                break;

            case SM_EMPTY:
                empty += size;
                break;

            case SM_COW:
            case SM_SHARED:
            {
                if (pid == 0)
                {
                    // Treat kernel_task specially
                    if (info.share_mode == SM_COW)
                    {
                        rprvt += info.private_pages_resident * pagesize;
                        vprvt += size;
                    }
                    break;
                }

                if (info.share_mode == SM_COW)
                {
                    rprvt += info.private_pages_resident * pagesize;
                    vprvt += info.private_pages_resident * pagesize;
                }
                break;
            }
            default:
                // Log that an unexpected share mode was encountered.
                break;
        }
    }

    rprvt += aliased;
}

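// The helper below sums the purgeable memory queues reported by
// task_purgable_info (note the "purgable" spelling used by the kernel
// interface): eight FIFO volatile queues, the obsolete queue, and eight
// LIFO volatile queues.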
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22
#ifndef TASK_VM_INFO_PURGEABLE
// cribbed from sysmond
static uint64_t
SumVMPurgeableInfo(const vm_purgeable_info_t info)
{
    uint64_t sum = 0;
    int i;

    for (i = 0; i < 8; i++)
    {
        sum += info->fifo_data[i].size;
    }
    sum += info->obsolete_data.size;
    for (i = 0; i < 8; i++)
    {
        sum += info->lifo_data[i].size;
    }

    return sum;
}
#endif /* !TASK_VM_INFO_PURGEABLE */
#endif

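// Fill in the task's purgeable (volatile, reclaimable-by-the-kernel) and
// anonymous (dirty "internal" memory not backed by a file) byte counts.
// With TASK_VM_INFO_PURGEABLE both come straight from task_info(); on older
// systems the purgeable total is obtained via a dlsym'ed task_purgable_info()
// and subtracted from the internal size to approximate anonymous memory.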
static void
GetPurgeableAndAnonymous(task_t task, uint64_t &purgeable, uint64_t &anonymous)
{
#if defined (TASK_VM_INFO) && TASK_VM_INFO >= 22

    kern_return_t kr;
#ifndef TASK_VM_INFO_PURGEABLE
    task_purgable_info_t purgeable_info;
    uint64_t purgeable_sum = 0;
#endif /* !TASK_VM_INFO_PURGEABLE */
    mach_msg_type_number_t info_count;
    task_vm_info_data_t vm_info;

#ifndef TASK_VM_INFO_PURGEABLE
    typedef kern_return_t (*task_purgable_info_type) (task_t, task_purgable_info_t *);
    task_purgable_info_type task_purgable_info_ptr = NULL;
    task_purgable_info_ptr = (task_purgable_info_type)dlsym(RTLD_NEXT, "task_purgable_info");
    if (task_purgable_info_ptr != NULL)
    {
        kr = (*task_purgable_info_ptr)(task, &purgeable_info);
        if (kr == KERN_SUCCESS) {
            purgeable_sum = SumVMPurgeableInfo(&purgeable_info);
            purgeable = purgeable_sum;
        }
    }
#endif /* !TASK_VM_INFO_PURGEABLE */

    info_count = TASK_VM_INFO_COUNT;
#ifdef TASK_VM_INFO_PURGEABLE
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info, &info_count);
#else
    kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count);
#endif
    if (kr == KERN_SUCCESS)
    {
#ifdef TASK_VM_INFO_PURGEABLE
        purgeable = vm_info.purgeable_volatile_resident;
        anonymous = vm_info.internal - vm_info.purgeable_volatile_pmap;
#else
        if (purgeable_sum < vm_info.internal)
        {
            anonymous = vm_info.internal - purgeable_sum;
        }
        else
        {
            anonymous = 0;
        }
#endif
    }

#endif
}

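// Gather the requested memory statistics for the profiling support: host
// physical memory, host VM statistics, and per-task private/resident/virtual
// sizes; the dirty-page and purgeable/anonymous numbers are gated behind
// their own scan-type flags since they are more expensive to collect.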
nub_bool_t
MachVMMemory::GetMemoryProfile(DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti, cpu_type_t cputype, nub_process_t pid, vm_statistics_data_t &vm_stats, uint64_t &physical_memory, mach_vm_size_t &rprvt, mach_vm_size_t &rsize, mach_vm_size_t &vprvt, mach_vm_size_t &vsize, mach_vm_size_t &dirty_size, mach_vm_size_t &purgeable, mach_vm_size_t &anonymous)
{
    if (scanType & eProfileHostMemory)
        physical_memory = GetPhysicalMemory();

    if (scanType & eProfileMemory)
    {
        static mach_port_t localHost = mach_host_self();
        mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
        host_statistics(localHost, HOST_VM_INFO, (host_info_t)&vm_stats, &count);
        vm_stats.wire_count += GetStolenPages(task);

        GetMemorySizes(task, cputype, pid, rprvt, vprvt);

        rsize = ti.resident_size;
        vsize = ti.virtual_size;

        if (scanType & eProfileMemoryDirtyPage)
        {
            // This uses vmmap's strategy. We don't use the returned rsize
            // for now; we prefer to match top's version since that's what
            // we do for the rest of the metrics.
            GetRegionSizes(task, rsize, dirty_size);
        }

        if (scanType & eProfileMemoryAnonymous)
        {
            GetPurgeableAndAnonymous(task, purgeable, anonymous);
        }
    }

    return true;
}

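// Read up to "data_count" bytes from "address" in the target task into
// "data", one page at a time, and return the number of bytes actually read.
// A short return means an unreadable or unmapped page was hit.
//
// Usage sketch (illustrative; the surrounding variables are hypothetical):
//
//   MachVMMemory vm;
//   uint8_t buf[32];
//   nub_size_t n = vm.Read(task, addr, buf, sizeof(buf));
//   if (n < sizeof(buf))
//       ; // partial read: the range crossed into inaccessible memory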
nub_size_t
MachVMMemory::Read(task_t task, nub_addr_t address, void *data, nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_read = 0;
    nub_addr_t curr_addr = address;
    uint8_t *curr_data = (uint8_t*)data;
    while (total_bytes_read < data_count)
    {
        mach_vm_size_t curr_size = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read);
        mach_msg_type_number_t curr_bytes_read = 0;
        vm_offset_t vm_memory = 0; // vm_offset_t is an integer type, so initialize with 0, not NULL
        m_err = ::mach_vm_read (task, curr_addr, curr_size, &vm_memory, &curr_bytes_read);

        if (DNBLogCheckLogBit(LOG_MEMORY))
            m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt => %i )", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read);

        if (m_err.Success())
        {
            if (curr_bytes_read != curr_size)
            {
                if (DNBLogCheckLogBit(LOG_MEMORY))
                    m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = %llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes", task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory, curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
            }
            ::memcpy (curr_data, (void *)vm_memory, curr_bytes_read);
            ::vm_deallocate (mach_task_self (), vm_memory, curr_bytes_read);
            total_bytes_read += curr_bytes_read;
            curr_addr += curr_bytes_read;
            curr_data += curr_bytes_read;
        }
        else
        {
            break;
        }
    }
    return total_bytes_read;
}


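// Write "data_count" bytes to "address" in the target task, region by
// region: for each region we first ensure read/write protections via
// MachVMRegion::SetProtections() and then delegate the page-sized writes
// to WriteRegion(). Returns the number of bytes actually written.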
nub_size_t
MachVMMemory::Write(task_t task, nub_addr_t address, const void *data, nub_size_t data_count)
{
    MachVMRegion vmRegion(task);

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;


    while (total_bytes_written < data_count)
    {
        if (vmRegion.GetRegionForAddress(curr_addr))
        {
            mach_vm_size_t curr_data_count = data_count - total_bytes_written;
            mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr);
            if (region_bytes_left == 0)
            {
                break;
            }
            if (curr_data_count > region_bytes_left)
                curr_data_count = region_bytes_left;

            if (vmRegion.SetProtections(curr_addr, curr_data_count, VM_PROT_READ | VM_PROT_WRITE))
            {
                nub_size_t bytes_written = WriteRegion(task, curr_addr, curr_data, curr_data_count);
                if (bytes_written == 0)
                {
                    // The error should already have been posted by WriteRegion...
                    break;
                }
                else
                {
                    total_bytes_written += bytes_written;
                    curr_addr += bytes_written;
                    curr_data += bytes_written;
                }
            }
            else
            {
                DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on region for address: [0x%8.8llx-0x%8.8llx)", (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
                break;
            }
        }
        else
        {
            DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS, "Failed to get region for address: 0x%8.8llx", (uint64_t)curr_addr);
            break;
        }
    }

    return total_bytes_written;
}


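// Write within a single region, page by page, using mach_vm_write(). On
// non-x86 targets the cache is then flushed with
// vm_machine_attribute(MATTR_CACHE) so that freshly written code (for
// example, breakpoint instructions) is picked up by the target.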
nub_size_t
MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, const void *data, const nub_size_t data_count)
{
    if (data == NULL || data_count == 0)
        return 0;

    nub_size_t total_bytes_written = 0;
    nub_addr_t curr_addr = address;
    const uint8_t *curr_data = (const uint8_t*)data;
    while (total_bytes_written < data_count)
    {
        mach_msg_type_number_t curr_data_count = MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_written);
        m_err = ::mach_vm_write (task, curr_addr, (pointer_t) curr_data, curr_data_count);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, data = %8.8p, dataCnt = %u )", task, (uint64_t)curr_addr, curr_data, curr_data_count);

        if (m_err.Fail())
            break; // don't let the cache flush below overwrite the write error

#if !defined (__i386__) && !defined (__x86_64__)
        vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH;

        m_err = ::vm_machine_attribute (task, curr_addr, curr_data_count, MATTR_CACHE, &mattr_value);
        if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail())
            m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = 0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value => MATTR_VAL_CACHE_FLUSH )", task, (uint64_t)curr_addr, curr_data_count);
#endif

        if (m_err.Success())
        {
            total_bytes_written += curr_data_count;
            curr_addr += curr_data_count;
            curr_data += curr_data_count;
        }
        else
        {
            break;
        }
    }
    return total_bytes_written;
}