/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_page.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Resident memory management module.
 */

#include <debug.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/sdt.h>
#include <kern/counter.h>
#include <kern/host_statistics.h>
#include <kern/sched_prim.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc_internal.h>
#include <kern/ledger.h>
#include <kern/ecc.h>
#include <vm/pmap.h>
#include <vm/vm_init_xnu.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_kern_xnu.h>                     /* kmem_alloc() */
#include <vm/vm_compressor_pager_internal.h>
#include <kern/misc_protos.h>
#include <mach_debug/zone_info.h>
#include <vm/cpm_internal.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <san/kasan.h>
#include <os/log.h>

#include <vm/vm_protos_internal.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_object_internal.h>
#if defined (__x86_64__)
#include <i386/misc_protos.h>
#endif

#if CONFIG_SPTM
#include <arm64/sptm/sptm.h>
#endif

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache_internal.h>
#endif

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */

#include <sys/kdebug.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif
#if defined(__arm64__)
#include <arm/cpu_internal.h>
#endif /* defined(__arm64__) */

#if MACH_ASSERT

TUNABLE(bool, vm_check_refs_on_alloc, "vm_check_refs_on_alloc", false);
#define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))

#else /* MACH_ASSERT */

#define ASSERT_PMAP_FREE(mem) /* nothing */

#endif /* MACH_ASSERT */

extern boolean_t vm_pageout_running;
extern thread_t vm_pageout_scan_thread;
extern bool vps_dynamic_priority_enabled;

char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data vm_page_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

#if DEVELOPMENT || DEBUG
extern struct memory_object_pager_ops shared_region_pager_ops;
unsigned int shared_region_pagers_resident_count = 0;
unsigned int shared_region_pagers_resident_peak = 0;
#endif /* DEVELOPMENT || DEBUG */



int PERCPU_DATA(start_color);
vm_page_t PERCPU_DATA(free_pages);
boolean_t hibernate_cleaning_in_progress = FALSE;

uint32_t vm_lopage_free_count = 0;
uint32_t vm_lopage_free_limit = 0;
uint32_t vm_lopage_lowater = 0;
boolean_t vm_lopage_refill = FALSE;
boolean_t vm_lopage_needed = FALSE;

int speculative_age_index = 0;
int speculative_steal_index = 0;
struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_RESERVED_SPECULATIVE_AGE_Q + 1];

boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
                                                    * Updated and checked behind the vm_page_queues_lock. */

static void vm_page_free_prepare(vm_page_t page);
static vm_page_t vm_page_grab_fictitious_common(ppnum_t, boolean_t);

static void vm_tag_init(void);

/* for debugging purposes */
SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
    VM_PAGE_PACKED_FROM_ARRAY;
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
    VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

/*
 * These variables record the values returned by vm_page_bootstrap,
 * for debugging purposes.  The implementation of pmap_steal_memory
 * and pmap_startup here also uses them internally.
 */

vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
uint32_t vm_page_pages;

/*
 * The vm_page_lookup() routine, which provides for fast
 * (virtual memory object, offset) to page lookup, employs
 * the following hash table.  The vm_page_{insert,remove}
 * routines install and remove associations in the table.
 * [This table is often called the virtual-to-physical,
 * or VP, table.]
 */
typedef struct {
	vm_page_packed_t page_list;
#if MACH_PAGE_HASH_STATS
	int cur_count;          /* current count */
	int hi_count;           /* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;


#define BUCKETS_PER_LOCK 16

SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;         /* Array of buckets */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_count = 0;      /* How big is array? */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_mask;             /* Mask for hash function */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_shift;            /* Shift for hash function */
SECURITY_READ_ONLY_LATE(uint32_t) vm_page_bucket_hash;               /* Basic bucket hash */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_lock_count = 0; /* How big is array of locks? */

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif
#ifndef VM_TAG_SIZECLASSES
#error VM_TAG_SIZECLASSES
#endif

/* for debugging */
SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;

vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE];
#if VM_TAG_SIZECLASSES
static vm_allocation_zone_total_t **vm_allocation_zone_totals;
#endif /* VM_TAG_SIZECLASSES */

vm_tag_t vm_allocation_tag_highest;

#if VM_PAGE_BUCKETS_CHECK
boolean_t vm_page_buckets_check_ready = FALSE;
#if VM_PAGE_FAKE_BUCKETS
vm_page_bucket_t *vm_page_fake_buckets;         /* decoy buckets */
vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

#if MACH_PAGE_HASH_STATS
/* This routine is only for debug.  It is intended to be called by
 * hand by a developer using a kernel debugger.  This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
	int i;
	int numbuckets = 0;
	int highsum = 0;
	int maxdepth = 0;

	for (i = 0; i < vm_page_bucket_count; i++) {
		if (vm_page_buckets[i].hi_count) {
			numbuckets++;
			highsum += vm_page_buckets[i].hi_count;
			if (vm_page_buckets[i].hi_count > maxdepth) {
				maxdepth = vm_page_buckets[i].hi_count;
			}
		}
	}
	printf("Total number of buckets: %d\n", vm_page_bucket_count);
	printf("Number used buckets:     %d = %d%%\n",
	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
	printf("Number unused buckets:   %d = %d%%\n",
	    vm_page_bucket_count - numbuckets,
	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
	printf("Sum of bucket max depth: %d\n", highsum);
	printf("Average bucket depth:    %d.%02d\n",
	    highsum / vm_page_bucket_count,
	    (highsum % vm_page_bucket_count) * 100 / vm_page_bucket_count);
	printf("Maximum bucket depth:    %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */
293
294 /*
295 * The virtual page size is currently implemented as a runtime
296 * variable, but is constant once initialized using vm_set_page_size.
297 * This initialization must be done in the machine-dependent
298 * bootstrap sequence, before calling other machine-independent
299 * initializations.
300 *
301 * All references to the virtual page size outside this
302 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
303 * constants.
304 */
305 #if defined(__arm64__)
306 vm_size_t page_size;
307 vm_size_t page_mask;
308 int page_shift;
309 #else
310 vm_size_t page_size = PAGE_SIZE;
311 vm_size_t page_mask = PAGE_MASK;
312 int page_shift = PAGE_SHIFT;
313 #endif
314
315 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
316 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
317 vm_page_t vm_page_array_ending_addr;
318
319 unsigned int vm_pages_count = 0;
320
321 /*
322 * Resident pages that represent real memory
323 * are allocated from a set of free lists,
324 * one per color.
325 */
326 unsigned int vm_colors;
327 unsigned int vm_color_mask; /* mask is == (vm_colors-1) */
328 unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
329 unsigned int vm_free_magazine_refill_limit = 0;
330
331
332 struct vm_page_queue_free_head {
333 vm_page_queue_head_t qhead;
334 } VM_PAGE_PACKED_ALIGNED;
335
336 struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];
337
338
339 unsigned int vm_page_free_wanted;
340 unsigned int vm_page_free_wanted_privileged;
341 #if CONFIG_SECLUDED_MEMORY
342 unsigned int vm_page_free_wanted_secluded;
343 #endif /* CONFIG_SECLUDED_MEMORY */
344 unsigned int vm_page_free_count;
345
346 unsigned int vm_page_realtime_count;
347
348 /*
349 * Occasionally, the virtual memory system uses
350 * resident page structures that do not refer to
351 * real pages, for example to leave a page with
352 * important state information in the VP table.
353 *
354 * These page structures are allocated the way
355 * most other kernel structures are.
356 */
357 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
358 vm_locks_array_t vm_page_locks;
359
360 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
361 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
362 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
363 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
364 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
365 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
366 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
367 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
368 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
369
370 unsigned int vm_page_local_q_soft_limit = 250;
371 unsigned int vm_page_local_q_hard_limit = 500;
372 struct vpl *__zpercpu vm_page_local_q;
373
374 /* N.B. Guard and fictitious pages must not
375 * be assigned a zero phys_page value.
376 */
377 /*
378 * Fictitious pages don't have a physical address,
379 * but we must initialize phys_page to something.
380 * For debugging, this should be a strange value
381 * that the pmap module can recognize in assertions.
382 */
383 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
384
385 /*
386 * Guard pages are not accessible so they don't
387 * need a physical address, but we need to enter
388 * one in the pmap.
389 * Let's make it recognizable and make sure that
390 * we don't use a real physical page with that
391 * physical address.
392 */
393 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
394
395 /*
396 * Resident page structures are also chained on
397 * queues that are used by the page replacement
398 * system (pageout daemon). These queues are
399 * defined here, but are shared by the pageout
400 * module. The inactive queue is broken into
401 * file backed and anonymous for convenience as the
402 * pageout daemon often assignes a higher
403 * importance to anonymous pages (less likely to pick)
404 */
405 vm_page_queue_head_t vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
406 vm_page_queue_head_t vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
407 #if CONFIG_SECLUDED_MEMORY
408 vm_page_queue_head_t vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
409 #endif /* CONFIG_SECLUDED_MEMORY */
410 vm_page_queue_head_t vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED; /* inactive memory queue for anonymous pages */
411 vm_page_queue_head_t vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
412
413 queue_head_t vm_objects_wired;
414
415 vm_page_queue_head_t vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
416 uint32_t vm_page_donate_mode;
417 uint32_t vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
418 uint32_t vm_page_donate_count;
419 bool vm_page_donate_queue_ripe;
420
421
422 vm_page_queue_head_t vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
423 uint32_t vm_page_background_target;
424 uint32_t vm_page_background_target_snapshot;
425 uint32_t vm_page_background_count;
426 uint64_t vm_page_background_promoted_count;
427
428 uint32_t vm_page_background_internal_count;
429 uint32_t vm_page_background_external_count;
430
431 uint32_t vm_page_background_mode;
432 uint32_t vm_page_background_exclude_external;
433
434 unsigned int vm_page_active_count;
435 unsigned int vm_page_inactive_count;
436 unsigned int vm_page_kernelcache_count;
437 #if CONFIG_SECLUDED_MEMORY
438 unsigned int vm_page_secluded_count;
439 unsigned int vm_page_secluded_count_free;
440 unsigned int vm_page_secluded_count_inuse;
441 unsigned int vm_page_secluded_count_over_target;
442 #endif /* CONFIG_SECLUDED_MEMORY */
443 unsigned int vm_page_anonymous_count;
444 unsigned int vm_page_throttled_count;
445 unsigned int vm_page_speculative_count;
446
447 unsigned int vm_page_wire_count;
448 unsigned int vm_page_wire_count_on_boot = 0;
449 unsigned int vm_page_stolen_count = 0;
450 unsigned int vm_page_wire_count_initial;
451 unsigned int vm_page_gobble_count = 0;
452 unsigned int vm_page_kern_lpage_count = 0;
453
454 uint64_t booter_size; /* external so it can be found in core dumps */
455
456 #define VM_PAGE_WIRE_COUNT_WARNING 0
457 #define VM_PAGE_GOBBLE_COUNT_WARNING 0
458
459 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
460 unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
461 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
462
463 unsigned int vm_page_xpmapped_external_count = 0;
464 unsigned int vm_page_external_count = 0;
465 unsigned int vm_page_internal_count = 0;
466 unsigned int vm_page_pageable_external_count = 0;
467 unsigned int vm_page_pageable_internal_count = 0;
468
469 #if DEVELOPMENT || DEBUG
470 unsigned int vm_page_speculative_recreated = 0;
471 unsigned int vm_page_speculative_created = 0;
472 unsigned int vm_page_speculative_used = 0;
473 #endif
474
475 vm_page_queue_head_t vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
476
477 unsigned int vm_page_cleaned_count = 0;
478
479 uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
480 ppnum_t max_valid_low_ppnum = PPNUM_MAX;
481
482
483 /*
484 * Several page replacement parameters are also
485 * shared with this module, so that page allocation
486 * (done here in vm_page_alloc) can trigger the
487 * pageout daemon.
488 */
489 unsigned int vm_page_free_target = 0;
490 unsigned int vm_page_free_min = 0;
491 unsigned int vm_page_throttle_limit = 0;
492 unsigned int vm_page_inactive_target = 0;
493 #if CONFIG_SECLUDED_MEMORY
494 unsigned int vm_page_secluded_target = 0;
495 #endif /* CONFIG_SECLUDED_MEMORY */
496 unsigned int vm_page_anonymous_min = 0;
497 unsigned int vm_page_free_reserved = 0;
498
499
500 /*
501 * The VM system has a couple of heuristics for deciding
502 * that pages are "uninteresting" and should be placed
503 * on the inactive queue as likely candidates for replacement.
504 * These variables let the heuristics be controlled at run-time
505 * to make experimentation easier.
506 */
507
508 boolean_t vm_page_deactivate_hint = TRUE;
509
510 struct vm_page_stats_reusable vm_page_stats_reusable;
511
512 /*
513 * vm_set_page_size:
514 *
515 * Sets the page size, perhaps based upon the memory
516 * size. Must be called before any use of page-size
517 * dependent functions.
518 *
519 * Sets page_shift and page_mask from page_size.
520 */
521 void
vm_set_page_size(void)522 vm_set_page_size(void)
523 {
524 page_size = PAGE_SIZE;
525 page_mask = PAGE_MASK;
526 page_shift = PAGE_SHIFT;
527
528 if ((page_mask & page_size) != 0) {
529 panic("vm_set_page_size: page size not a power of two");
530 }
531
532 for (page_shift = 0;; page_shift++) {
533 if ((1U << page_shift) == page_size) {
534 break;
535 }
536 }
537 }
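
/*
 * Worked example (hypothetical 16 KB page configuration): with
 * PAGE_SIZE == 16384 the loop above stops at page_shift == 14,
 * since 1U << 14 == 16384, and page_mask is 0x3fff.  A page size
 * that is not a power of two panics before the loop is reached.
 */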

#if defined (__x86_64__)

#define MAX_CLUMP_SIZE 16
#define DEFAULT_CLUMP_SIZE 4

unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;

#if DEVELOPMENT || DEBUG
unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

static inline void
vm_clump_update_stats(unsigned int c)
{
	assert(c <= vm_clump_size);
	if (c > 0 && c <= vm_clump_size) {
		vm_clump_stats[c] += c;
	}
	vm_clump_allocs += c;
}
#endif /* if DEVELOPMENT || DEBUG */

/* Called once to set up the VM clump knobs */
static void
vm_page_setup_clump( void )
{
	unsigned int override, n;

	vm_clump_size = DEFAULT_CLUMP_SIZE;
	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
		vm_clump_size = override;
	}

	if (vm_clump_size > MAX_CLUMP_SIZE) {
		panic("vm_page_setup_clump:: clump_size is too large!");
	}
	if (vm_clump_size < 1) {
		panic("vm_page_setup_clump:: clump_size must be >= 1");
	}
	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
		panic("vm_page_setup_clump:: clump_size must be a power of 2");
	}

	vm_clump_promote_threshold = vm_clump_size;
	vm_clump_mask = vm_clump_size - 1;
	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
		;
	}

#if DEVELOPMENT || DEBUG
	bzero(vm_clump_stats, sizeof(vm_clump_stats));
	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
#endif /* if DEVELOPMENT || DEBUG */
}
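
/*
 * Worked example: with the default clump_size of 4 (no "clump_size"
 * boot-arg override), the knobs come out as vm_clump_mask == 3,
 * vm_clump_shift == 2 and vm_clump_promote_threshold == 4, i.e.
 * physical pages are managed in aligned runs of 4 on the free lists.
 */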

#endif /* #if defined (__x86_64__) */

#define COLOR_GROUPS_TO_STEAL 4

/* Called once during startup, once the cache geometry is known.
 */
static void
vm_page_set_colors( void )
{
	unsigned int n, override;

#if defined (__x86_64__)
	/* adjust #colors because we need to color outside the clump boundary */
	vm_cache_geometry_colors >>= vm_clump_shift;
#endif
	if (PE_parse_boot_argn("colors", &override, sizeof(override))) { /* colors specified as a boot-arg? */
		n = override;
	} else if (vm_cache_geometry_colors) {  /* do we know what the cache geometry is? */
		n = vm_cache_geometry_colors;
	} else {
		n = DEFAULT_COLORS;             /* use default if all else fails */
	}
	if (n == 0) {
		n = 1;
	}
	if (n > MAX_COLORS) {
		n = MAX_COLORS;
	}

	/* the count must be a power of 2  */
	if ((n & (n - 1)) != 0) {
		n = DEFAULT_COLORS;             /* use default if all else fails */
	}
	vm_colors = n;
	vm_color_mask = n - 1;

	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;

#if defined (__x86_64__)
	/* adjust for reduction in colors due to clumping and multiple cores */
	if (real_ncpus) {
		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
	}
#endif
}
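
/*
 * Worked example with hypothetical values: if the hardware reported 32
 * cache colors and this is x86_64 with the default clump size of 4, the
 * clump adjustment leaves n = 32 >> 2 = 8, so vm_colors = 8 and
 * vm_color_mask = 7.  The refill limit starts at 8 * 4 = 32 pages and,
 * with real_ncpus = 8, is scaled to 32 * (4 * 8) = 1024 pages.
 */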

/*
 * During single threaded early boot we don't initialize all pages.
 * This avoids some delay during boot. They'll be initialized and
 * added to the free list as needed or after we are multithreaded by
 * what becomes the pageout thread.
 */
static boolean_t fill = FALSE;
static unsigned int fillval;
uint_t vm_delayed_count = 0;  /* when non-zero, indicates we may have more pages to init */
ppnum_t delay_above_pnum = PPNUM_MAX;

/*
 * On x86, the first 8 GB initializes quickly and gives us lots of lowmem + mem above it to start off with.
 * If ARM ever uses delayed page initialization, this value may need to be quite different.
 */
#define DEFAULT_DELAY_ABOVE_PHYS_GB (8)

/*
 * When we have to dip into more delayed pages due to low memory, free up
 * a large chunk to get things back to normal. This avoids contention on the
 * delayed code allocating page by page.
 */
#define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)

/*
 * Get and initialize the next delayed page.
 */
static vm_page_t
vm_get_delayed_page(int grab_options)
{
	vm_page_t p;
	ppnum_t pnum;

	/*
	 * Get a new page if we have one.
	 */
	vm_free_page_lock();
	if (vm_delayed_count == 0) {
		vm_free_page_unlock();
		return NULL;
	}

	if (!pmap_next_page(&pnum)) {
		vm_delayed_count = 0;
		vm_free_page_unlock();
		return NULL;
	}


	assert(vm_delayed_count > 0);
	--vm_delayed_count;

#if defined(__x86_64__)
	/* x86 cluster code requires increasing phys_page in vm_pages[] */
	if (vm_pages_count > 0) {
		assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
	}
#endif
	p = &vm_pages[vm_pages_count];
	assert(p < vm_page_array_ending_addr);
	vm_page_init(p, pnum, FALSE);
	++vm_pages_count;
	++vm_page_pages;
	vm_free_page_unlock();

	/*
	 * These pages were initially counted as wired, undo that now.
	 */
	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	} else {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
		vm_page_lockspin_queues();
	}
	--vm_page_wire_count;
	--vm_page_wire_count_initial;
	if (vm_page_wire_count_on_boot != 0) {
		--vm_page_wire_count_on_boot;
	}
	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
		vm_page_unlock_queues();
	}


	if (fill) {
		fillPage(pnum, fillval);
	}
	return p;
}

static void vm_page_module_init_delayed(void);

/*
 * Free all remaining delayed pages to the free lists.
 */
void
vm_free_delayed_pages(void)
{
	vm_page_t p;
	vm_page_t list = NULL;
	uint_t cnt = 0;
	vm_offset_t start_free_va;
	int64_t free_size;

	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
		if (vm_himemory_mode) {
			vm_page_release(p, FALSE);
		} else {
			p->vmp_snext = list;
			list = p;
		}
		++cnt;
	}

	/*
	 * If not in himemory mode, free the pages in reverse order,
	 * so that the low memory pages end up first on the free lists (LIFO).
	 */
	while (list != NULL) {
		p = list;
		list = p->vmp_snext;
		p->vmp_snext = NULL;
		vm_page_release(p, FALSE);
	}
#if DEVELOPMENT || DEBUG
	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
#endif

	/*
	 * Free up any unused full pages at the end of the vm_pages[] array
	 */
	start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);

#if defined(__x86_64__)
	/*
	 * Since x86 might have used large pages for vm_pages[], we can't
	 * free starting in the middle of a partially used large page.
	 */
	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
	}
#endif
	if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
		free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
		if (free_size > 0) {
			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
			vm_page_array_ending_addr = (void *)start_free_va;

			/*
			 * Note there's no locking here, as only this thread will ever change this value.
			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
			 */
			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);

#if DEVELOPMENT || DEBUG
			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
			    (long)free_size, (long)start_free_va);
#endif
		}
	}


	/*
	 * now we can create the VM page array zone
	 */
	vm_page_module_init_delayed();
}

/*
 * Try to free up enough delayed pages to match a contig memory allocation.
 */
static void
vm_free_delayed_pages_contig(
	uint_t npages,
	ppnum_t max_pnum,
	ppnum_t pnum_mask)
{
	vm_page_t p;
	ppnum_t pnum;
	uint_t cnt = 0;

	/*
	 * Treat 0 as the absolute max page number.
	 */
	if (max_pnum == 0) {
		max_pnum = PPNUM_MAX;
	}

	/*
	 * Free till we get a properly aligned start page
	 */
	for (;;) {
		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
		if (p == NULL) {
			return;
		}
		pnum = VM_PAGE_GET_PHYS_PAGE(p);
		vm_page_release(p, FALSE);
		if (pnum >= max_pnum) {
			return;
		}
		if ((pnum & pnum_mask) == 0) {
			break;
		}
	}

	/*
	 * Having a healthy pool of free pages will help performance. We don't
	 * want to fall back to the delayed code for every page allocation.
	 */
	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
		npages += VM_DELAY_PAGE_CHUNK;
	}

	/*
	 * Now free up the pages
	 */
	for (cnt = 1; cnt < npages; ++cnt) {
		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
		if (p == NULL) {
			return;
		}
		vm_page_release(p, FALSE);
	}
}

#define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
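/*
 * For example, ROUNDUP_NEXTP2(5) evaluates to
 * 1U << (32 - __builtin_clz(4)) == 1U << 3 == 8, and a power of two
 * maps to itself: ROUNDUP_NEXTP2(8) == 8.  Callers must pass X >= 2,
 * since __builtin_clz(0) is undefined.
 */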

void
vm_page_init_local_q(unsigned int num_cpus)
{
	struct vpl *t_local_q;

	/*
	 * no point in this for a uni-processor system
	 */
	if (num_cpus >= 2) {
		ml_cpu_info_t cpu_info;

		/*
		 * Force the allocation alignment to a cacheline,
		 * because the `vpl` struct has a lock and will be taken
		 * cross CPU so we want to isolate the rest of the per-CPU
		 * data to avoid false sharing due to this lock being taken.
		 */

		ml_cpu_get_info(&cpu_info);

		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
		    cpu_info.cache_line_size - 1);

		zpercpu_foreach(lq, t_local_q) {
			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
			vm_page_queue_init(&lq->vpl_queue);
		}

		/* make the initialization visible to all cores */
		os_atomic_store(&vm_page_local_q, t_local_q, release);
	}
}

/*
 * vm_init_before_launchd
 *
 * This should be called right before launchd is loaded.
 */
void
vm_init_before_launchd()
{
	vm_page_lockspin_queues();
	vm_page_wire_count_on_boot = vm_page_wire_count;
	vm_page_unlock_queues();
}


/*
 *	vm_page_bootstrap:
 *
 *	Initializes the resident memory module.
 *
 *	Allocates memory for the page cells, and
 *	for the object/offset-to-page hash table headers.
 *	Each page cell is initialized and placed on the free list.
 *	Returns the range of available kernel virtual memory.
 */
__startup_func
void
vm_page_bootstrap(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int i;
	unsigned int log1;
	unsigned int log2;
	unsigned int size;

	/*
	 * Initialize the page queues.
	 */

	lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
	lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
	lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);

	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
		int group;

		purgeable_queues[i].token_q_head = 0;
		purgeable_queues[i].token_q_tail = 0;
		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
			queue_init(&purgeable_queues[i].objq[group]);
		}

		purgeable_queues[i].type = i;
		purgeable_queues[i].new_pages = 0;
#if MACH_ASSERT
		purgeable_queues[i].debug_count_tokens = 0;
		purgeable_queues[i].debug_count_objects = 0;
#endif
	}
	purgeable_nonvolatile_count = 0;
	queue_init(&purgeable_nonvolatile_queue);

	for (i = 0; i < MAX_COLORS; i++) {
		vm_page_queue_init(&vm_page_queue_free[i].qhead);
	}

	vm_page_queue_init(&vm_lopage_queue_free);
	vm_page_queue_init(&vm_page_queue_active);
	vm_page_queue_init(&vm_page_queue_inactive);
#if CONFIG_SECLUDED_MEMORY
	vm_page_queue_init(&vm_page_queue_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */
	vm_page_queue_init(&vm_page_queue_cleaned);
	vm_page_queue_init(&vm_page_queue_throttled);
	vm_page_queue_init(&vm_page_queue_anonymous);
	queue_init(&vm_objects_wired);

	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);

		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
	}

	vm_page_queue_init(&vm_page_queue_donate);
	vm_page_queue_init(&vm_page_queue_background);

	vm_page_background_count = 0;
	vm_page_background_internal_count = 0;
	vm_page_background_external_count = 0;
	vm_page_background_promoted_count = 0;

	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);

	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
	}

#if defined(__LP64__)
	vm_page_background_mode = VM_PAGE_BG_ENABLED;
	vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
#else
	vm_page_background_mode = VM_PAGE_BG_DISABLED;
	vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
#endif
	vm_page_background_exclude_external = 0;

	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));

	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
		vm_page_background_mode = VM_PAGE_BG_DISABLED;
	}

	PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
	if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
		vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
	}

	vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
	vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
	vm_page_donate_target = vm_page_donate_target_high;
	vm_page_donate_count = 0;

	vm_page_free_wanted = 0;
	vm_page_free_wanted_privileged = 0;
#if CONFIG_SECLUDED_MEMORY
	vm_page_free_wanted_secluded = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

#if defined (__x86_64__)
	/* this must be called before vm_page_set_colors() */
	vm_page_setup_clump();
#endif

	vm_page_set_colors();

	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;

	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
		vm_allocation_sites_static[t].refcount = 2;
		vm_allocation_sites_static[t].tag = t;
		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
	}
	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];

	/*
	 * Steal memory for the map and zone subsystems.
	 */
	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);

	/*
	 * Allocate (and initialize) the virtual-to-physical
	 * table hash buckets.
	 *
	 * The number of buckets should be a power of two to
	 * get a good hash function.  The following computation
	 * chooses the first power of two that is greater than
	 * or equal to the number of physical pages in the system.
	 */

	if (vm_page_bucket_count == 0) {
		unsigned int npages = pmap_free_pages();

		vm_page_bucket_count = 1;
		while (vm_page_bucket_count < npages) {
			vm_page_bucket_count <<= 1;
		}
	}
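
	/*
	 * Worked example (hypothetical): a 4 GB configuration with 16 KB
	 * pages has npages = 262144 = 2^18, so the doubling loop above
	 * leaves vm_page_bucket_count at exactly 2^18; a page count that
	 * is not a power of two rounds up to the next power of two.
	 */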
	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;

	vm_page_hash_mask = vm_page_bucket_count - 1;

	/*
	 * Calculate object shift value for hashing algorithm:
	 *   O = log2(sizeof(struct vm_object))
	 *   B = log2(vm_page_bucket_count)
	 * hash shifts the object left by
	 *   B/2 - O
	 */
	size = vm_page_bucket_count;
	for (log1 = 0; size > 1; log1++) {
		size /= 2;
	}
	size = sizeof(struct vm_object);
	for (log2 = 0; size > 1; log2++) {
		size /= 2;
	}
	vm_page_hash_shift = log1 / 2 - log2 + 1;

	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);   /* Get (ceiling of sqrt of table size) */
	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);  /* Get (ceiling of quadroot of table size) */
	vm_page_bucket_hash |= 1;                       /* Set bit and add 1 - always must be 1 to ensure unique series */
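
	/*
	 * Worked example (assuming vm_page_bucket_count = 2^20 and, for
	 * illustration only, sizeof(struct vm_object) = 256 = 2^8):
	 * log1 = 20 and log2 = 8, so vm_page_hash_shift = 20/2 - 8 + 1 = 3
	 * and vm_page_bucket_hash = (1 << 10) | (1 << 5) | 1 = 0x421.
	 */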

	if (vm_page_hash_mask & vm_page_bucket_count) {
		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
	}

#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
	/*
	 * Allocate a decoy set of page buckets, to detect
	 * any stomping there.
	 */
	vm_page_fake_buckets = (vm_page_bucket_t *)
	    pmap_steal_memory(vm_page_bucket_count *
	    sizeof(vm_page_bucket_t), 0);
	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
	vm_page_fake_buckets_end =
	    vm_map_round_page((vm_page_fake_buckets_start +
	    (vm_page_bucket_count *
	    sizeof(vm_page_bucket_t))),
	    PAGE_MASK);
	char *cp;
	for (cp = (char *)vm_page_fake_buckets_start;
	    cp < (char *)vm_page_fake_buckets_end;
	    cp++) {
		*cp = 0x5a;
	}
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

	kernel_debug_string_early("vm_page_buckets");
	vm_page_buckets = (vm_page_bucket_t *)
	    pmap_steal_memory(vm_page_bucket_count *
	    sizeof(vm_page_bucket_t), 0);

	kernel_debug_string_early("vm_page_bucket_locks");
	vm_page_bucket_locks = (lck_spin_t *)
	    pmap_steal_memory(vm_page_bucket_lock_count *
	    sizeof(lck_spin_t), 0);

	for (i = 0; i < vm_page_bucket_count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];

		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if MACH_PAGE_HASH_STATS
		bucket->cur_count = 0;
		bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
	}

	for (i = 0; i < vm_page_bucket_lock_count; i++) {
		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
	}

	vm_tag_init();

#if VM_PAGE_BUCKETS_CHECK
	vm_page_buckets_check_ready = TRUE;
#endif /* VM_PAGE_BUCKETS_CHECK */

	/*
	 * Machine-dependent code allocates the resident page table.
	 * It uses vm_page_init to initialize the page frames.
	 * The code also returns to us the virtual space available
	 * to the kernel.  We don't trust the pmap module
	 * to get the alignment right.
	 */

	kernel_debug_string_early("pmap_startup");
	pmap_startup(&virtual_space_start, &virtual_space_end);
	virtual_space_start = round_page(virtual_space_start);
	virtual_space_end = trunc_page(virtual_space_end);

	*startp = virtual_space_start;
	*endp = virtual_space_end;

	/*
	 * Compute the initial "wire" count.
	 * Up until now, the pages which have been set aside are not under
	 * the VM system's control, so although they aren't explicitly
	 * wired, they nonetheless can't be moved. At this moment,
	 * all VM managed pages are "free", courtesy of pmap_startup.
	 */
	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
	    vm_page_free_count - vm_lopage_free_count;
#if CONFIG_SECLUDED_MEMORY
	vm_page_wire_count -= vm_page_secluded_count;
#endif
	vm_page_wire_count_initial = vm_page_wire_count;

	/* capture this for later use */
	booter_size = ml_get_booter_memory_size();

	printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
	    vm_page_free_count, vm_page_wire_count, vm_delayed_count);

	kernel_debug_string_early("vm_page_bootstrap complete");
}

#ifndef MACHINE_PAGES
/*
 * This is the early boot time allocator for data structures needed to bootstrap the VM system.
 * On x86 it will allocate large pages if the size is sufficiently large. We don't need to do
 * this on ARM yet, due to the combination of a large base page size and smaller RAM devices.
 */
static void *
pmap_steal_memory_internal(
	vm_size_t size,
	vm_size_t alignment,
	boolean_t might_free,
	unsigned int flags,
	pmap_mapping_type_t mapping_type)
{
	kern_return_t kr;
	vm_offset_t addr;
	vm_offset_t map_addr;
	ppnum_t phys_page;
	unsigned int pmap_flags;

	/*
	 * Size needs to be aligned to word size.
	 */
	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
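	/* e.g. on LP64, where sizeof(void *) == 8, a 13-byte request rounds up to 16 bytes */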

	/*
	 * Alignment defaults to word size if not specified.
	 */
	if (alignment == 0) {
		alignment = sizeof(void*);
	}

	/*
	 * Alignment must be no greater than a page and must be a power of two.
	 */
	assert(alignment <= PAGE_SIZE);
	assert((alignment & (alignment - 1)) == 0);

	/*
	 * On the first call, get the initial values for virtual address space
	 * and page align them.
	 */
	if (virtual_space_start == virtual_space_end) {
		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
		virtual_space_start = round_page(virtual_space_start);
		virtual_space_end = trunc_page(virtual_space_end);

#if defined(__x86_64__)
		/*
		 * Release remaining unused section of preallocated KVA and the 4K page tables
		 * that map it. This makes the VA available for large page mappings.
		 */
		Idle_PTs_release(virtual_space_start, virtual_space_end);
#endif
	}

	/*
	 * Allocate the virtual space for this request. On x86, we'll align to a large page
	 * address if the size is big enough to back with at least 1 large page.
	 */
#if defined(__x86_64__)
	if (size >= I386_LPGBYTES) {
		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
	}
#endif
	virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
	addr = virtual_space_start;
	virtual_space_start += size;

	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */

	/*
	 * Allocate and map physical pages to back the new virtual space.
	 */
	map_addr = round_page(addr);
	while (map_addr < addr + size) {
#if defined(__x86_64__)
		/*
		 * Back with a large page if properly aligned on x86
		 */
		if ((map_addr & I386_LPGMASK) == 0 &&
		    map_addr + I386_LPGBYTES <= addr + size &&
		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);

			if (kr != KERN_SUCCESS) {
				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
				    (unsigned long)map_addr, phys_page);
			}
			map_addr += I386_LPGBYTES;
			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
			vm_page_kern_lpage_count++;
			continue;
		}
#endif

		if (!pmap_next_page_hi(&phys_page, might_free)) {
			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
		}

#if defined(__x86_64__)
		pmap_pre_expand(kernel_pmap, map_addr);
#endif
		pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;

		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
		    pmap_flags, FALSE, mapping_type);

		if (kr != KERN_SUCCESS) {
			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
			    (unsigned long)map_addr, phys_page);
		}
		map_addr += PAGE_SIZE;

		/*
		 * Account for newly stolen memory
		 */
		vm_page_wire_count++;
		vm_page_stolen_count++;
	}

#if defined(__x86_64__)
	/*
	 * The call with might_free is currently the last use of pmap_steal_memory*().
	 * Notify the pmap layer to record which high pages were allocated so far.
	 */
	if (might_free) {
		pmap_hi_pages_done();
	}
#endif
#if KASAN
	kasan_notify_address(round_page(addr), size);
#endif
	return (void *) addr;
}

void *
pmap_steal_memory(
	vm_size_t size,
	vm_size_t alignment)
{
	return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
}

void *
pmap_steal_freeable_memory(
	vm_size_t size)
{
	return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
}




#if CONFIG_SECLUDED_MEMORY
/* boot-args to control secluded memory */
TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
/* IOKit can use secluded memory */
TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
/* apps can use secluded memory */
TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
/* filecache can use secluded memory */
TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
uint64_t secluded_shutoff_trigger = 0;
uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
#endif /* CONFIG_SECLUDED_MEMORY */


#if defined(__arm64__)
extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
unsigned int vm_first_phys_ppnum = 0;
#endif

void vm_page_release_startup(vm_page_t mem);
void
pmap_startup(
	vm_offset_t *startp,
	vm_offset_t *endp)
{
	unsigned int i, npages;
	ppnum_t phys_page;
	uint64_t mem_sz;
	uint64_t start_ns;
	uint64_t now_ns;
	uint_t low_page_count = 0;

#if defined(__LP64__)
	/*
	 * make sure we are aligned on a 64 byte boundary
	 * for VM_PAGE_PACK_PTR (it clips off the low-order
	 * 6 bits of the pointer)
	 */
	if (virtual_space_start != virtual_space_end) {
		virtual_space_start = round_page(virtual_space_start);
	}
#endif

	/*
	 * We calculate how many page frames we will have
	 * and then allocate the page structures in one chunk.
	 *
	 * Note that the calculation here doesn't take into account
	 * the memory needed to map what's being allocated, i.e. the page
	 * table entries. So the actual number of pages we get will be
	 * less than this. To do someday: include that in the computation.
	 *
	 * Also, for ARM we don't use the count of free_pages, but rather the
	 * range from the last page to the first page (ignoring holes due to
	 * retired pages).
	 */
#if defined(__arm64__)
	mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
#else /* defined(__arm64__) */
	mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
#endif /* defined(__arm64__) */
	mem_sz += round_page(virtual_space_start) - virtual_space_start;  /* Account for any slop */
	npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));      /* scaled to include the vm_page_ts */


	vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);

	/*
	 * Check if we want to initialize pages to a known value
	 */
	if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
		fill = TRUE;
	}
#if DEBUG
	/* This slows down booting the DEBUG kernel, particularly on
	 * large memory systems, but is worthwhile in deterministically
	 * trapping uninitialized memory usage.
	 */
	if (!fill) {
		fill = TRUE;
		fillval = 0xDEB8F177;
	}
#endif
	if (fill) {
		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
	}

#if CONFIG_SECLUDED_MEMORY
	/*
	 * Figure out how much secluded memory to have before we start
	 * releasing pages to the free lists.
	 * The default, if specified nowhere else, is no secluded mem.
	 */
	vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);

	/*
	 * Allow a really large app to effectively use secluded memory until it exits.
	 */
	if (vm_page_secluded_target != 0) {
		/*
		 * Get an amount from boot-args, else use 1/2 of max_mem.
		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
		 * used munch to induce jetsam thrashing of false idle daemons on N56.
		 */
		int secluded_shutoff_mb;
		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
		    sizeof(secluded_shutoff_mb))) {
			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
		} else {
			secluded_shutoff_trigger = max_mem / 2;
		}

		/* ensure the headroom value is sensible and avoid underflows */
		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
	}

#endif /* CONFIG_SECLUDED_MEMORY */

#if defined(__x86_64__)

	/*
	 * Decide how much memory we delay freeing at boot time.
	 */
	uint32_t delay_above_gb;
	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
	}

	if (delay_above_gb == 0) {
		delay_above_pnum = PPNUM_MAX;
	} else {
		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
	}

	/* make sure we have sane breathing room: 1G above low memory */
	if (delay_above_pnum <= max_valid_low_ppnum) {
		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
	}

	if (delay_above_pnum < PPNUM_MAX) {
		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
	}

#endif /* defined(__x86_64__) */

	/*
	 * Initialize and release the page frames.
	 */
	kernel_debug_string_early("page_frame_init");

	vm_page_array_beginning_addr = &vm_pages[0];
	vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
#if VM_PAGE_PACKED_FROM_ARRAY
	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
		panic("pmap_startup(): too many pages to support vm_page packing");
	}
#endif

	vm_delayed_count = 0;

	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
	vm_pages_count = 0;
	for (i = 0; i < npages; i++) {
		/* Did we run out of pages? */
		if (!pmap_next_page(&phys_page)) {
			break;
		}

		if (phys_page < max_valid_low_ppnum) {
			++low_page_count;
		}

		/* Are we at high enough pages to delay the rest? */
		if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
			vm_delayed_count = pmap_free_pages();
			break;
		}

#if defined(__arm64__)
		if (i == 0) {
			vm_first_phys_ppnum = phys_page;
			patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
			    (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
		}
#endif /* defined(__arm64__) */

#if defined(__x86_64__)
		/* The x86 clump freeing code requires increasing ppn's to work correctly */
		if (i > 0) {
			assert(phys_page > vm_pages[i - 1].vmp_phys_page);
		}
#endif
		++vm_pages_count;
		vm_page_init(&vm_pages[i], phys_page, FALSE);
		if (fill) {
			fillPage(phys_page, fillval);
		}
		if (vm_himemory_mode) {
			vm_page_release_startup(&vm_pages[i]);
		}
	}
	vm_page_pages = vm_pages_count; /* used to report to user space */

	if (!vm_himemory_mode) {
		do {
			if (!VMP_ERROR_GET(&vm_pages[--i])) {   /* skip retired pages */
				vm_page_release_startup(&vm_pages[i]);
			}
		} while (i != 0);
	}

	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
	printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
	printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);

#if defined(__LP64__)
	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
	}

	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
	}
#endif

	VM_CHECK_MEMORYSTATUS;

	/*
	 * We have to re-align virtual_space_start,
	 * because pmap_steal_memory has been using it.
	 */
	virtual_space_start = round_page(virtual_space_start);
	*startp = virtual_space_start;
	*endp = virtual_space_end;
}
#endif /* MACHINE_PAGES */
1624
1625 /*
1626 * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1627 * or frees to this zone. It's just here for reporting purposes via zprint command.
1628 * This needs to be done after all initially delayed pages are put on the free lists.
1629 */
1630 static void
vm_page_module_init_delayed(void)1631 vm_page_module_init_delayed(void)
1632 {
1633 (void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1634 ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
1635 uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1636
1637 zone_set_exhaustible(z, 0, true);
1638 /*
1639 * Reflect size and usage information for vm_pages[].
1640 */
1641
1642 z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1643 z->z_elems_free = z->z_elems_avail - vm_pages_count;
1644 zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1645 vm_pages_count * sizeof(struct vm_page);
1646 vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1647 vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1648 z->z_wired_cur += vm_page_zone_pages;
1649 z->z_wired_hwm = z->z_wired_cur;
1650 z->z_va_cur = z->z_wired_cur;
1651 /* since zone accounts for these, take them out of stolen */
1652 VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1653 });
1654 }
1655
1656 /*
1657 * Create the vm_pages zone. This is used for the vm_page structures for the pages
1658 * that are scavanged from other boot time usages by ml_static_mfree(). As such,
1659 * this needs to happen in early VM bootstrap.
1660 */
1661
1662 __startup_func
1663 static void
vm_page_module_init(void)1664 vm_page_module_init(void)
1665 {
1666 vm_size_t vm_page_with_ppnum_size;
1667
1668 /*
1669 * Since the pointers to elements in this zone will be packed, they
1670 * must have appropriate size. Not strictly what sizeof() reports.
1671 */
1672 vm_page_with_ppnum_size =
1673 (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1674 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
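
	/*
	 * Worked example (hypothetical figures): if sizeof(struct
	 * vm_page_with_ppnum) were 60 and VM_PAGE_PACKED_PTR_ALIGNMENT
	 * were 64, the expression above computes (60 + 63) & ~63 == 64,
	 * i.e. the element size is rounded up to the next multiple of the
	 * packed-pointer alignment. The actual values depend on the
	 * configuration.
	 */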

	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
	    ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NO_TBI_TAG,
	    ZONE_ID_ANY, ^(zone_t z) {
		/*
		 * The number "10" is a small number that is larger than the
		 * number of fictitious pages that any single caller will
		 * attempt to allocate without blocking.
		 *
		 * The largest such number at the moment is kmem_alloc()
		 * when 2 guard pages are requested. 10 is simply a somewhat
		 * larger number, taking into account the 50% hysteresis the
		 * zone allocator uses.
		 *
		 * Note: this works at all because the zone allocator
		 * doesn't ever allocate fictitious pages.
		 */
		zone_raise_reserve(z, 10);
	});
}
STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);

/*
 * Routine:	vm_page_create
 * Purpose:
 *	After the VM system is up, machine-dependent code
 *	may stumble across more physical memory. For example,
 *	memory that it was reserving for a frame buffer.
 *	vm_page_create turns this memory into available pages.
 */
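
/*
 * Illustrative use (hypothetical page numbers): a driver that gives up a
 * 16 MB framebuffer reservation starting at physical page 0x80000 could
 * hand the memory back to the VM with:
 *
 *	vm_page_create(0x80000, 0x80000 + atop(16 << 20));
 *
 * The range is half-open: pages [start, end) become available.
 */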

void
vm_page_create(
	ppnum_t start,
	ppnum_t end)
{
	ppnum_t   phys_page;
	vm_page_t m;

	for (phys_page = start;
	    phys_page < end;
	    phys_page++) {
		m = vm_page_grab_fictitious_common(phys_page, TRUE);
		m->vmp_fictitious = FALSE;
		pmap_clear_noencrypt(phys_page);

		vm_free_page_lock();
		vm_page_pages++;
		vm_free_page_unlock();
		vm_page_release(m, FALSE);
	}
}


/*
 * vm_page_hash:
 *
 * Distributes the object/offset key pair among hash buckets.
 *
 * NOTE: The bucket count must be a power of 2
 */
#define vm_page_hash(object, offset) (\
	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
	& vm_page_hash_mask)
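
/*
 * Worked example (hypothetical value): with vm_page_hash_mask == 0x3ff
 * (1024 buckets), every result is masked into [0, 1023]. Because the
 * bucket count is a power of 2, "& vm_page_hash_mask" is equivalent to
 * the more expensive "% bucket_count"; that equivalence is what the
 * power-of-2 requirement above guarantees.
 */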

/*
 * vm_page_insert:		[ internal use only ]
 *
 * Inserts the given mem entry into the object/object-page
 * table and object list.
 *
 * The object must be locked.
 */
void
vm_page_insert(
	vm_page_t          mem,
	vm_object_t        object,
	vm_object_offset_t offset)
{
	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
}

void
vm_page_insert_wired(
	vm_page_t          mem,
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_tag_t           tag)
{
	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
}

void
vm_page_insert_internal(
	vm_page_t          mem,
	vm_object_t        object,
	vm_object_offset_t offset,
	vm_tag_t           tag,
	boolean_t          queues_lock_held,
	boolean_t          insert_in_hash,
	boolean_t          batch_pmap_op,
	boolean_t          batch_accounting,
	uint64_t           *delayed_ledger_update)
{
	vm_page_bucket_t *bucket;
	lck_spin_t       *bucket_lock;
	int              hash_id;
	task_t           owner;
	int              ledger_idx_volatile;
	int              ledger_idx_nonvolatile;
	int              ledger_idx_volatile_compressed;
	int              ledger_idx_nonvolatile_compressed;
	int              ledger_idx_composite;
	int              ledger_idx_external_wired;
	boolean_t        do_footprint;

#if 0
	/*
	 * we may not hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(mem);
#endif

	assertf(page_aligned(offset), "0x%llx\n", offset);

	assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));

	vm_object_lock_assert_exclusive(object);
	LCK_MTX_ASSERT(&vm_page_queue_lock,
	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
	    : LCK_MTX_ASSERT_NOTOWNED);

	if (queues_lock_held == FALSE) {
		assert(!VM_PAGE_PAGEABLE(mem));
	}

	if (insert_in_hash == TRUE) {
#if DEBUG || VM_PAGE_BUCKETS_CHECK
		if (mem->vmp_tabled || mem->vmp_object) {
			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
			    "already in (obj=%p,off=0x%llx)",
			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
		}
#endif
		if (object->internal && (offset >= object->vo_size)) {
			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
			    mem, object, offset, object->vo_size);
		}

		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);

		/*
		 * Record the object/offset pair in this page
		 */
		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
		mem->vmp_offset = offset;

#if CONFIG_SECLUDED_MEMORY
		if (object->eligible_for_secluded) {
			vm_page_secluded.eligible_for_secluded++;
		}
#endif /* CONFIG_SECLUDED_MEMORY */

		/*
		 * Insert it into the object/offset hash table
		 */
		hash_id = vm_page_hash(object, offset);
		bucket = &vm_page_buckets[hash_id];
		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

		mem->vmp_next_m = bucket->page_list;
		bucket->page_list = VM_PAGE_PACK_PTR(mem);
		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));

#if MACH_PAGE_HASH_STATS
		if (++bucket->cur_count > bucket->hi_count) {
			bucket->hi_count = bucket->cur_count;
		}
#endif /* MACH_PAGE_HASH_STATS */
		mem->vmp_hashed = TRUE;
		lck_spin_unlock(bucket_lock);
	}

	{
		unsigned int cache_attr;

		cache_attr = object->wimg_bits & VM_WIMG_MASK;

		if (cache_attr != VM_WIMG_USE_DEFAULT) {
			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
		}
	}
	/*
	 * Now link into the object's list of backed pages.
	 */
	vm_page_queue_enter(&object->memq, mem, vmp_listq);
	object->memq_hint = mem;
	mem->vmp_tabled = TRUE;

	/*
	 * Show that the object has one more resident page.
	 */
	object->resident_page_count++;
	if (VM_PAGE_WIRED(mem)) {
		assert(mem->vmp_wire_count > 0);
		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
	}
	assert(object->resident_page_count >= object->wired_page_count);

#if DEVELOPMENT || DEBUG
	if (object->object_is_shared_cache &&
	    object->pager != NULL &&
	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
		int new, old;
		assert(!object->internal);
		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
		do {
			old = shared_region_pagers_resident_peak;
		} while (old < new &&
		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
	}
#endif /* DEVELOPMENT || DEBUG */

	if (batch_accounting == FALSE) {
		if (object->internal) {
			OSAddAtomic(1, &vm_page_internal_count);
		} else {
			OSAddAtomic(1, &vm_page_external_count);
		}
	}

	/*
	 * It wouldn't make sense to insert a "reusable" page in
	 * an object (the page would have been marked "reusable" only
	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
	 * in the object at that time).
	 * But a page could be inserted in an "all_reusable" object, if
	 * something faults it in (a vm_read() from another task or a
	 * "use-after-free" issue in user space, for example). It can
	 * also happen if we're relocating a page from that object to
	 * a different physical page during a physically-contiguous
	 * allocation.
	 */
	assert(!mem->vmp_reusable);
	if (object->all_reusable) {
		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
	}

	if (object->purgable == VM_PURGABLE_DENY &&
	    !object->vo_ledger_tag) {
		owner = TASK_NULL;
	} else {
		owner = VM_OBJECT_OWNER(object);
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &ledger_idx_composite,
		    &ledger_idx_external_wired,
		    &do_footprint);
	}
	if (owner &&
	    object->internal &&
	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
	    object->purgable == VM_PURGABLE_DENY ||
	    VM_PAGE_WIRED(mem))) {
		if (delayed_ledger_update) {
			*delayed_ledger_update += PAGE_SIZE;
		} else {
			/* more non-volatile bytes */
			ledger_credit(owner->ledger,
			    ledger_idx_nonvolatile,
			    PAGE_SIZE);
			if (do_footprint) {
				/* more footprint */
				ledger_credit(owner->ledger,
				    task_ledgers.phys_footprint,
				    PAGE_SIZE);
			} else if (ledger_idx_composite != -1) {
				ledger_credit(owner->ledger,
				    ledger_idx_composite,
				    PAGE_SIZE);
			}
		}
	} else if (owner &&
	    object->internal &&
	    (object->purgable == VM_PURGABLE_VOLATILE ||
	    object->purgable == VM_PURGABLE_EMPTY)) {
		assert(!VM_PAGE_WIRED(mem));
		/* more volatile bytes */
		ledger_credit(owner->ledger,
		    ledger_idx_volatile,
		    PAGE_SIZE);
	}

	if (object->purgable == VM_PURGABLE_VOLATILE) {
		if (VM_PAGE_WIRED(mem)) {
			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
		} else {
			OSAddAtomic(+1, &vm_page_purgeable_count);
		}
	} else if (object->purgable == VM_PURGABLE_EMPTY &&
	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
		/*
		 * This page belongs to a purged VM object but hasn't
		 * been purged (because it was "busy").
		 * It's in the "throttled" queue and hence not
		 * visible to vm_pageout_scan(). Move it to a pageable
		 * queue, so that it can eventually be reclaimed, instead
		 * of lingering in the "empty" object.
		 */
		if (queues_lock_held == FALSE) {
			vm_page_lockspin_queues();
		}
		vm_page_deactivate(mem);
		if (queues_lock_held == FALSE) {
			vm_page_unlock_queues();
		}
	}

#if VM_OBJECT_TRACKING_OP_MODIFIED
	if (vm_object_tracking_btlog &&
	    object->internal &&
	    object->resident_page_count == 0 &&
	    object->pager == NULL &&
	    object->shadow != NULL &&
	    object->shadow->vo_copy == object) {
		btlog_record(vm_object_tracking_btlog, object,
		    VM_OBJECT_TRACKING_OP_MODIFIED,
		    btref_get(__builtin_frame_address(0), 0));
	}
#endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
}

/*
 * vm_page_replace:
 *
 * Exactly like vm_page_insert, except that we first
 * remove any existing page at the given offset in object.
 *
 * The object must be locked.
 */
void
vm_page_replace(
	vm_page_t          mem,
	vm_object_t        object,
	vm_object_offset_t offset)
{
	vm_page_bucket_t *bucket;
	vm_page_t        found_m = VM_PAGE_NULL;
	lck_spin_t       *bucket_lock;
	int              hash_id;

#if 0
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(mem);
#endif
	vm_object_lock_assert_exclusive(object);
#if DEBUG || VM_PAGE_BUCKETS_CHECK
	if (mem->vmp_tabled || mem->vmp_object) {
		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
		    "already in (obj=%p,off=0x%llx)",
		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
	}
#endif
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);

	assert(!VM_PAGE_PAGEABLE(mem));

	/*
	 * Record the object/offset pair in this page
	 */
	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
	mem->vmp_offset = offset;

	/*
	 * Insert it into the object/offset hash table,
	 * replacing any page that might have been there.
	 */
	hash_id = vm_page_hash(object, offset);
	bucket = &vm_page_buckets[hash_id];
	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

	if (bucket->page_list) {
		vm_page_packed_t *mp = &bucket->page_list;
		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));

		do {
			/*
			 * compare packed object pointers
			 */
			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
				/*
				 * Remove old page from hash list
				 */
				*mp = m->vmp_next_m;
				m->vmp_hashed = FALSE;
				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);

				found_m = m;
				break;
			}
			mp = &m->vmp_next_m;
		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));

		mem->vmp_next_m = bucket->page_list;
	} else {
		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
	}
	/*
	 * insert new page at head of hash list
	 */
	bucket->page_list = VM_PAGE_PACK_PTR(mem);
	mem->vmp_hashed = TRUE;

	lck_spin_unlock(bucket_lock);

	if (found_m) {
		/*
		 * there was already a page at the specified
		 * offset for this object... remove it from
		 * the object and free it back to the free list
		 */
		vm_page_free_unlocked(found_m, FALSE);
	}
	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
}

/*
 * vm_page_remove:		[ internal use only ]
 *
 * Removes the given mem entry from the object/offset-page
 * table and the object page list.
 *
 * The object must be locked.
 */

void
vm_page_remove(
	vm_page_t mem,
	boolean_t remove_from_hash)
{
	vm_page_bucket_t *bucket;
	vm_page_t        this;
	lck_spin_t       *bucket_lock;
	int              hash_id;
	task_t           owner;
	vm_object_t      m_object;
	int              ledger_idx_volatile;
	int              ledger_idx_nonvolatile;
	int              ledger_idx_volatile_compressed;
	int              ledger_idx_nonvolatile_compressed;
	int              ledger_idx_composite;
	int              ledger_idx_external_wired;
	int              do_footprint;

	m_object = VM_PAGE_OBJECT(mem);

	vm_object_lock_assert_exclusive(m_object);
	assert(mem->vmp_tabled);
	assert(!mem->vmp_cleaning);
	assert(!mem->vmp_laundry);

	if (VM_PAGE_PAGEABLE(mem)) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	}
#if 0
	/*
	 * we don't hold the page queue lock
	 * so this check isn't safe to make
	 */
	VM_PAGE_CHECK(mem);
#endif
	if (remove_from_hash == TRUE) {
		/*
		 * Remove from the object/offset hash table
		 */
		hash_id = vm_page_hash(m_object, mem->vmp_offset);
		bucket = &vm_page_buckets[hash_id];
		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
			/* optimize for common case */
			bucket->page_list = mem->vmp_next_m;
		} else {
			vm_page_packed_t *prev;

			for (prev = &this->vmp_next_m;
			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
			    prev = &this->vmp_next_m) {
				continue;
			}
			*prev = this->vmp_next_m;
		}
#if MACH_PAGE_HASH_STATS
		bucket->cur_count--;
#endif /* MACH_PAGE_HASH_STATS */
		mem->vmp_hashed = FALSE;
		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
		lck_spin_unlock(bucket_lock);
	}
	/*
	 * Now remove from the object's list of backed pages.
	 */
	vm_page_remove_internal(mem);

	/*
	 * And show that the object has one fewer resident
	 * page.
	 */
	assert(m_object->resident_page_count > 0);
	m_object->resident_page_count--;

#if DEVELOPMENT || DEBUG
	if (m_object->object_is_shared_cache &&
	    m_object->pager != NULL &&
	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
		assert(!m_object->internal);
		OSAddAtomic(-1, &shared_region_pagers_resident_count);
	}
#endif /* DEVELOPMENT || DEBUG */

	if (m_object->internal) {
#if DEBUG
		assert(vm_page_internal_count);
#endif /* DEBUG */

		OSAddAtomic(-1, &vm_page_internal_count);
	} else {
		assert(vm_page_external_count);
		OSAddAtomic(-1, &vm_page_external_count);

		if (mem->vmp_xpmapped) {
			assert(vm_page_xpmapped_external_count);
			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
		}
	}
	if (!m_object->internal &&
	    m_object->cached_list.next &&
	    m_object->cached_list.prev) {
		if (m_object->resident_page_count == 0) {
			vm_object_cache_remove(m_object);
		}
	}

	if (VM_PAGE_WIRED(mem)) {
		assert(mem->vmp_wire_count > 0);
		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
	}
	assert(m_object->resident_page_count >=
	    m_object->wired_page_count);
	if (mem->vmp_reusable) {
		assert(m_object->reusable_page_count > 0);
		m_object->reusable_page_count--;
		assert(m_object->reusable_page_count <=
		    m_object->resident_page_count);
		mem->vmp_reusable = FALSE;
		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reused_remove++;
	} else if (m_object->all_reusable) {
		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
		vm_page_stats_reusable.reused_remove++;
	}

	if (m_object->purgable == VM_PURGABLE_DENY &&
	    !m_object->vo_ledger_tag) {
		owner = TASK_NULL;
	} else {
		owner = VM_OBJECT_OWNER(m_object);
		vm_object_ledger_tag_ledgers(m_object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &ledger_idx_composite,
		    &ledger_idx_external_wired,
		    &do_footprint);
	}
	if (owner &&
	    m_object->internal &&
	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
	    m_object->purgable == VM_PURGABLE_DENY ||
	    VM_PAGE_WIRED(mem))) {
		/* less non-volatile bytes */
		ledger_debit(owner->ledger,
		    ledger_idx_nonvolatile,
		    PAGE_SIZE);
		if (do_footprint) {
			/* less footprint */
			ledger_debit(owner->ledger,
			    task_ledgers.phys_footprint,
			    PAGE_SIZE);
		} else if (ledger_idx_composite != -1) {
			ledger_debit(owner->ledger,
			    ledger_idx_composite,
			    PAGE_SIZE);
		}
	} else if (owner &&
	    m_object->internal &&
	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
	    m_object->purgable == VM_PURGABLE_EMPTY)) {
		assert(!VM_PAGE_WIRED(mem));
		/* less volatile bytes */
		ledger_debit(owner->ledger,
		    ledger_idx_volatile,
		    PAGE_SIZE);
	}

	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
		if (VM_PAGE_WIRED(mem)) {
			assert(vm_page_purgeable_wired_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
		} else {
			assert(vm_page_purgeable_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_count);
		}
	}

	if (m_object->set_cache_attr == TRUE) {
		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
	}

	mem->vmp_tabled = FALSE;
	mem->vmp_object = 0;
	mem->vmp_offset = (vm_object_offset_t) -1;
}


/*
 * vm_page_lookup:
 *
 * Returns the page associated with the object/offset
 * pair specified; if none is found, VM_PAGE_NULL is returned.
 *
 * The object must be locked. No side effects.
 */

#define VM_PAGE_HASH_LOOKUP_THRESHOLD	10
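
/*
 * Below this resident-page count, vm_page_lookup() walks the object's
 * memq list directly instead of going through the hash (see the comment
 * in the lookup path below). Worked example with hypothetical sizes: a
 * 5-page object is searched with at most 5 pointer chases and no bucket
 * lock taken, while a 1000-page object goes through the hash table.
 */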

#if DEBUG_VM_PAGE_LOOKUP

struct {
	uint64_t vpl_total;
	uint64_t vpl_empty_obj;
	uint64_t vpl_bucket_NULL;
	uint64_t vpl_hit_hint;
	uint64_t vpl_hit_hint_next;
	uint64_t vpl_hit_hint_prev;
	uint64_t vpl_fast;
	uint64_t vpl_slow;
	uint64_t vpl_hit;
	uint64_t vpl_miss;

	uint64_t vpl_fast_elapsed;
	uint64_t vpl_slow_elapsed;
} vm_page_lookup_stats __attribute__((aligned(8)));

#endif

#define KDP_VM_PAGE_WALK_MAX	1000

vm_page_t
kdp_vm_page_lookup(
	vm_object_t        object,
	vm_object_offset_t offset)
{
	vm_page_t cur_page;
	int       num_traversed = 0;

	if (not_in_kdp) {
		panic("kdp_vm_page_lookup done outside of kernel debugger");
	}

	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
		if (cur_page->vmp_offset == offset) {
			return cur_page;
		}
		num_traversed++;

		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
			return VM_PAGE_NULL;
		}
	}

	return VM_PAGE_NULL;
}

vm_page_t
vm_page_lookup(
	vm_object_t        object,
	vm_object_offset_t offset)
{
	vm_page_t             mem;
	vm_page_bucket_t      *bucket;
	vm_page_queue_entry_t qe;
	lck_spin_t            *bucket_lock = NULL;
	int                   hash_id;
#if DEBUG_VM_PAGE_LOOKUP
	uint64_t              start, elapsed;

	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
#endif

#if CONFIG_KERNEL_TAGGING
	if (is_kernel_object(object)) {
		offset = vm_memtag_canonicalize_address(offset);
	}
#endif /* CONFIG_KERNEL_TAGGING */

	vm_object_lock_assert_held(object);
	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	if (object->resident_page_count == 0) {
#if DEBUG_VM_PAGE_LOOKUP
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
#endif
		return VM_PAGE_NULL;
	}

	mem = object->memq_hint;

	if (mem != VM_PAGE_NULL) {
		assert(VM_PAGE_OBJECT(mem) == object);

		if (mem->vmp_offset == offset) {
#if DEBUG_VM_PAGE_LOOKUP
			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
#endif
			return mem;
		}
		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);

		if (!vm_page_queue_end(&object->memq, qe)) {
			vm_page_t next_page;

			next_page = (vm_page_t)((uintptr_t)qe);
			assert(VM_PAGE_OBJECT(next_page) == object);

			if (next_page->vmp_offset == offset) {
				object->memq_hint = next_page; /* new hint */
#if DEBUG_VM_PAGE_LOOKUP
				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
#endif
				return next_page;
			}
		}
		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);

		if (!vm_page_queue_end(&object->memq, qe)) {
			vm_page_t prev_page;

			prev_page = (vm_page_t)((uintptr_t)qe);
			assert(VM_PAGE_OBJECT(prev_page) == object);

			if (prev_page->vmp_offset == offset) {
				object->memq_hint = prev_page; /* new hint */
#if DEBUG_VM_PAGE_LOOKUP
				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
#endif
				return prev_page;
			}
		}
	}
	/*
	 * Search the hash table for this object/offset pair
	 */
	hash_id = vm_page_hash(object, offset);
	bucket = &vm_page_buckets[hash_id];

	/*
	 * since we hold the object lock, we are guaranteed that no
	 * new pages can be inserted into this object... this in turn
	 * guarantees that the page we're looking for can't exist
	 * if the bucket it hashes to is currently NULL even when looked
	 * at outside the scope of the hash bucket lock... this is a
	 * really cheap optimization to avoid taking the lock
	 */
	if (!bucket->page_list) {
#if DEBUG_VM_PAGE_LOOKUP
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
#endif
		return VM_PAGE_NULL;
	}

#if DEBUG_VM_PAGE_LOOKUP
	start = mach_absolute_time();
#endif
	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
		/*
		 * on average, it's roughly 3 times faster to run a short memq list
		 * than to take the spin lock and go through the hash list
		 */
		mem = (vm_page_t)vm_page_queue_first(&object->memq);

		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
			if (mem->vmp_offset == offset) {
				break;
			}

			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
		}
		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
			mem = NULL;
		}
	} else {
		vm_page_object_t packed_object;

		packed_object = VM_PAGE_PACK_OBJECT(object);

		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];

		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
		    mem != VM_PAGE_NULL;
		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
#if 0
			/*
			 * we don't hold the page queue lock
			 * so this check isn't safe to make
			 */
			VM_PAGE_CHECK(mem);
#endif
			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
				break;
			}
		}
		lck_spin_unlock(bucket_lock);
	}

#if DEBUG_VM_PAGE_LOOKUP
	elapsed = mach_absolute_time() - start;

	if (bucket_lock) {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
	} else {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
	}
	if (mem != VM_PAGE_NULL) {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
	} else {
		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
	}
#endif
	if (mem != VM_PAGE_NULL) {
		assert(VM_PAGE_OBJECT(mem) == object);

		object->memq_hint = mem;
	}
	return mem;
}


/*
 * vm_page_rename:
 *
 * Move the given memory entry from its
 * current object to the specified target object/offset.
 *
 * The object must be locked.
 */
void
vm_page_rename(
	vm_page_t          mem,
	vm_object_t        new_object,
	vm_object_offset_t new_offset)
{
	boolean_t   internal_to_external, external_to_internal;
	vm_tag_t    tag;
	vm_object_t m_object;

	m_object = VM_PAGE_OBJECT(mem);

	assert(m_object != new_object);
	assert(m_object);

	/*
	 * Changes to mem->vmp_object require the page lock because
	 * the pageout daemon uses that lock to get the object.
	 */
	vm_page_lockspin_queues();

	internal_to_external = FALSE;
	external_to_internal = FALSE;

	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
		/*
		 * it's much easier to get the vm_page_pageable_xxx accounting correct
		 * if we first move the page to the active queue... it's going to end
		 * up there anyway, and we don't call vm_page_rename frequently enough
		 * for this to matter.
		 */
		vm_page_queues_remove(mem, FALSE);
		vm_page_activate(mem);
	}
	if (VM_PAGE_PAGEABLE(mem)) {
		if (m_object->internal && !new_object->internal) {
			internal_to_external = TRUE;
		}
		if (!m_object->internal && new_object->internal) {
			external_to_internal = TRUE;
		}
	}

	tag = m_object->wire_tag;
	vm_page_remove(mem, TRUE);
	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);

	if (internal_to_external) {
		vm_page_pageable_internal_count--;
		vm_page_pageable_external_count++;
	} else if (external_to_internal) {
		vm_page_pageable_external_count--;
		vm_page_pageable_internal_count++;
	}

	vm_page_unlock_queues();
}

/*
 * vm_page_init:
 *
 * Initialize the fields in a new page.
 * This takes a structure with random values and initializes it
 * so that it can be given to vm_page_release or vm_page_insert.
 */
void
vm_page_init(
	vm_page_t mem,
	ppnum_t   phys_page,
	boolean_t lopage)
{
	uint_t    i;
	uintptr_t *p;

	assert(phys_page);

#if DEBUG
	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
		if (!(pmap_valid_page(phys_page))) {
			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
		}
	}
#endif /* DEBUG */

	/*
	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
	 * try to use initial values which match 0. This minimizes the number of writes
	 * needed for boot-time initialization.
	 *
	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
	 */
	assert(VM_PAGE_NOT_ON_Q == 0);
	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
		*p++ = 0;
	}
	mem->vmp_offset = (vm_object_offset_t)-1;
	mem->vmp_busy = TRUE;
	mem->vmp_lopage = lopage;

	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
#if 0
	/*
	 * we're leaving this turned off for now... currently pages
	 * come off the free list and are either immediately dirtied/referenced
	 * due to zero-fill or COW faults, or are used to read or write files...
	 * in the file I/O case, the UPL mechanism takes care of clearing
	 * the state of the HW ref/mod bits in a somewhat fragile way.
	 * Since we may change the way this works in the future (to toughen it up),
	 * I'm leaving this as a reminder of where these bits could get cleared
	 */

	/*
	 * make sure both the h/w referenced and modified bits are
	 * clear at this point... we are especially dependent on
	 * not finding a 'stale' h/w modified in a number of spots
	 * once this page goes back into use
	 */
	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
#endif
}

/*
 * vm_page_grab_fictitious:
 *
 * Remove a fictitious page from the free list.
 * Returns VM_PAGE_NULL if there are no free pages.
 */

static vm_page_t
vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
{
	vm_page_t m;

	m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
	if (m) {
		vm_page_init(m, phys_addr, FALSE);
		m->vmp_fictitious = TRUE;
	}
	return m;
}

vm_page_t
vm_page_grab_fictitious(boolean_t canwait)
{
	return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
}

int vm_guard_count;


vm_page_t
vm_page_grab_guard(boolean_t canwait)
{
	vm_page_t page;
	page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
	if (page) {
		OSAddAtomic(1, &vm_guard_count);
	}
	return page;
}


/*
 * vm_page_release_fictitious:
 *
 * Release a fictitious page to the zone pool
 */
void
vm_page_release_fictitious(
	vm_page_t m)
{
	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
	assert(m->vmp_fictitious);
	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
	assert(!m->vmp_realtime);

	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
		OSAddAtomic(-1, &vm_guard_count);
	}

	zfree(vm_page_zone, m);
}

/*
 * vm_pool_low():
 *
 * Return true if it is not likely that a non-vm_privileged thread
 * can get memory without blocking. Advisory only, since the
 * situation may change under us.
 */
bool
vm_pool_low(void)
{
	/* No locking, at worst we will fib. */
	return vm_page_free_count <= vm_page_free_reserved;
}
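
/*
 * Illustrative use (hypothetical caller): a subsystem with deferrable
 * work might consult this before allocating, e.g.:
 *
 *	if (vm_pool_low()) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 *	page = vm_page_grab();
 *
 * The check is advisory and unlocked, so callers must still be prepared
 * for vm_page_grab() to return VM_PAGE_NULL.
 */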

boolean_t vm_darkwake_mode = FALSE;

/*
 * vm_update_darkwake_mode():
 *
 * Tells the VM that the system is in / out of darkwake.
 *
 * Today, the VM only lowers/raises the background queue target
 * so as to favor consuming more/less background pages when
 * darkwake is ON/OFF.
 *
 * We might need to do more things in the future.
 */

void
vm_update_darkwake_mode(boolean_t darkwake_mode)
{
#if XNU_TARGET_OS_OSX && defined(__arm64__)
#pragma unused(darkwake_mode)
	assert(vm_darkwake_mode == FALSE);
	/*
	 * Darkwake mode isn't supported on Apple silicon macOS.
	 */
	return;
#else /* XNU_TARGET_OS_OSX && __arm64__ */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);

	vm_page_lockspin_queues();

	if (vm_darkwake_mode == darkwake_mode) {
		/*
		 * No change.
		 */
		vm_page_unlock_queues();
		return;
	}

	vm_darkwake_mode = darkwake_mode;

	if (vm_darkwake_mode == TRUE) {
		/* save background target to restore later */
		vm_page_background_target_snapshot = vm_page_background_target;

		/* target is set to 0...no protection for background pages */
		vm_page_background_target = 0;
	} else if (vm_darkwake_mode == FALSE) {
		if (vm_page_background_target_snapshot) {
			vm_page_background_target = vm_page_background_target_snapshot;
		}
	}
	vm_page_unlock_queues();
#endif
}

void
vm_page_update_special_state(vm_page_t mem)
{
	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
		return;
	}

	int mode = mem->vmp_on_specialq;

	switch (mode) {
	case VM_PAGE_SPECIAL_Q_BG:
	{
		task_t my_task = current_task_early();

		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
			return;
		}

		if (my_task) {
			if (task_get_darkwake_mode(my_task)) {
				return;
			}
		}

		if (my_task) {
			if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
				return;
			}
		}
		vm_page_lockspin_queues();

		vm_page_background_promoted_count++;

		vm_page_remove_from_specialq(mem);
		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;

		vm_page_unlock_queues();
		break;
	}

	case VM_PAGE_SPECIAL_Q_DONATE:
	{
		task_t my_task = current_task_early();

		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
			return;
		}

		if (my_task->donates_own_pages == false) {
			vm_page_lockspin_queues();

			vm_page_remove_from_specialq(mem);
			mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;

			vm_page_unlock_queues();
		}
		break;
	}

	default:
	{
		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
		break;
	}
	}
}


void
vm_page_assign_special_state(vm_page_t mem, int mode)
{
	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
		return;
	}

	switch (mode) {
	case VM_PAGE_SPECIAL_Q_BG:
	{
		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
			return;
		}

		task_t my_task = current_task_early();

		if (my_task) {
			if (task_get_darkwake_mode(my_task)) {
				mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
				return;
			}
		}

		if (my_task) {
			mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
		}
		break;
	}

	case VM_PAGE_SPECIAL_Q_DONATE:
	{
		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
			return;
		}
		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
		break;
	}

	default:
		break;
	}
}


void
vm_page_remove_from_specialq(
	vm_page_t mem)
{
	vm_object_t    m_object;
	unsigned short mode;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	mode = mem->vmp_on_specialq;

	switch (mode) {
	case VM_PAGE_SPECIAL_Q_BG:
	{
		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
			vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);

			mem->vmp_specialq.next = 0;
			mem->vmp_specialq.prev = 0;

			vm_page_background_count--;

			m_object = VM_PAGE_OBJECT(mem);

			if (m_object->internal) {
				vm_page_background_internal_count--;
			} else {
				vm_page_background_external_count--;
			}
		}
		break;
	}

	case VM_PAGE_SPECIAL_Q_DONATE:
	{
		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
			vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
			mem->vmp_specialq.next = 0;
			mem->vmp_specialq.prev = 0;
			vm_page_donate_count--;
			if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
				assert(vm_page_donate_target == vm_page_donate_target_low);
				vm_page_donate_target = vm_page_donate_target_high;
				vm_page_donate_queue_ripe = false;
			}
		}

		break;
	}

	default:
	{
		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
		break;
	}
	}
}


void
vm_page_add_to_specialq(
	vm_page_t mem,
	boolean_t first)
{
	vm_object_t m_object;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
		return;
	}

	int mode = mem->vmp_on_specialq;

	switch (mode) {
	case VM_PAGE_SPECIAL_Q_BG:
	{
		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
			return;
		}

		m_object = VM_PAGE_OBJECT(mem);

		if (vm_page_background_exclude_external && !m_object->internal) {
			return;
		}

		if (first == TRUE) {
			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
		} else {
			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
		}
		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;

		vm_page_background_count++;

		if (m_object->internal) {
			vm_page_background_internal_count++;
		} else {
			vm_page_background_external_count++;
		}
		break;
	}

	case VM_PAGE_SPECIAL_Q_DONATE:
	{
		if (first == TRUE) {
			vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
		} else {
			vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
		}
		vm_page_donate_count++;
		if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
			assert(vm_page_donate_target == vm_page_donate_target_high);
			vm_page_donate_target = vm_page_donate_target_low;
			vm_page_donate_queue_ripe = true;
		}
		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
		break;
	}

	default:
		break;
	}
}

/*
 * This can be switched to FALSE to help debug drivers
 * that are having problems with memory > 4G.
 */
boolean_t vm_himemory_mode = TRUE;

/*
 * this interface exists to support hardware controllers
 * incapable of generating DMAs with more than 32 bits
 * of address on platforms with physical memory > 4G...
 */
unsigned int vm_lopages_allocated_q = 0;
unsigned int vm_lopages_allocated_cpm_success = 0;
unsigned int vm_lopages_allocated_cpm_failed = 0;
vm_page_queue_head_t vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;

vm_page_t
vm_page_grablo(void)
{
	vm_page_t mem;

	if (vm_lopage_needed == FALSE) {
		return vm_page_grab();
	}

	vm_free_page_lock_spin();

	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
		assert(vm_lopage_free_count);
		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

		vm_lopage_free_count--;
		vm_lopages_allocated_q++;

		if (vm_lopage_free_count < vm_lopage_lowater) {
			vm_lopage_refill = TRUE;
		}

		vm_free_page_unlock();

		if (current_task()->donates_own_pages) {
			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
		} else {
			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
		}
	} else {
		vm_free_page_unlock();

		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
			vm_free_page_lock_spin();
			vm_lopages_allocated_cpm_failed++;
			vm_free_page_unlock();

			return VM_PAGE_NULL;
		}
		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);

		mem->vmp_busy = TRUE;

		vm_page_lockspin_queues();

		mem->vmp_gobbled = FALSE;
		vm_page_gobble_count--;
		vm_page_wire_count--;

		vm_lopages_allocated_cpm_success++;
		vm_page_unlock_queues();
	}
	assert(mem->vmp_busy);
	assert(!mem->vmp_pmapped);
	assert(!mem->vmp_wpmapped);
	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));

	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

	counter_inc(&vm_page_grab_count);
	VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);

	return mem;
}

/*
 * vm_page_grab:
 *
 * first try to grab a page from the per-cpu free list...
 * this must be done while pre-emption is disabled... if
 * a page is available, we're done...
 * if no page is available, grab the vm_page_queue_free_lock
 * and see if the current number of free pages would allow us
 * to grab at least 1... if not, return VM_PAGE_NULL as before...
 * if there are pages available, disable preemption and
 * recheck the state of the per-cpu free list... we could
 * have been preempted and moved to a different cpu, or
 * some other thread could have re-filled it... if still
 * empty, figure out how many pages we can steal from the
 * global free queue and move to the per-cpu queue...
 * return 1 of these pages when done... only wake up the
 * pageout_scan thread if we moved pages from the global
 * list... no need for the wakeup if we've satisfied the
 * request from the per-cpu queue.
 */

#if CONFIG_SECLUDED_MEMORY
vm_page_t vm_page_grab_secluded(void);
#endif /* CONFIG_SECLUDED_MEMORY */

static inline void
vm_page_grab_diags(void);

/*
 * vm_page_validate_no_references:
 *
 * Make sure the physical page has no refcounts.
 */
static inline void
vm_page_validate_no_references(
	vm_page_t mem)
{
	bool is_freed;

	if (mem->vmp_fictitious) {
		return;
	}

	pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(mem));

#if CONFIG_SPTM
	is_freed = pmap_is_page_free(paddr);
#else
	is_freed = pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem));
#endif /* CONFIG_SPTM */

	if (!is_freed) {
		/*
		 * There is a redundancy here, but we are going to panic anyway,
		 * and ASSERT_PMAP_FREE traces useful information. So, we keep
		 * this behavior.
		 */
		ASSERT_PMAP_FREE(mem);
		panic("%s: page 0x%llx is referenced", __func__, paddr);
	}
}

vm_page_t
vm_page_grab(void)
{
	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
}

#if HIBERNATION
boolean_t hibernate_rebuild_needed = FALSE;
#endif /* HIBERNATION */

static void
vm_page_finalize_grabed_page(vm_page_t mem)
{
	task_t cur_task = current_task_early();
	if (cur_task && cur_task != kernel_task) {
		/* tag:DONATE this is where the donate state of the page is decided according to what task grabs it */
		if (cur_task->donates_own_pages) {
			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
		} else {
			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
		}
	}
}

vm_page_t
vm_page_grab_options(
	int grab_options)
{
	vm_page_t mem;

restart:
	disable_preemption();

	if ((mem = *PERCPU_GET(free_pages))) {
		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

#if HIBERNATION
		if (hibernate_rebuild_needed) {
			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
		}
#endif /* HIBERNATION */

		vm_page_grab_diags();

		vm_offset_t pcpu_base = current_percpu_base();
		counter_inc_preemption_disabled(&vm_page_grab_count);
		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
		VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);

		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
		enable_preemption();

		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
		assert(mem->vmp_tabled == FALSE);
		assert(mem->vmp_object == 0);
		assert(!mem->vmp_laundry);
		assert(mem->vmp_busy);
		assert(!mem->vmp_pmapped);
		assert(!mem->vmp_wpmapped);
		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
		assert(!mem->vmp_realtime);

#if MACH_ASSERT
		if (vm_check_refs_on_alloc) {
			vm_page_validate_no_references(mem);
		}
#endif /* MACH_ASSERT */
		vm_page_finalize_grabed_page(mem);
		return mem;
	}
	enable_preemption();

	/*
	 * Optionally produce warnings if the wire or gobble
	 * counts exceed some threshold.
	 */
#if VM_PAGE_WIRE_COUNT_WARNING
	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
		printf("mk: vm_page_grab(): high wired page count of %d\n",
		    vm_page_wire_count);
	}
#endif
#if VM_PAGE_GOBBLE_COUNT_WARNING
	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
		    vm_page_gobble_count);
	}
#endif

	/*
	 * If the free count is low and we have delayed pages from early boot,
	 * get one of those instead.
	 */
	if (__improbable(vm_delayed_count > 0 &&
	    vm_page_free_count <= vm_page_free_target &&
	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
		assert(!mem->vmp_realtime);
		// TODO: missing vm_page_finalize_grabed_page()?
		return mem;
	}

	vm_free_page_lock_spin();

	/*
	 * Only let privileged threads (involved in pageout)
	 * dip into the reserved pool.
	 */
	if ((vm_page_free_count < vm_page_free_reserved) &&
	    !(current_thread()->options & TH_OPT_VMPRIV)) {
		/* no page for us in the free queue... */
		vm_free_page_unlock();
		mem = VM_PAGE_NULL;

#if CONFIG_SECLUDED_MEMORY
		/* ... but can we try and grab from the secluded queue? */
		if (vm_page_secluded_count > 0 &&
		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
		    task_can_use_secluded_mem(current_task(), TRUE))) {
			mem = vm_page_grab_secluded();
			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
				vm_page_secluded.grab_for_iokit++;
				if (mem) {
					vm_page_secluded.grab_for_iokit_success++;
				}
			}
			if (mem) {
				VM_CHECK_MEMORYSTATUS;

				vm_page_grab_diags();
				counter_inc(&vm_page_grab_count);
				VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);

				assert(!mem->vmp_realtime);
				// TODO: missing vm_page_finalize_grabed_page()?
				return mem;
			}
		}
#else /* CONFIG_SECLUDED_MEMORY */
		(void) grab_options;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		vm_page_t    head;
		vm_page_t    tail;
		unsigned int pages_to_steal;
		unsigned int color;
		unsigned int clump_end, sub_count;

		/*
		 * Replenishing our per-CPU cache of free pages might take
		 * too long to keep holding the "free_page" lock as a spinlock,
		 * so convert to the full mutex to prevent other threads trying
		 * to acquire the "free_page" lock from timing out spinning on
		 * the mutex interlock.
		 */
		vm_free_page_lock_convert();

		while (vm_page_free_count == 0) {
			vm_free_page_unlock();
			/*
			 * must be a privileged thread to be
			 * in this state since a non-privileged
			 * thread would have bailed if we were
			 * under the vm_page_free_reserved mark
			 */
			VM_PAGE_WAIT();
			vm_free_page_lock();
		}

		/*
		 * Need to repopulate the per-CPU free list from the global free list.
		 * Note we don't do any processing of pending retirement pages here.
		 * That'll happen in the code above when the page comes off the per-CPU list.
		 */
		disable_preemption();

		/*
		 * If we got preempted the cache might now have pages.
		 */
		if ((mem = *PERCPU_GET(free_pages))) {
			vm_free_page_unlock();
			enable_preemption();
			goto restart;
		}

		if (vm_page_free_count <= vm_page_free_reserved) {
			pages_to_steal = 1;
		} else {
			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
				pages_to_steal = vm_free_magazine_refill_limit;
			} else {
				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
			}
		}
		color = *PERCPU_GET(start_color);
		head = tail = NULL;

		vm_page_free_count -= pages_to_steal;
		clump_end = sub_count = 0;

		while (pages_to_steal--) {
			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
				color = (color + 1) & vm_color_mask;
			}
#if defined(__x86_64__)
			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
			    mem, clump_end);
#else
			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
			    mem, vmp_pageq);
#endif

			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);

			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

#if defined(__arm64__)
			color = (color + 1) & vm_color_mask;
#else

#if DEVELOPMENT || DEBUG

			sub_count++;
			if (clump_end) {
				vm_clump_update_stats(sub_count);
				sub_count = 0;
				color = (color + 1) & vm_color_mask;
			}
#else
			if (clump_end) {
				color = (color + 1) & vm_color_mask;
			}

#endif /* if DEVELOPMENT || DEBUG */

#endif /* if defined(__arm64__) */

			if (head == NULL) {
				head = mem;
			} else {
				tail->vmp_snext = mem;
			}
			tail = mem;

			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
			assert(mem->vmp_tabled == FALSE);
			assert(mem->vmp_object == 0);
			assert(!mem->vmp_laundry);

			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;

			assert(mem->vmp_busy);
			assert(!mem->vmp_pmapped);
			assert(!mem->vmp_wpmapped);
			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
			assert(!mem->vmp_realtime);
#if MACH_ASSERT
			if (vm_check_refs_on_alloc) {
				vm_page_validate_no_references(mem);
			}
#endif /* MACH_ASSERT */
		}
#if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
		vm_clump_update_stats(sub_count);
#endif

#if HIBERNATION
		if (hibernate_rebuild_needed) {
			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
		}
#endif /* HIBERNATION */
		vm_offset_t pcpu_base = current_percpu_base();
		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;

		/*
		 * We decremented vm_page_free_count above
		 * so we must wake up vm_pageout_scan() if
		 * we brought it down below vm_page_free_min.
		 */
		bool wakeup_pageout_scan = false;
		if (vm_page_free_count < vm_page_free_min &&
		    !vm_pageout_running) {
			wakeup_pageout_scan = true;
		}
		vm_free_page_unlock();

		enable_preemption();

		if (wakeup_pageout_scan) {
			thread_wakeup((event_t) &vm_page_free_wanted);
		}
		VM_CHECK_MEMORYSTATUS;

		goto restart;
	}

	/*
	 * Decide if we should poke the pageout daemon.
	 * We do this if the free count is less than the low
	 * water mark. VM Pageout Scan will keep running till
	 * the free_count > free_target (& hence above free_min).
	 * This wakeup is to catch the possibility of the counts
	 * dropping between VM Pageout Scan parking and this check.
	 *
	 * We don't have the counts locked ... if they change a little,
	 * it doesn't really matter.
	 */
	if (vm_page_free_count < vm_page_free_min) {
		vm_free_page_lock();
		if (vm_pageout_running == FALSE) {
			vm_free_page_unlock();
			thread_wakeup((event_t) &vm_page_free_wanted);
		} else {
			vm_free_page_unlock();
		}
	}

	VM_CHECK_MEMORYSTATUS;

	if (mem) {
		assert(!mem->vmp_realtime);
//		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */

		vm_page_finalize_grabed_page(mem);
	}
	return mem;
}
3528
3529 #if CONFIG_SECLUDED_MEMORY
3530 vm_page_t
vm_page_grab_secluded(void)3531 vm_page_grab_secluded(void)
3532 {
3533 vm_page_t mem;
3534 vm_object_t object;
3535 int refmod_state;
3536
3537 if (vm_page_secluded_count == 0) {
3538 /* no secluded pages to grab... */
3539 return VM_PAGE_NULL;
3540 }
3541
3542 /* secluded queue is protected by the VM page queue lock */
3543 vm_page_lock_queues();
3544
3545 if (vm_page_secluded_count == 0) {
3546 /* no secluded pages to grab... */
3547 vm_page_unlock_queues();
3548 return VM_PAGE_NULL;
3549 }
3550
3551 #if 00
3552 /* can we grab from the secluded queue? */
3553 if (vm_page_secluded_count > vm_page_secluded_target ||
3554 (vm_page_secluded_count > 0 &&
3555 task_can_use_secluded_mem(current_task(), TRUE))) {
3556 /* OK */
3557 } else {
3558 /* can't grab from secluded queue... */
3559 vm_page_unlock_queues();
3560 return VM_PAGE_NULL;
3561 }
3562 #endif
3563
3564 /* we can grab a page from secluded queue! */
3565 assert((vm_page_secluded_count_free +
3566 vm_page_secluded_count_inuse) ==
3567 vm_page_secluded_count);
3568 if (current_task()->task_can_use_secluded_mem) {
3569 assert(num_tasks_can_use_secluded_mem > 0);
3570 }
3571 assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3572 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3573 mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3574 assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3575 vm_page_queues_remove(mem, TRUE);
3576
3577 object = VM_PAGE_OBJECT(mem);
3578
3579 assert(!mem->vmp_fictitious);
3580 assert(!VM_PAGE_WIRED(mem));
3581 if (object == VM_OBJECT_NULL) {
3582 /* free for grab! */
3583 vm_page_unlock_queues();
3584 vm_page_secluded.grab_success_free++;
3585
3586 assert(mem->vmp_busy);
3587 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3588 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3589 assert(mem->vmp_pageq.next == 0);
3590 assert(mem->vmp_pageq.prev == 0);
3591 assert(mem->vmp_listq.next == 0);
3592 assert(mem->vmp_listq.prev == 0);
3593 assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3594 assert(mem->vmp_specialq.next == 0);
3595 assert(mem->vmp_specialq.prev == 0);
3596 return mem;
3597 }
3598
3599 assert(!object->internal);
3600 // vm_page_pageable_external_count--;
3601
3602 if (!vm_object_lock_try(object)) {
3603 // printf("SECLUDED: page %p: object %p locked\n", mem, object);
3604 vm_page_secluded.grab_failure_locked++;
3605 reactivate_secluded_page:
3606 vm_page_activate(mem);
3607 vm_page_unlock_queues();
3608 return VM_PAGE_NULL;
3609 }
3610 if (mem->vmp_busy ||
3611 mem->vmp_cleaning ||
3612 mem->vmp_laundry) {
3613 /* can't steal page in this state... */
3614 vm_object_unlock(object);
3615 vm_page_secluded.grab_failure_state++;
3616 goto reactivate_secluded_page;
3617 }
3618 if (mem->vmp_realtime) {
3619 /* don't steal pages used by realtime threads... */
3620 vm_object_unlock(object);
3621 vm_page_secluded.grab_failure_realtime++;
3622 goto reactivate_secluded_page;
3623 }
3624
3625 mem->vmp_busy = TRUE;
3626 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3627 if (refmod_state & VM_MEM_REFERENCED) {
3628 mem->vmp_reference = TRUE;
3629 }
3630 if (refmod_state & VM_MEM_MODIFIED) {
3631 SET_PAGE_DIRTY(mem, FALSE);
3632 }
3633 if (mem->vmp_dirty || mem->vmp_precious) {
3634 /* can't grab a dirty page; re-activate */
3635 // printf("SECLUDED: dirty page %p\n", mem);
3636 vm_page_wakeup_done(object, mem);
3637 vm_page_secluded.grab_failure_dirty++;
3638 vm_object_unlock(object);
3639 goto reactivate_secluded_page;
3640 }
3641 if (mem->vmp_reference) {
3642 /* it's been used but we do need to grab a page... */
3643 }
3644
3645 vm_page_unlock_queues();
3646
3647
3648 /* finish what vm_page_free() would have done... */
3649 vm_page_free_prepare_object(mem, TRUE);
3650 vm_object_unlock(object);
3651 object = VM_OBJECT_NULL;
3652
3653 #if MACH_ASSERT
3654 if (vm_check_refs_on_alloc) {
3655 vm_page_validate_no_references(mem);
3656 }
3657 #endif /* MACH_ASSERT */
3658
3659 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3660 vm_page_secluded.grab_success_other++;
3661
3662 assert(mem->vmp_busy);
3663 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3664 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3665 assert(mem->vmp_pageq.next == 0);
3666 assert(mem->vmp_pageq.prev == 0);
3667 assert(mem->vmp_listq.next == 0);
3668 assert(mem->vmp_listq.prev == 0);
3669 assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3670 assert(mem->vmp_specialq.next == 0);
3671 assert(mem->vmp_specialq.prev == 0);
3672
3673 return mem;
3674 }
3675
3676 uint64_t
3677 vm_page_secluded_drain(void)
3678 {
3679 vm_page_t local_freeq;
3680 int local_freed;
3681 uint64_t num_reclaimed;
3682 unsigned int saved_secluded_count, saved_secluded_target;
3683
3684 num_reclaimed = 0;
3685 local_freeq = NULL;
3686 local_freed = 0;
3687
3688 vm_page_lock_queues();
3689
3690 saved_secluded_count = vm_page_secluded_count;
3691 saved_secluded_target = vm_page_secluded_target;
3692 vm_page_secluded_target = 0;
3693 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3694 while (vm_page_secluded_count) {
3695 vm_page_t secluded_page;
3696
3697 assert((vm_page_secluded_count_free +
3698 vm_page_secluded_count_inuse) ==
3699 vm_page_secluded_count);
3700 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3701 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3702
3703 vm_page_queues_remove(secluded_page, FALSE);
3704 assert(!secluded_page->vmp_fictitious);
3705 assert(!VM_PAGE_WIRED(secluded_page));
3706
3707 if (secluded_page->vmp_object == 0) {
3708 /* transfer to free queue */
3709 assert(secluded_page->vmp_busy);
3710 secluded_page->vmp_snext = local_freeq;
3711 local_freeq = secluded_page;
3712 local_freed += 1;
3713 } else {
3714 /* transfer to head of active queue */
3715 vm_page_enqueue_active(secluded_page, FALSE);
3716 secluded_page = VM_PAGE_NULL;
3717 }
3718 num_reclaimed++;
3719 }
3720 vm_page_secluded_target = saved_secluded_target;
3721 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3722
3723 // printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3724
3725 vm_page_unlock_queues();
3726
3727 if (local_freed) {
3728 vm_page_free_list(local_freeq, TRUE);
3729 local_freeq = NULL;
3730 local_freed = 0;
3731 }
3732
3733 return num_reclaimed;
3734 }
3735 #endif /* CONFIG_SECLUDED_MEMORY */
3736
3737 static inline void
3738 vm_page_grab_diags()
3739 {
3740 #if DEVELOPMENT || DEBUG
3741 task_t task = current_task_early();
3742 if (task == NULL) {
3743 return;
3744 }
3745
3746 ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3747 #endif /* DEVELOPMENT || DEBUG */
3748 }
3749
3750 /*
3751 * vm_page_release:
3752 *
3753 * Return a page to the free list.
3754 */
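/*
 * The caller must pass a busy page that is off all page queues
 * (VM_PAGE_NOT_ON_Q) and no longer associated with any object;
 * the asserts below are the authoritative list of requirements.
 */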
3755
3756 void
3757 vm_page_release(
3758 vm_page_t mem,
3759 boolean_t page_queues_locked)
3760 {
3761 unsigned int color;
3762 int need_wakeup = 0;
3763 int need_priv_wakeup = 0;
3764 #if CONFIG_SECLUDED_MEMORY
3765 int need_secluded_wakeup = 0;
3766 #endif /* CONFIG_SECLUDED_MEMORY */
3767 event_t wakeup_event = NULL;
3768
3769 if (page_queues_locked) {
3770 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3771 } else {
3772 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3773 }
3774
3775 assert(!mem->vmp_private && !mem->vmp_fictitious);
3776
3777 vm_page_validate_no_references(mem);
3778
3779 // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
3780
3781 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3782
3783 if (__improbable(mem->vmp_realtime)) {
3784 if (!page_queues_locked) {
3785 vm_page_lock_queues();
3786 }
3787 if (mem->vmp_realtime) {
3788 mem->vmp_realtime = false;
3789 vm_page_realtime_count--;
3790 }
3791 if (!page_queues_locked) {
3792 vm_page_unlock_queues();
3793 }
3794 }
3795
3796 vm_free_page_lock_spin();
3797
3798 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3799 assert(mem->vmp_busy);
3800 assert(!mem->vmp_laundry);
3801 assert(mem->vmp_object == 0);
3802 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3803 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3804 assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
3805
3806 /* Clear any specialQ hints before releasing the page to the free pool */
3807 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3808
3809 if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3810 vm_lopage_free_count < vm_lopage_free_limit &&
3811 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3812 /*
3813 * this exists to support hardware controllers
3814 * incapable of generating DMAs with more than 32 bits
3815 * of address on platforms with physical memory > 4G...
3816 */
3817 vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3818 vm_lopage_free_count++;
3819
3820 if (vm_lopage_free_count >= vm_lopage_free_limit) {
3821 vm_lopage_refill = FALSE;
3822 }
3823
3824 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3825 mem->vmp_lopage = TRUE;
3826 #if CONFIG_SECLUDED_MEMORY
3827 } else if (vm_page_free_count > vm_page_free_reserved &&
3828 vm_page_secluded_count < vm_page_secluded_target &&
3829 num_tasks_can_use_secluded_mem == 0) {
3830 /*
3831 * XXX FBDP TODO: also avoid refilling secluded queue
3832 * when some IOKit objects are already grabbing from it...
3833 */
3834 if (!page_queues_locked) {
3835 if (!vm_page_trylock_queues()) {
3836 /* take locks in right order */
3837 vm_free_page_unlock();
3838 vm_page_lock_queues();
3839 vm_free_page_lock_spin();
3840 }
3841 }
3842 mem->vmp_lopage = FALSE;
3843 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3844 vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3845 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3846 vm_page_secluded_count++;
3847 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3848 vm_page_secluded_count_free++;
3849 if (!page_queues_locked) {
3850 vm_page_unlock_queues();
3851 }
3852 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3853 if (vm_page_free_wanted_secluded > 0) {
3854 vm_page_free_wanted_secluded--;
3855 need_secluded_wakeup = 1;
3856 }
3857 #endif /* CONFIG_SECLUDED_MEMORY */
3858 } else {
3859 mem->vmp_lopage = FALSE;
3860 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3861
3862 color = VM_PAGE_GET_COLOR(mem);
3863 #if defined(__x86_64__)
3864 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3865 #else
3866 vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3867 #endif
3868 vm_page_free_count++;
3869 /*
3870 * Check if we should wake up someone waiting for a page.
3871 * But don't bother waking them unless they can allocate.
3872 *
3873 * We wake up only one thread, to prevent starvation.
3874 * Because the scheduling system handles wait queues FIFO,
3875 * if we woke up all waiting threads, one greedy thread
3876 * could starve multiple well-behaved threads. When the
3877 * threads all wake up, the greedy thread runs first, grabs
3878 * the page, and waits for another page. It would be the
3879 * first to run when the next page is freed.
3880 *
3881 * However, there is a slight danger here.
3882 * The thread we wake might not use the free page.
3883 * Then the other threads could wait indefinitely
3884 * while the page goes unused. To forestall this,
3885 * the pageout daemon will keep making free pages
3886 * as long as vm_page_free_wanted is non-zero.
3887 */
3888
3889 assert(vm_page_free_count > 0);
3890 if (vm_page_free_wanted_privileged > 0) {
3891 vm_page_free_wanted_privileged--;
3892 need_priv_wakeup = 1;
3893 #if CONFIG_SECLUDED_MEMORY
3894 } else if (vm_page_free_wanted_secluded > 0 &&
3895 vm_page_free_count > vm_page_free_reserved) {
3896 vm_page_free_wanted_secluded--;
3897 need_secluded_wakeup = 1;
3898 #endif /* CONFIG_SECLUDED_MEMORY */
3899 } else if (vm_page_free_wanted > 0 &&
3900 vm_page_free_count > vm_page_free_reserved) {
3901 vm_page_free_wanted--;
3902 need_wakeup = 1;
3903 }
3904 }
3905 vm_pageout_vminfo.vm_page_pages_freed++;
3906
3907 vm_free_page_unlock();
3908
3909 VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3910
3911 if (need_priv_wakeup) {
3912 wakeup_event = &vm_page_free_wanted_privileged;
3913 }
3914 #if CONFIG_SECLUDED_MEMORY
3915 else if (need_secluded_wakeup) {
3916 wakeup_event = &vm_page_free_wanted_secluded;
3917 }
3918 #endif /* CONFIG_SECLUDED_MEMORY */
3919 else if (need_wakeup) {
3920 wakeup_event = &vm_page_free_count;
3921 }
3922
3923 if (wakeup_event) {
3924 if (vps_dynamic_priority_enabled) {
3925 wakeup_one_with_inheritor((event_t) wakeup_event,
3926 THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
3927 NULL);
3928 } else {
3929 thread_wakeup_one((event_t) wakeup_event);
3930 }
3931 }
3932
3933 VM_CHECK_MEMORYSTATUS;
3934 }
3935
3936 /*
3937 * This version of vm_page_release() is used only at startup
3938 * when we are single-threaded and pages are being released
3939 * for the first time. Hence, no locking is needed and unnecessary
3940 * checks are skipped. Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
3941 */
3942 void
3943 vm_page_release_startup(
3944 vm_page_t mem)
3945 {
3946 vm_page_queue_t queue_free;
3947
3948 if (vm_lopage_free_count < vm_lopage_free_limit &&
3949 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3950 mem->vmp_lopage = TRUE;
3951 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3952 vm_lopage_free_count++;
3953 queue_free = &vm_lopage_queue_free;
3954 #if CONFIG_SECLUDED_MEMORY
3955 } else if (vm_page_secluded_count < vm_page_secluded_target) {
3956 mem->vmp_lopage = FALSE;
3957 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3958 vm_page_secluded_count++;
3959 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3960 vm_page_secluded_count_free++;
3961 queue_free = &vm_page_queue_secluded;
3962 #endif /* CONFIG_SECLUDED_MEMORY */
3963 } else {
3964 mem->vmp_lopage = FALSE;
3965 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3966 vm_page_free_count++;
3967 queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3968 }
3969 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3970 #if defined(__x86_64__)
3971 vm_page_queue_enter_clump(queue_free, mem);
3972 #else
3973 vm_page_queue_enter(queue_free, mem, vmp_pageq);
3974 #endif
3975 } else {
3976 vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3977 }
3978 }
3979
3980 /*
3981 * vm_page_wait:
3982 *
3983 * Wait for a page to become available.
3984 * If there are plenty of free pages, then we don't sleep.
3985 *
3986 * Returns:
3987 * TRUE: There may be another page, try again
3988 * FALSE: We were interrupted out of our wait, don't try again
3989 */
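/*
 * Typical caller pattern (an illustrative sketch, not code from this
 * file; "m" is a hypothetical page pointer):
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *		if (!vm_page_wait(THREAD_INTERRUPTIBLE)) {
 *			return KERN_ABORTED;
 *		}
 *	}
 */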
3990
3991 boolean_t
3992 vm_page_wait(
3993 int interruptible )
3994 {
3995 /*
3996 * We can't use vm_page_free_reserved to make this
3997 * determination. Consider: some thread might
3998 * need to allocate two pages. The first allocation
3999 * succeeds, the second fails. After the first page is freed,
4000 * a call to vm_page_wait must really block.
4001 */
4002 kern_return_t wait_result;
4003 int need_wakeup = 0;
4004 int is_privileged = current_thread()->options & TH_OPT_VMPRIV;
4005 event_t wait_event = NULL;
4006
4007 vm_free_page_lock_spin();
4008
4009 if (is_privileged && vm_page_free_count) {
4010 vm_free_page_unlock();
4011 return TRUE;
4012 }
4013
4014 if (vm_page_free_count >= vm_page_free_target) {
4015 vm_free_page_unlock();
4016 return TRUE;
4017 }
4018
4019 if (is_privileged) {
4020 if (vm_page_free_wanted_privileged++ == 0) {
4021 need_wakeup = 1;
4022 }
4023 wait_event = (event_t)&vm_page_free_wanted_privileged;
4024 #if CONFIG_SECLUDED_MEMORY
4025 } else if (secluded_for_apps &&
4026 task_can_use_secluded_mem(current_task(), FALSE)) {
4027 #if 00
4028 /* XXX FBDP: need pageq lock for this... */
4029 /* XXX FBDP: might wait even if pages available, */
4030 /* XXX FBDP: hopefully not for too long... */
4031 if (vm_page_secluded_count > 0) {
4032 vm_free_page_unlock();
4033 return TRUE;
4034 }
4035 #endif
4036 if (vm_page_free_wanted_secluded++ == 0) {
4037 need_wakeup = 1;
4038 }
4039 wait_event = (event_t)&vm_page_free_wanted_secluded;
4040 #endif /* CONFIG_SECLUDED_MEMORY */
4041 } else {
4042 if (vm_page_free_wanted++ == 0) {
4043 need_wakeup = 1;
4044 }
4045 wait_event = (event_t)&vm_page_free_count;
4046 }
4047
4048 /*
4049 * We don't do a vm_pageout_scan wakeup if we already have
4050 * some waiters because vm_pageout_scan checks for waiters
4051 * before it returns and does so behind the vm_page_queue_free_lock,
4052 * which we own when we bump the waiter counts.
4053 */
4054
4055 if (vps_dynamic_priority_enabled) {
4056 /*
4057 * We are waking up vm_pageout_scan here. If it needs
4058 * the vm_page_queue_free_lock before we unlock it,
4059 * we'll end up just blocking and incurring an extra
4060 * context switch. Could be a performance issue.
4061 */
4062
4063 if (need_wakeup) {
4064 thread_wakeup((event_t)&vm_page_free_wanted);
4065 }
4066
4067 /*
4068 * LD: This event is going to get recorded every time because
4069 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
4070 * We just block in that routine.
4071 */
4072 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4073 vm_page_free_wanted_privileged,
4074 vm_page_free_wanted,
4075 #if CONFIG_SECLUDED_MEMORY
4076 vm_page_free_wanted_secluded,
4077 #else /* CONFIG_SECLUDED_MEMORY */
4078 0,
4079 #endif /* CONFIG_SECLUDED_MEMORY */
4080 0);
4081 wait_result = lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
4082 LCK_SLEEP_UNLOCK,
4083 wait_event,
4084 vm_pageout_scan_thread,
4085 interruptible,
4086 0);
4087 } else {
4088 wait_result = assert_wait(wait_event, interruptible);
4089
4090 vm_free_page_unlock();
4091
4092 if (need_wakeup) {
4093 thread_wakeup((event_t)&vm_page_free_wanted);
4094 }
4095
4096 if (wait_result == THREAD_WAITING) {
4097 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4098 vm_page_free_wanted_privileged,
4099 vm_page_free_wanted,
4100 #if CONFIG_SECLUDED_MEMORY
4101 vm_page_free_wanted_secluded,
4102 #else /* CONFIG_SECLUDED_MEMORY */
4103 0,
4104 #endif /* CONFIG_SECLUDED_MEMORY */
4105 0);
4106 wait_result = thread_block(THREAD_CONTINUE_NULL);
4107 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4108 DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4109 }
4110 }
4111
4112 return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4113 }
4114
4115 /*
4116 * vm_page_alloc:
4117 *
4118 * Allocate and return a memory cell associated
4119 * with this VM object/offset pair.
4120 *
4121 * Object must be locked.
4122 */
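/*
 * Example usage (sketch; "object" and "offset" are hypothetical, and the
 * caller must already hold the object lock exclusively):
 *
 *	mem = vm_page_alloc(object, offset);
 *	if (mem == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		// back off, e.g. via vm_page_wait(), then retry
 *	}
 */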
4123
4124 vm_page_t
4125 vm_page_alloc(
4126 vm_object_t object,
4127 vm_object_offset_t offset)
4128 {
4129 vm_page_t mem;
4130 int grab_options;
4131
4132 vm_object_lock_assert_exclusive(object);
4133 grab_options = 0;
4134 #if CONFIG_SECLUDED_MEMORY
4135 if (object->can_grab_secluded) {
4136 grab_options |= VM_PAGE_GRAB_SECLUDED;
4137 }
4138 #endif /* CONFIG_SECLUDED_MEMORY */
4139 mem = vm_page_grab_options(grab_options);
4140 if (mem == VM_PAGE_NULL) {
4141 return VM_PAGE_NULL;
4142 }
4143
4144 vm_page_insert(mem, object, offset);
4145
4146 return mem;
4147 }
4148
4149 /*
4150 * vm_page_free_prepare:
4151 *
4152 * Removes page from any queue it may be on
4153 * and disassociates it from its VM object.
4154 *
4155 * Object and page queues must be locked prior to entry.
4156 */
4157 static void
4158 vm_page_free_prepare(
4159 vm_page_t mem)
4160 {
4161 #if CONFIG_SPTM
4162 /**
4163 * SPTM TODO: The pmap should retype frames automatically as mappings to them are
4164 * created and destroyed. In order to catch potential cases where this
4165 * does not happen, add an appropriate assert here. This code should be
4166 * executed on every frame that is about to be released to the VM.
4167 */
4168 const sptm_paddr_t paddr = ((uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)) << PAGE_SHIFT;
4169 __unused const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
4170
4171 assert(frame_type == XNU_DEFAULT);
4172 #endif /* CONFIG_SPTM */
4173
4174 vm_page_free_prepare_queues(mem);
4175 vm_page_free_prepare_object(mem, TRUE);
4176 }
4177
4178
4179 void
4180 vm_page_free_prepare_queues(
4181 vm_page_t mem)
4182 {
4183 vm_object_t m_object;
4184
4185 VM_PAGE_CHECK(mem);
4186
4187 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4188 assert(!mem->vmp_cleaning);
4189 m_object = VM_PAGE_OBJECT(mem);
4190
4191 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4192 if (m_object) {
4193 vm_object_lock_assert_exclusive(m_object);
4194 }
4195 if (mem->vmp_laundry) {
4196 /*
4197 * We may have to free a page while it's being laundered
4198 * if we lost its pager (due to a forced unmount, for example).
4199 * We need to call vm_pageout_steal_laundry() before removing
4200 * the page from its VM object, so that we can remove it
4201 * from its pageout queue and adjust the laundry accounting
4202 */
4203 vm_pageout_steal_laundry(mem, TRUE);
4204 }
4205
4206 vm_page_queues_remove(mem, TRUE);
4207
4208 if (__improbable(mem->vmp_realtime)) {
4209 mem->vmp_realtime = false;
4210 vm_page_realtime_count--;
4211 }
4212
4213 if (VM_PAGE_WIRED(mem)) {
4214 assert(mem->vmp_wire_count > 0);
4215
4216 if (m_object) {
4217 task_t owner;
4218 int ledger_idx_volatile;
4219 int ledger_idx_nonvolatile;
4220 int ledger_idx_volatile_compressed;
4221 int ledger_idx_nonvolatile_compressed;
4222 int ledger_idx_composite;
4223 int ledger_idx_external_wired;
4224 boolean_t do_footprint;
4225
4226 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4227 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4228 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4229
4230 assert(m_object->resident_page_count >=
4231 m_object->wired_page_count);
4232
4233 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4234 OSAddAtomic(+1, &vm_page_purgeable_count);
4235 assert(vm_page_purgeable_wired_count > 0);
4236 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4237 }
4238 if (m_object->internal &&
4239 m_object->vo_owner != TASK_NULL &&
4240 (m_object->purgable == VM_PURGABLE_VOLATILE ||
4241 m_object->purgable == VM_PURGABLE_EMPTY)) {
4242 owner = VM_OBJECT_OWNER(m_object);
4243 vm_object_ledger_tag_ledgers(
4244 m_object,
4245 &ledger_idx_volatile,
4246 &ledger_idx_nonvolatile,
4247 &ledger_idx_volatile_compressed,
4248 &ledger_idx_nonvolatile_compressed,
4249 &ledger_idx_composite,
4250 &ledger_idx_external_wired,
4251 &do_footprint);
4252 /*
4253 * While wired, this page was accounted
4254 * as "non-volatile" but it should now
4255 * be accounted as "volatile".
4256 */
4257 /* one less "non-volatile"... */
4258 ledger_debit(owner->ledger,
4259 ledger_idx_nonvolatile,
4260 PAGE_SIZE);
4261 if (do_footprint) {
4262 /* ... and "phys_footprint" */
4263 ledger_debit(owner->ledger,
4264 task_ledgers.phys_footprint,
4265 PAGE_SIZE);
4266 } else if (ledger_idx_composite != -1) {
4267 ledger_debit(owner->ledger,
4268 ledger_idx_composite,
4269 PAGE_SIZE);
4270 }
4271 /* one more "volatile" */
4272 ledger_credit(owner->ledger,
4273 ledger_idx_volatile,
4274 PAGE_SIZE);
4275 }
4276 }
4277 if (!mem->vmp_private && !mem->vmp_fictitious) {
4278 vm_page_wire_count--;
4279 }
4280
4281 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4282 mem->vmp_wire_count = 0;
4283 assert(!mem->vmp_gobbled);
4284 } else if (mem->vmp_gobbled) {
4285 if (!mem->vmp_private && !mem->vmp_fictitious) {
4286 vm_page_wire_count--;
4287 }
4288 vm_page_gobble_count--;
4289 }
4290 }
4291
4292
4293 void
4294 vm_page_free_prepare_object(
4295 vm_page_t mem,
4296 boolean_t remove_from_hash)
4297 {
4298 assert(!mem->vmp_realtime);
4299 if (mem->vmp_tabled) {
4300 vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */
4301 }
4302 vm_page_wakeup(VM_OBJECT_NULL, mem); /* clears wanted */
4303
4304 if (mem->vmp_private) {
4305 mem->vmp_private = FALSE;
4306 mem->vmp_fictitious = TRUE;
4307 VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4308 }
4309 if (!mem->vmp_fictitious) {
4310 assert(mem->vmp_pageq.next == 0);
4311 assert(mem->vmp_pageq.prev == 0);
4312 assert(mem->vmp_listq.next == 0);
4313 assert(mem->vmp_listq.prev == 0);
4314 assert(mem->vmp_specialq.next == 0);
4315 assert(mem->vmp_specialq.prev == 0);
4316 assert(mem->vmp_next_m == 0);
4317
4318 vm_page_validate_no_references(mem);
4319
4320 {
4321 vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4322 }
4323 }
4324 }
4325
4326 /*
4327 * vm_page_free:
4328 *
4329 * Returns the given page to the free list,
4330 * disassociating it with any VM object.
4331 *
4332 * Object and page queues must be locked prior to entry.
4333 */
4334 void
4335 vm_page_free(
4336 vm_page_t mem)
4337 {
4338 vm_page_free_prepare(mem);
4339
4340 if (mem->vmp_fictitious) {
4341 vm_page_release_fictitious(mem);
4342 } else {
4343 vm_page_release(mem, TRUE); /* page queues are locked */
4344 }
4345 }
4346
4347
4348 void
4349 vm_page_free_unlocked(
4350 vm_page_t mem,
4351 boolean_t remove_from_hash)
4352 {
4353 vm_page_lockspin_queues();
4354 vm_page_free_prepare_queues(mem);
4355 vm_page_unlock_queues();
4356
4357 vm_page_free_prepare_object(mem, remove_from_hash);
4358
4359 if (mem->vmp_fictitious) {
4360 vm_page_release_fictitious(mem);
4361 } else {
4362 vm_page_release(mem, FALSE); /* page queues are not locked */
4363 }
4364 }
4365
4366
4367 /*
4368 * Free a list of pages. The list can be up to several hundred pages,
4369 * as blocked up by vm_pageout_scan().
4370 * The big win is not having to take the free list lock once
4371 * per page.
4372 *
4373 * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4374 * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4375 */
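/*
 * Pages are chained through vmp_snext, mirroring what
 * vm_page_secluded_drain() does above (sketch; "mem" is hypothetical):
 *
 *	vm_page_t freeq = VM_PAGE_NULL;
 *	...
 *	mem->vmp_snext = freeq;
 *	freeq = mem;
 *	...
 *	vm_page_free_list(freeq, TRUE);
 */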
4376 void
4377 vm_page_free_list(
4378 vm_page_t freeq,
4379 boolean_t prepare_object)
4380 {
4381 vm_page_t mem;
4382 vm_page_t nxt;
4383 vm_page_t local_freeq;
4384 int pg_count;
4385
4386 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4387 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4388
4389 while (freeq) {
4390 pg_count = 0;
4391 local_freeq = VM_PAGE_NULL;
4392 mem = freeq;
4393
4394 /*
4395 * break up the processing into smaller chunks so
4396 * that we can 'pipeline' the pages onto the
4397 * free list without introducing too much
4398 * contention on the global free queue lock
4399 */
4400 while (mem && pg_count < 64) {
4401 assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4402 (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4403 assert(mem->vmp_specialq.next == 0 &&
4404 mem->vmp_specialq.prev == 0);
4405 /*
4406 * &&
4407 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4408 */
4409 nxt = mem->vmp_snext;
4410 mem->vmp_snext = NULL;
4411 assert(mem->vmp_pageq.prev == 0);
4412
4413 if (!mem->vmp_fictitious && !mem->vmp_private) {
4414 vm_page_validate_no_references(mem);
4415 }
4416
4417 if (__improbable(mem->vmp_realtime)) {
4418 vm_page_lock_queues();
4419 if (mem->vmp_realtime) {
4420 mem->vmp_realtime = false;
4421 vm_page_realtime_count--;
4422 }
4423 vm_page_unlock_queues();
4424 }
4425
4426 if (prepare_object == TRUE) {
4427 vm_page_free_prepare_object(mem, TRUE);
4428 }
4429
4430 if (!mem->vmp_fictitious) {
4431 assert(mem->vmp_busy);
4432
4433 if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4434 vm_lopage_free_count < vm_lopage_free_limit &&
4435 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4436 vm_page_release(mem, FALSE); /* page queues are not locked */
4437 #if CONFIG_SECLUDED_MEMORY
4438 } else if (vm_page_secluded_count < vm_page_secluded_target &&
4439 num_tasks_can_use_secluded_mem == 0) {
4440 vm_page_release(mem,
4441 FALSE); /* page queues are not locked */
4442 #endif /* CONFIG_SECLUDED_MEMORY */
4443 } else {
4444 /*
4445 * IMPORTANT: we can't set the page "free" here
4446 * because that would make the page eligible for
4447 * a physically-contiguous allocation (see
4448 * vm_page_find_contiguous()) right away (we don't
4449 * hold the vm_page_queue_free lock). That would
4450 * cause trouble because the page is not actually
4451 * in the free queue yet...
4452 */
4453 mem->vmp_snext = local_freeq;
4454 local_freeq = mem;
4455 pg_count++;
4456
4457 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4458 }
4459 } else {
4460 assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4461 VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4462 vm_page_release_fictitious(mem);
4463 }
4464 mem = nxt;
4465 }
4466 freeq = mem;
4467
4468 if ((mem = local_freeq)) {
4469 unsigned int avail_free_count;
4470 unsigned int need_wakeup = 0;
4471 unsigned int need_priv_wakeup = 0;
4472 #if CONFIG_SECLUDED_MEMORY
4473 unsigned int need_wakeup_secluded = 0;
4474 #endif /* CONFIG_SECLUDED_MEMORY */
4475 event_t priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4476 boolean_t priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4477
4478 vm_free_page_lock_spin();
4479
4480 while (mem) {
4481 int color;
4482
4483 nxt = mem->vmp_snext;
4484
4485 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4486 assert(mem->vmp_busy);
4487 assert(!mem->vmp_realtime);
4488 mem->vmp_lopage = FALSE;
4489 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4490
4491 color = VM_PAGE_GET_COLOR(mem);
4492 #if defined(__x86_64__)
4493 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4494 #else
4495 vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4496 mem, vmp_pageq);
4497 #endif
4498 mem = nxt;
4499 }
4500 vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4501 vm_page_free_count += pg_count;
4502 avail_free_count = vm_page_free_count;
4503
4504 VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4505
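/*
 * Distribute the newly freed pages among the waiter classes,
 * privileged waiters first. Worked example (illustrative numbers):
 * with avail_free_count == 10 and vm_page_free_wanted_privileged == 3,
 * all three privileged waiters are satisfied and 7 pages remain for
 * the secluded/normal classes handled below.
 */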
4506 if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4507 if (avail_free_count < vm_page_free_wanted_privileged) {
4508 need_priv_wakeup = avail_free_count;
4509 vm_page_free_wanted_privileged -= avail_free_count;
4510 avail_free_count = 0;
4511 } else {
4512 need_priv_wakeup = vm_page_free_wanted_privileged;
4513 avail_free_count -= vm_page_free_wanted_privileged;
4514 vm_page_free_wanted_privileged = 0;
4515 }
4516 }
4517 #if CONFIG_SECLUDED_MEMORY
4518 if (vm_page_free_wanted_secluded > 0 &&
4519 avail_free_count > vm_page_free_reserved) {
4520 unsigned int available_pages;
4521 available_pages = (avail_free_count -
4522 vm_page_free_reserved);
4523 if (available_pages <
4524 vm_page_free_wanted_secluded) {
4525 need_wakeup_secluded = available_pages;
4526 vm_page_free_wanted_secluded -=
4527 available_pages;
4528 avail_free_count -= available_pages;
4529 } else {
4530 need_wakeup_secluded =
4531 vm_page_free_wanted_secluded;
4532 avail_free_count -=
4533 vm_page_free_wanted_secluded;
4534 vm_page_free_wanted_secluded = 0;
4535 }
4536 }
4537 #endif /* CONFIG_SECLUDED_MEMORY */
4538 if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4539 unsigned int available_pages;
4540
4541 available_pages = avail_free_count - vm_page_free_reserved;
4542
4543 if (available_pages >= vm_page_free_wanted) {
4544 need_wakeup = vm_page_free_wanted;
4545 vm_page_free_wanted = 0;
4546 } else {
4547 need_wakeup = available_pages;
4548 vm_page_free_wanted -= available_pages;
4549 }
4550 }
4551 vm_free_page_unlock();
4552
4553 priv_wakeup_event = NULL;
4554 secluded_wakeup_event = NULL;
4555 normal_wakeup_event = NULL;
4556
4557 priv_wakeup_all = FALSE;
4558 secluded_wakeup_all = FALSE;
4559 normal_wakeup_all = FALSE;
4560
4561
4562 if (need_priv_wakeup != 0) {
4563 /*
4564 * There shouldn't be that many VM-privileged threads,
4565 * so let's wake them all up, even if we don't quite
4566 * have enough pages to satisfy them all.
4567 */
4568 priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4569 priv_wakeup_all = TRUE;
4570 }
4571 #if CONFIG_SECLUDED_MEMORY
4572 if (need_wakeup_secluded != 0 &&
4573 vm_page_free_wanted_secluded == 0) {
4574 secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4575 secluded_wakeup_all = TRUE;
4576 need_wakeup_secluded = 0;
4577 } else {
4578 secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4579 }
4580 #endif /* CONFIG_SECLUDED_MEMORY */
4581 if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4582 /*
4583 * We don't expect to have any more waiters
4584 * after this, so let's wake them all up at
4585 * once.
4586 */
4587 normal_wakeup_event = (event_t) &vm_page_free_count;
4588 normal_wakeup_all = TRUE;
4589 need_wakeup = 0;
4590 } else {
4591 normal_wakeup_event = (event_t) &vm_page_free_count;
4592 }
4593
4594 if (priv_wakeup_event ||
4595 #if CONFIG_SECLUDED_MEMORY
4596 secluded_wakeup_event ||
4597 #endif /* CONFIG_SECLUDED_MEMORY */
4598 normal_wakeup_event) {
4599 if (vps_dynamic_priority_enabled) {
4600 if (priv_wakeup_all == TRUE) {
4601 wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4602 }
4603
4604 #if CONFIG_SECLUDED_MEMORY
4605 if (secluded_wakeup_all == TRUE) {
4606 wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4607 }
4608
4609 while (need_wakeup_secluded-- != 0) {
4610 /*
4611 * Wake up one waiter per page we just released.
4612 */
4613 wakeup_one_with_inheritor(secluded_wakeup_event,
4614 THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
4615 }
4616 #endif /* CONFIG_SECLUDED_MEMORY */
4617
4618 if (normal_wakeup_all == TRUE) {
4619 wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4620 }
4621
4622 while (need_wakeup-- != 0) {
4623 /*
4624 * Wake up one waiter per page we just released.
4625 */
4626 wakeup_one_with_inheritor(normal_wakeup_event,
4627 THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
4628 NULL);
4629 }
4630 } else {
4631 /*
4632 * Non-priority-aware wakeups.
4633 */
4634
4635 if (priv_wakeup_all == TRUE) {
4636 thread_wakeup(priv_wakeup_event);
4637 }
4638
4639 #if CONFIG_SECLUDED_MEMORY
4640 if (secluded_wakeup_all == TRUE) {
4641 thread_wakeup(secluded_wakeup_event);
4642 }
4643
4644 while (need_wakeup_secluded-- != 0) {
4645 /*
4646 * Wake up one waiter per page we just released.
4647 */
4648 thread_wakeup_one(secluded_wakeup_event);
4649 }
4650
4651 #endif /* CONFIG_SECLUDED_MEMORY */
4652 if (normal_wakeup_all == TRUE) {
4653 thread_wakeup(normal_wakeup_event);
4654 }
4655
4656 while (need_wakeup-- != 0) {
4657 /*
4658 * Wake up one waiter per page we just released.
4659 */
4660 thread_wakeup_one(normal_wakeup_event);
4661 }
4662 }
4663 }
4664
4665 VM_CHECK_MEMORYSTATUS;
4666 }
4667 }
4668 }
4669
4670
4671 /*
4672 * vm_page_wire:
4673 *
4674 * Mark this page as wired down by yet
4675 * another map, removing it from paging queues
4676 * as necessary.
4677 *
4678 * The page's object and the page queues must be locked.
4679 */
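/*
 * Example locking discipline around a wire (sketch only; "object" is
 * hypothetical and VM_KERN_MEMORY_NONE stands in for a real wire tag):
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_wire(mem, VM_KERN_MEMORY_NONE, TRUE);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */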
4680
4681
4682 void
4683 vm_page_wire(
4684 vm_page_t mem,
4685 vm_tag_t tag,
4686 boolean_t check_memorystatus)
4687 {
4688 vm_object_t m_object;
4689
4690 m_object = VM_PAGE_OBJECT(mem);
4691
4692 // dbgLog(current_thread(), mem->vmp_offset, m_object, 1); /* (TEST/DEBUG) */
4693
4694 VM_PAGE_CHECK(mem);
4695 if (m_object) {
4696 vm_object_lock_assert_exclusive(m_object);
4697 } else {
4698 /*
4699 * In theory, the page should be in an object before it
4700 * gets wired, since we need to hold the object lock
4701 * to update some fields in the page structure.
4702 * However, some code (i386 pmap, for example) might want
4703 * to wire a page before it gets inserted into an object.
4704 * That's somewhat OK, as long as nobody else can get to
4705 * that page and update it at the same time.
4706 */
4707 }
4708 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4709 if (!VM_PAGE_WIRED(mem)) {
4710 if (mem->vmp_laundry) {
4711 vm_pageout_steal_laundry(mem, TRUE);
4712 }
4713
4714 vm_page_queues_remove(mem, TRUE);
4715
4716 assert(mem->vmp_wire_count == 0);
4717 mem->vmp_q_state = VM_PAGE_IS_WIRED;
4718
4719 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4720 if (mem->vmp_unmodified_ro == true) {
4721 /* Object and PageQ locks are held*/
4722 mem->vmp_unmodified_ro = false;
4723 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4724 vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(mem), mem->vmp_offset);
4725 }
4726 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4727
4728 if (m_object) {
4729 task_t owner;
4730 int ledger_idx_volatile;
4731 int ledger_idx_nonvolatile;
4732 int ledger_idx_volatile_compressed;
4733 int ledger_idx_nonvolatile_compressed;
4734 int ledger_idx_composite;
4735 int ledger_idx_external_wired;
4736 boolean_t do_footprint;
4737
4738 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4739 VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4740 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4741
4742 assert(m_object->resident_page_count >=
4743 m_object->wired_page_count);
4744 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4745 assert(vm_page_purgeable_count > 0);
4746 OSAddAtomic(-1, &vm_page_purgeable_count);
4747 OSAddAtomic(1, &vm_page_purgeable_wired_count);
4748 }
4749 if (m_object->internal &&
4750 m_object->vo_owner != TASK_NULL &&
4751 (m_object->purgable == VM_PURGABLE_VOLATILE ||
4752 m_object->purgable == VM_PURGABLE_EMPTY)) {
4753 owner = VM_OBJECT_OWNER(m_object);
4754 vm_object_ledger_tag_ledgers(
4755 m_object,
4756 &ledger_idx_volatile,
4757 &ledger_idx_nonvolatile,
4758 &ledger_idx_volatile_compressed,
4759 &ledger_idx_nonvolatile_compressed,
4760 &ledger_idx_composite,
4761 &ledger_idx_external_wired,
4762 &do_footprint);
4763 /* less volatile bytes */
4764 ledger_debit(owner->ledger,
4765 ledger_idx_volatile,
4766 PAGE_SIZE);
4767 /* more not-quite-volatile bytes */
4768 ledger_credit(owner->ledger,
4769 ledger_idx_nonvolatile,
4770 PAGE_SIZE);
4771 if (do_footprint) {
4772 /* more footprint */
4773 ledger_credit(owner->ledger,
4774 task_ledgers.phys_footprint,
4775 PAGE_SIZE);
4776 } else if (ledger_idx_composite != -1) {
4777 ledger_credit(owner->ledger,
4778 ledger_idx_composite,
4779 PAGE_SIZE);
4780 }
4781 }
4782
4783 if (m_object->all_reusable) {
4784 /*
4785 * Wired pages are not counted as "re-usable"
4786 * in "all_reusable" VM objects, so nothing
4787 * to do here.
4788 */
4789 } else if (mem->vmp_reusable) {
4790 /*
4791 * This page is not "re-usable" when it's
4792 * wired, so adjust its state and the
4793 * accounting.
4794 */
4795 vm_page_lockconvert_queues();
4796 vm_object_reuse_pages(m_object,
4797 mem->vmp_offset,
4798 mem->vmp_offset + PAGE_SIZE_64,
4799 FALSE);
4800 }
4801 }
4802 assert(!mem->vmp_reusable);
4803
4804 if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4805 vm_page_wire_count++;
4806 }
4807 if (mem->vmp_gobbled) {
4808 vm_page_gobble_count--;
4809 }
4810 mem->vmp_gobbled = FALSE;
4811
4812 if (check_memorystatus == TRUE) {
4813 VM_CHECK_MEMORYSTATUS;
4814 }
4815 }
4816 assert(!mem->vmp_gobbled);
4817 assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4818 mem->vmp_wire_count++;
4819 if (__improbable(mem->vmp_wire_count == 0)) {
4820 panic("vm_page_wire(%p): wire_count overflow", mem);
4821 }
4822 VM_PAGE_CHECK(mem);
4823 }
4824
4825 /*
4826 * vm_page_unwire:
4827 *
4828 * Release one wiring of this page, potentially
4829 * enabling it to be paged again.
4830 *
4831 * The page's object and the page queues must be locked.
4832 */
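/*
 * Counterpart to the vm_page_wire() sketch above: dropping the last
 * wiring under the same locks puts the page back on a pageable queue
 * (activated, or deactivated if the object is VM_PURGABLE_EMPTY).
 */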
4833 void
4834 vm_page_unwire(
4835 vm_page_t mem,
4836 boolean_t queueit)
4837 {
4838 vm_object_t m_object;
4839
4840 m_object = VM_PAGE_OBJECT(mem);
4841
4842 // dbgLog(current_thread(), mem->vmp_offset, m_object, 0); /* (TEST/DEBUG) */
4843
4844 VM_PAGE_CHECK(mem);
4845 assert(VM_PAGE_WIRED(mem));
4846 assert(mem->vmp_wire_count > 0);
4847 assert(!mem->vmp_gobbled);
4848 assert(m_object != VM_OBJECT_NULL);
4849 vm_object_lock_assert_exclusive(m_object);
4850 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4851 if (--mem->vmp_wire_count == 0) {
4852 task_t owner;
4853 int ledger_idx_volatile;
4854 int ledger_idx_nonvolatile;
4855 int ledger_idx_volatile_compressed;
4856 int ledger_idx_nonvolatile_compressed;
4857 int ledger_idx_composite;
4858 int ledger_idx_external_wired;
4859 boolean_t do_footprint;
4860
4861 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4862
4863 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4864 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4865 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4866 if (!mem->vmp_private && !mem->vmp_fictitious) {
4867 vm_page_wire_count--;
4868 }
4869
4870 assert(m_object->resident_page_count >=
4871 m_object->wired_page_count);
4872 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4873 OSAddAtomic(+1, &vm_page_purgeable_count);
4874 assert(vm_page_purgeable_wired_count > 0);
4875 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4876 }
4877 if (m_object->internal &&
4878 m_object->vo_owner != TASK_NULL &&
4879 (m_object->purgable == VM_PURGABLE_VOLATILE ||
4880 m_object->purgable == VM_PURGABLE_EMPTY)) {
4881 owner = VM_OBJECT_OWNER(m_object);
4882 vm_object_ledger_tag_ledgers(
4883 m_object,
4884 &ledger_idx_volatile,
4885 &ledger_idx_nonvolatile,
4886 &ledger_idx_volatile_compressed,
4887 &ledger_idx_nonvolatile_compressed,
4888 &ledger_idx_composite,
4889 &ledger_idx_external_wired,
4890 &do_footprint);
4891 /* more volatile bytes */
4892 ledger_credit(owner->ledger,
4893 ledger_idx_volatile,
4894 PAGE_SIZE);
4895 /* less not-quite-volatile bytes */
4896 ledger_debit(owner->ledger,
4897 ledger_idx_nonvolatile,
4898 PAGE_SIZE);
4899 if (do_footprint) {
4900 /* less footprint */
4901 ledger_debit(owner->ledger,
4902 task_ledgers.phys_footprint,
4903 PAGE_SIZE);
4904 } else if (ledger_idx_composite != -1) {
4905 ledger_debit(owner->ledger,
4906 ledger_idx_composite,
4907 PAGE_SIZE);
4908 }
4909 }
4910 assert(!is_kernel_object(m_object));
4911 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4912
4913 if (queueit == TRUE) {
4914 if (m_object->purgable == VM_PURGABLE_EMPTY) {
4915 vm_page_deactivate(mem);
4916 } else {
4917 vm_page_activate(mem);
4918 }
4919 }
4920
4921 VM_CHECK_MEMORYSTATUS;
4922 }
4923 VM_PAGE_CHECK(mem);
4924 }
4925
4926 /*
4927 * vm_page_deactivate:
4928 *
4929 * Returns the given page to the inactive list,
4930 * indicating that no physical maps have access
4931 * to this page. [Used by the physical mapping system.]
4932 *
4933 * The page queues must be locked.
4934 */
4935 void
4936 vm_page_deactivate(
4937 vm_page_t m)
4938 {
4939 vm_page_deactivate_internal(m, TRUE);
4940 }
4941
4942
4943 void
4944 vm_page_deactivate_internal(
4945 vm_page_t m,
4946 boolean_t clear_hw_reference)
4947 {
4948 vm_object_t m_object;
4949
4950 m_object = VM_PAGE_OBJECT(m);
4951
4952 VM_PAGE_CHECK(m);
4953 assert(!is_kernel_object(m_object));
4954 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4955
4956 // dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
4957 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4958 /*
4959 * This page is no longer very interesting. If it was
4960 * interesting (active or inactive/referenced), then we
4961 * clear the reference bit and (re)enter it in the
4962 * inactive queue. Note that wired pages should not have
4963 * their reference bit cleared.
4964 */
4965 assert( !(m->vmp_absent && !m->vmp_unusual));
4966
4967 if (m->vmp_gobbled) { /* can this happen? */
4968 assert( !VM_PAGE_WIRED(m));
4969
4970 if (!m->vmp_private && !m->vmp_fictitious) {
4971 vm_page_wire_count--;
4972 }
4973 vm_page_gobble_count--;
4974 m->vmp_gobbled = FALSE;
4975 }
4976 /*
4977 * if this page is currently on the pageout queue, we can't do the
4978 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4979 * and we can't remove it manually since we would need the object lock
4980 * (which is not required here) to decrement the activity_in_progress
4981 * reference which is held on the object while the page is in the pageout queue...
4982 * just let the normal laundry processing proceed
4983 */
4984 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4985 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4986 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4987 VM_PAGE_WIRED(m)) {
4988 return;
4989 }
4990 if (!m->vmp_absent && clear_hw_reference == TRUE) {
4991 vm_page_lockconvert_queues();
4992 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4993 }
4994
4995 m->vmp_reference = FALSE;
4996 m->vmp_no_cache = FALSE;
4997
4998 if (!VM_PAGE_INACTIVE(m)) {
4999 vm_page_queues_remove(m, FALSE);
5000
5001 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5002 m->vmp_dirty && m_object->internal &&
5003 (m_object->purgable == VM_PURGABLE_DENY ||
5004 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5005 m_object->purgable == VM_PURGABLE_VOLATILE)) {
5006 vm_page_check_pageable_safe(m);
5007 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5008 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5009 vm_page_throttled_count++;
5010 } else {
5011 if (m_object->named &&
5012 os_ref_get_count_raw(&m_object->ref_count) == 1) {
5013 vm_page_speculate(m, FALSE);
5014 #if DEVELOPMENT || DEBUG
5015 vm_page_speculative_recreated++;
5016 #endif
5017 } else {
5018 vm_page_enqueue_inactive(m, FALSE);
5019 }
5020 }
5021 }
5022 }
5023
5024 /*
5025 * vm_page_enqueue_cleaned
5026 *
5027 * Put the page on the cleaned queue, mark it cleaned, etc.
5028 * Being on the cleaned queue (and having m->clean_queue set)
5029 * does ** NOT ** guarantee that the page is clean!
5030 *
5031 * Call with the queues lock held.
5032 */
5033
5034 void
5035 vm_page_enqueue_cleaned(vm_page_t m)
5036 {
5037 vm_object_t m_object;
5038
5039 m_object = VM_PAGE_OBJECT(m);
5040
5041 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5042 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5043 assert( !(m->vmp_absent && !m->vmp_unusual));
5044
5045 if (VM_PAGE_WIRED(m)) {
5046 return;
5047 }
5048
5049 if (m->vmp_gobbled) {
5050 if (!m->vmp_private && !m->vmp_fictitious) {
5051 vm_page_wire_count--;
5052 }
5053 vm_page_gobble_count--;
5054 m->vmp_gobbled = FALSE;
5055 }
5056 /*
5057 * if this page is currently on the pageout queue, we can't do the
5058 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5059 * and we can't remove it manually since we would need the object lock
5060 * (which is not required here) to decrement the activity_in_progress
5061 * reference which is held on the object while the page is in the pageout queue...
5062 * just let the normal laundry processing proceed
5063 */
5064 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5065 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
5066 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5067 return;
5068 }
5069 vm_page_queues_remove(m, FALSE);
5070
5071 vm_page_check_pageable_safe(m);
5072 vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
5073 m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
5074 vm_page_cleaned_count++;
5075
5076 vm_page_inactive_count++;
5077 if (m_object->internal) {
5078 vm_page_pageable_internal_count++;
5079 } else {
5080 vm_page_pageable_external_count++;
5081 }
5082 vm_page_add_to_specialq(m, TRUE);
5083 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
5084 }
5085
5086 /*
5087 * vm_page_activate:
5088 *
5089 * Put the specified page on the active list (if appropriate).
5090 *
5091 * The page queues must be locked.
5092 */
5093
5094 void
5095 vm_page_activate(
5096 vm_page_t m)
5097 {
5098 vm_object_t m_object;
5099
5100 m_object = VM_PAGE_OBJECT(m);
5101
5102 VM_PAGE_CHECK(m);
5103 #ifdef FIXME_4778297
5104 assert(!is_kernel_object(m_object));
5105 #endif
5106 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5107 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5108 assert( !(m->vmp_absent && !m->vmp_unusual));
5109
5110 if (m->vmp_gobbled) {
5111 assert( !VM_PAGE_WIRED(m));
5112 if (!m->vmp_private && !m->vmp_fictitious) {
5113 vm_page_wire_count--;
5114 }
5115 vm_page_gobble_count--;
5116 m->vmp_gobbled = FALSE;
5117 }
5118 /*
5119 * if this page is currently on the pageout queue, we can't do the
5120 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5121 * and we can't remove it manually since we would need the object lock
5122 * (which is not required here) to decrement the activity_in_progress
5123 * reference which is held on the object while the page is in the pageout queue...
5124 * just let the normal laundry processing proceed
5125 */
5126 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5127 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5128 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5129 return;
5130 }
5131
5132 #if DEBUG
5133 if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
5134 panic("vm_page_activate: already active");
5135 }
5136 #endif
5137
5138 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5139 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5140 DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5141 }
5142
5143 /*
5144 * A freshly activated page should be promoted in the donation queue.
5145 * So we remove it here, preserving its hint, and re-enqueue
5146 * it in vm_page_enqueue_active.
5147 */
5148 vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5149
5150 if (!VM_PAGE_WIRED(m)) {
5151 vm_page_check_pageable_safe(m);
5152 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5153 m->vmp_dirty && m_object->internal &&
5154 (m_object->purgable == VM_PURGABLE_DENY ||
5155 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5156 m_object->purgable == VM_PURGABLE_VOLATILE)) {
5157 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5158 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5159 vm_page_throttled_count++;
5160 } else {
5161 #if CONFIG_SECLUDED_MEMORY
5162 if (secluded_for_filecache &&
5163 vm_page_secluded_target != 0 &&
5164 num_tasks_can_use_secluded_mem == 0 &&
5165 m_object->eligible_for_secluded &&
5166 !m->vmp_realtime) {
5167 vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5168 m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5169 vm_page_secluded_count++;
5170 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5171 vm_page_secluded_count_inuse++;
5172 assert(!m_object->internal);
5173 // vm_page_pageable_external_count++;
5174 } else
5175 #endif /* CONFIG_SECLUDED_MEMORY */
5176 vm_page_enqueue_active(m, FALSE);
5177 }
5178 m->vmp_reference = TRUE;
5179 m->vmp_no_cache = FALSE;
5180 }
5181 VM_PAGE_CHECK(m);
5182 }
5183
5184
5185 /*
5186 * vm_page_speculate:
5187 *
5188 * Put the specified page on the speculative list (if appropriate).
5189 *
5190 * The page queues must be locked.
5191 */
5192 void
5193 vm_page_speculate(
5194 vm_page_t m,
5195 boolean_t new)
5196 {
5197 struct vm_speculative_age_q *aq;
5198 vm_object_t m_object;
5199
5200 m_object = VM_PAGE_OBJECT(m);
5201
5202 VM_PAGE_CHECK(m);
5203 vm_page_check_pageable_safe(m);
5204
5205 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5206 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5207 assert( !(m->vmp_absent && !m->vmp_unusual));
5208 assert(m_object->internal == FALSE);
5209
5210 /*
5211 * if this page is currently on the pageout queue, we can't do the
5212 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5213 * and we can't remove it manually since we would need the object lock
5214 * (which is not required here) to decrement the activity_in_progress
5215 * reference which is held on the object while the page is in the pageout queue...
5216 * just let the normal laundry processing proceed
5217 */
5218 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5219 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5220 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5221 return;
5222 }
5223
5224 vm_page_queues_remove(m, FALSE);
5225
5226 if (!VM_PAGE_WIRED(m)) {
5227 mach_timespec_t ts;
5228 clock_sec_t sec;
5229 clock_nsec_t nsec;
5230
5231 clock_get_system_nanotime(&sec, &nsec);
5232 ts.tv_sec = (unsigned int) sec;
5233 ts.tv_nsec = nsec;
5234
5235 if (vm_page_speculative_count == 0) {
5236 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5237 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5238
5239 aq = &vm_page_queue_speculative[speculative_age_index];
5240
5241 /*
5242 * set the timer to begin a new group
5243 */
5244 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5245 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
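/*
 * For example (illustrative numbers): vm_page_speculative_q_age_ms
 * == 1500 yields tv_sec = 1 and tv_nsec = 500 * 1000 * NSEC_PER_USEC,
 * i.e. the remaining 500ms expressed in nanoseconds.
 */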
5246
5247 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5248 } else {
5249 aq = &vm_page_queue_speculative[speculative_age_index];
5250
5251 if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5252 speculative_age_index++;
5253
5254 if (speculative_age_index > vm_page_max_speculative_age_q) {
5255 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5256 }
5257 if (speculative_age_index == speculative_steal_index) {
5258 speculative_steal_index = speculative_age_index + 1;
5259
5260 if (speculative_steal_index > vm_page_max_speculative_age_q) {
5261 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5262 }
5263 }
5264 aq = &vm_page_queue_speculative[speculative_age_index];
5265
5266 if (!vm_page_queue_empty(&aq->age_q)) {
5267 vm_page_speculate_ageit(aq);
5268 }
5269
5270 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5271 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5272
5273 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5274 }
5275 }
5276 vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5277 m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5278 vm_page_speculative_count++;
5279 vm_page_pageable_external_count++;
5280
5281 if (new == TRUE) {
5282 vm_object_lock_assert_exclusive(m_object);
5283
5284 m_object->pages_created++;
5285 #if DEVELOPMENT || DEBUG
5286 vm_page_speculative_created++;
5287 #endif
5288 }
5289 }
5290 VM_PAGE_CHECK(m);
5291 }
5292
5293
5294 /*
5295 * Move pages from the specified aging bin to
5296 * the speculative bin that pageout_scan claims from.
5297 *
5298 * The page queues must be locked.
5299 */
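/*
 * Sketch of the splice performed below when sq is not empty
 * (A..B are aq's pages, S is sq's current tail):
 *
 *	before:  sq: head <-> ... <-> S     aq: head <-> A <-> ... <-> B
 *	after:   sq: head <-> ... <-> S <-> A <-> ... <-> B     aq: empty
 */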
5300 void
5301 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5302 {
5303 struct vm_speculative_age_q *sq;
5304 vm_page_t t;
5305
5306 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5307
5308 if (vm_page_queue_empty(&sq->age_q)) {
5309 sq->age_q.next = aq->age_q.next;
5310 sq->age_q.prev = aq->age_q.prev;
5311
5312 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5313 t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5314
5315 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5316 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5317 } else {
5318 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5319 t->vmp_pageq.next = aq->age_q.next;
5320
5321 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5322 t->vmp_pageq.prev = sq->age_q.prev;
5323
5324 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5325 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5326
5327 sq->age_q.prev = aq->age_q.prev;
5328 }
5329 vm_page_queue_init(&aq->age_q);
5330 }
5331
5332
5333 void
5334 vm_page_lru(
5335 vm_page_t m)
5336 {
5337 VM_PAGE_CHECK(m);
5338 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5339 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5340
5341 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5342
5343 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5344 /*
5345 * we don't need to do all the other work that
5346 * vm_page_queues_remove and vm_page_enqueue_inactive
5347 * bring along for the ride
5348 */
5349 assert(!m->vmp_laundry);
5350 assert(!m->vmp_private);
5351
5352 m->vmp_no_cache = FALSE;
5353
5354 vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5355 vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5356
5357 return;
5358 }
5359 /*
5360 * if this page is currently on the pageout queue, we can't do the
5361 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5362 * and we can't remove it manually since we would need the object lock
5363 * (which is not required here) to decrement the activity_in_progress
5364 * reference which is held on the object while the page is in the pageout queue...
5365 * just let the normal laundry processing proceed
5366 */
5367 if (m->vmp_laundry || m->vmp_private ||
5368 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5369 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5370 VM_PAGE_WIRED(m)) {
5371 return;
5372 }
5373
5374 m->vmp_no_cache = FALSE;
5375
5376 vm_page_queues_remove(m, FALSE);
5377
5378 vm_page_enqueue_inactive(m, FALSE);
5379 }
5380
5381
5382 void
5383 vm_page_reactivate_all_throttled(void)
5384 {
5385 vm_page_t first_throttled, last_throttled;
5386 vm_page_t first_active;
5387 vm_page_t m;
5388 int extra_active_count;
5389 int extra_internal_count, extra_external_count;
5390 vm_object_t m_object;
5391
5392 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5393 return;
5394 }
5395
5396 extra_active_count = 0;
5397 extra_internal_count = 0;
5398 extra_external_count = 0;
5399 vm_page_lock_queues();
5400 if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5401 /*
5402 * Switch "throttled" pages to "active".
5403 */
5404 vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5405 VM_PAGE_CHECK(m);
5406 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5407
5408 m_object = VM_PAGE_OBJECT(m);
5409
5410 extra_active_count++;
5411 if (m_object->internal) {
5412 extra_internal_count++;
5413 } else {
5414 extra_external_count++;
5415 }
5416
5417 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5418 VM_PAGE_CHECK(m);
5419 vm_page_add_to_specialq(m, FALSE);
5420 }
5421
5422 /*
5423 * Transfer the entire throttled queue to the regular LRU page queues.
5424 * We insert it at the head of the active queue, so that these pages
5425 * get re-evaluated by the LRU algorithm first, since they've been
5426 * completely out of it until now.
5427 */
5428 first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5429 last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5430 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5431 if (vm_page_queue_empty(&vm_page_queue_active)) {
5432 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5433 } else {
5434 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5435 }
5436 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5437 first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5438 last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5439
5440 #if DEBUG
5441 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5442 #endif
5443 vm_page_queue_init(&vm_page_queue_throttled);
5444 /*
5445 * Adjust the global page counts.
5446 */
5447 vm_page_active_count += extra_active_count;
5448 vm_page_pageable_internal_count += extra_internal_count;
5449 vm_page_pageable_external_count += extra_external_count;
5450 vm_page_throttled_count = 0;
5451 }
5452 assert(vm_page_throttled_count == 0);
5453 assert(vm_page_queue_empty(&vm_page_queue_throttled));
5454 vm_page_unlock_queues();
5455 }
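/*
 * Illustrative sketch (not part of the build): the pointer surgery above is
 * the generic O(1) "prepend list B onto list A" splice for a circular
 * doubly-linked list with a head sentinel, written here with plain pointers
 * for clarity.  The VM_PAGE_CONVERT_TO_QUEUE_ENTRY() forms do exactly this,
 * modulo pointer packing; the same pattern reappears in
 * vm_page_reactivate_local() below.  The 'dq' type is hypothetical.
 */
#if 0
struct dq { struct dq *next, *prev; };

static void
dq_prepend_all(struct dq *a, struct dq *b)	/* b must be non-empty */
{
	struct dq *first_a = a->next;		/* == a when 'a' is empty */

	b->prev->next = first_a;		/* tail of B -> old head of A */
	first_a->prev = b->prev;
	a->next = b->next;			/* head of A is now head of B */
	b->next->prev = a;
	b->next = b->prev = b;			/* reinitialize B as empty */
}
#endif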
5456
5457
5458 /*
5459 * move pages from the indicated local queue to the global active queue
5460 * it's OK to fail if we're below the hard limit and force == FALSE
5461 * the nolocks == TRUE case is to allow this function to be run on
5462 * the hibernate path
5463 */
5464
5465 void
5466 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5467 {
5468 struct vpl *lq;
5469 vm_page_t first_local, last_local;
5470 vm_page_t first_active;
5471 vm_page_t m;
5472 uint32_t count = 0;
5473
5474 if (vm_page_local_q == NULL) {
5475 return;
5476 }
5477
5478 lq = zpercpu_get_cpu(vm_page_local_q, lid);
5479
5480 if (nolocks == FALSE) {
5481 if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5482 if (!vm_page_trylockspin_queues()) {
5483 return;
5484 }
5485 } else {
5486 vm_page_lockspin_queues();
5487 }
5488
5489 VPL_LOCK(&lq->vpl_lock);
5490 }
5491 if (lq->vpl_count) {
5492 /*
5493 * Switch "local" pages to "active".
5494 */
5495 assert(!vm_page_queue_empty(&lq->vpl_queue));
5496
5497 vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5498 VM_PAGE_CHECK(m);
5499 vm_page_check_pageable_safe(m);
5500 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5501 assert(!m->vmp_fictitious);
5502
5503 if (m->vmp_local_id != lid) {
5504 panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5505 }
5506
5507 m->vmp_local_id = 0;
5508 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5509 VM_PAGE_CHECK(m);
5510 vm_page_add_to_specialq(m, FALSE);
5511 count++;
5512 }
5513 if (count != lq->vpl_count) {
5514 panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5515 }
5516
5517 /*
5518 * Transfer the entire local queue to the regular LRU page queues.
5519 */
5520 first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5521 last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5522 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5523
5524 if (vm_page_queue_empty(&vm_page_queue_active)) {
5525 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5526 } else {
5527 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5528 }
5529 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5530 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5531 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5532
5533 vm_page_queue_init(&lq->vpl_queue);
5534 /*
5535 * Adjust the global page counts.
5536 */
5537 vm_page_active_count += lq->vpl_count;
5538 vm_page_pageable_internal_count += lq->vpl_internal_count;
5539 vm_page_pageable_external_count += lq->vpl_external_count;
5540 lq->vpl_count = 0;
5541 lq->vpl_internal_count = 0;
5542 lq->vpl_external_count = 0;
5543 }
5544 assert(vm_page_queue_empty(&lq->vpl_queue));
5545
5546 if (nolocks == FALSE) {
5547 VPL_UNLOCK(&lq->vpl_lock);
5548
5549 vm_page_balance_inactive(count / 4);
5550 vm_page_unlock_queues();
5551 }
5552 }
5553
5554 /*
5555 * vm_page_part_zero_fill:
5556 *
5557 * Zero-fill a part of the page.
5558 */
5559 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5560 void
5561 vm_page_part_zero_fill(
5562 vm_page_t m,
5563 vm_offset_t m_pa,
5564 vm_size_t len)
5565 {
5566 #if 0
5567 /*
5568 * we don't hold the page queue lock
5569 * so this check isn't safe to make
5570 */
5571 VM_PAGE_CHECK(m);
5572 #endif
5573
5574 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5575 pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5576 #else
5577 vm_page_t tmp;
5578 while (1) {
5579 tmp = vm_page_grab();
5580 if (tmp == VM_PAGE_NULL) {
5581 vm_page_wait(THREAD_UNINT);
5582 continue;
5583 }
5584 break;
5585 }
5586 vm_page_zero_fill(tmp);
5587 if (m_pa != 0) {
5588 vm_page_part_copy(m, 0, tmp, 0, m_pa);
5589 }
5590 if ((m_pa + len) < PAGE_SIZE) {
5591 vm_page_part_copy(m, m_pa + len, tmp,
5592 m_pa + len, PAGE_SIZE - (m_pa + len));
5593 }
5594 vm_page_copy(tmp, m);
5595 VM_PAGE_FREE(tmp);
5596 #endif
5597 }
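/*
 * Usage sketch (hypothetical values): a pager that filled only the first
 * 'valid' bytes of a page, e.g. at end-of-file, can zero the stale tail:
 *
 *	vm_size_t valid = 512;
 *	vm_page_part_zero_fill(m, valid, PAGE_SIZE - valid);
 */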
5598
5599 /*
5600 * vm_page_zero_fill:
5601 *
5602 * Zero-fill the specified page.
5603 */
5604 void
5605 vm_page_zero_fill(
5606 vm_page_t m)
5607 {
5608 #if 0
5609 /*
5610 * we don't hold the page queue lock
5611 * so this check isn't safe to make
5612 */
5613 VM_PAGE_CHECK(m);
5614 #endif
5615
5616 // dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
5617 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5618 }
5619
5620 /*
5621 * vm_page_part_copy:
5622 *
5623 * copy part of one page to another
5624 */
5625
5626 void
5627 vm_page_part_copy(
5628 vm_page_t src_m,
5629 vm_offset_t src_pa,
5630 vm_page_t dst_m,
5631 vm_offset_t dst_pa,
5632 vm_size_t len)
5633 {
5634 #if 0
5635 /*
5636 * we don't hold the page queue lock
5637 * so this check isn't safe to make
5638 */
5639 VM_PAGE_CHECK(src_m);
5640 VM_PAGE_CHECK(dst_m);
5641 #endif
5642 pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5643 VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5644 }
5645
5646 /*
5647 * vm_page_copy:
5648 *
5649 * Copy one page to another
5650 */
5651
5652 int vm_page_copy_cs_validations = 0;
5653 int vm_page_copy_cs_tainted = 0;
5654
5655 void
5656 vm_page_copy(
5657 vm_page_t src_m,
5658 vm_page_t dest_m)
5659 {
5660 vm_object_t src_m_object;
5661
5662 src_m_object = VM_PAGE_OBJECT(src_m);
5663
5664 #if 0
5665 /*
5666 * we don't hold the page queue lock
5667 * so this check isn't safe to make
5668 */
5669 VM_PAGE_CHECK(src_m);
5670 VM_PAGE_CHECK(dest_m);
5671 #endif
5672 vm_object_lock_assert_held(src_m_object);
5673
5674 #if CONFIG_SPTM
5675 sptm_paddr_t src_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(src_m));
5676 sptm_frame_type_t src_frame_type = sptm_get_frame_type(src_paddr);
5677 if (src_frame_type == XNU_KERNEL_RESTRICTED) {
5678 panic("%s: cannot copy from a restricted page", __func__);
5679 }
5680 #endif /* CONFIG_SPTM */
5681
5682 if (src_m_object != VM_OBJECT_NULL &&
5683 src_m_object->code_signed) {
5684 /*
5685 * We're copying a page from a code-signed object.
5686 * Whoever ends up mapping the copy page might care about
5687 * the original page's integrity, so let's validate the
5688 * source page now.
5689 */
5690 vm_page_copy_cs_validations++;
5691 vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5692 #if DEVELOPMENT || DEBUG
5693 DTRACE_VM4(codesigned_copy,
5694 vm_object_t, src_m_object,
5695 vm_object_offset_t, src_m->vmp_offset,
5696 int, src_m->vmp_cs_validated,
5697 int, src_m->vmp_cs_tainted);
5698 #endif /* DEVELOPMENT || DEBUG */
5699 }
5700
5701 /*
5702 * Propagate the cs_tainted bit to the copy page. Do not propagate
5703 * the cs_validated bit.
5704 */
5705 dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5706 dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5707 if (dest_m->vmp_cs_tainted) {
5708 vm_page_copy_cs_tainted++;
5709 }
5710 dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5711 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5712 }
5713
5714 #if MACH_ASSERT
5715 static void
5716 _vm_page_print(
5717 vm_page_t p)
5718 {
5719 printf("vm_page %p: \n", p);
5720 printf(" pageq: next=%p prev=%p\n",
5721 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5722 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5723 printf(" listq: next=%p prev=%p\n",
5724 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5725 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5726 printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5727 printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5728 printf(" wire_count=%u\n", p->vmp_wire_count);
5729 printf(" q_state=%u\n", p->vmp_q_state);
5730
5731 printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
5732 (p->vmp_laundry ? "" : "!"),
5733 (p->vmp_reference ? "" : "!"),
5734 (p->vmp_gobbled ? "" : "!"),
5735 (p->vmp_private ? "" : "!"));
5736 printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5737 (p->vmp_busy ? "" : "!"),
5738 (p->vmp_wanted ? "" : "!"),
5739 (p->vmp_tabled ? "" : "!"),
5740 (p->vmp_fictitious ? "" : "!"),
5741 (p->vmp_pmapped ? "" : "!"),
5742 (p->vmp_wpmapped ? "" : "!"));
5743 printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5744 (p->vmp_free_when_done ? "" : "!"),
5745 (p->vmp_absent ? "" : "!"),
5746 (VMP_ERROR_GET(p) ? "" : "!"),
5747 (p->vmp_dirty ? "" : "!"),
5748 (p->vmp_cleaning ? "" : "!"),
5749 (p->vmp_precious ? "" : "!"),
5750 (p->vmp_clustered ? "" : "!"));
5751 printf(" %soverwriting, %srestart, %sunusual\n",
5752 (p->vmp_overwriting ? "" : "!"),
5753 (p->vmp_restart ? "" : "!"),
5754 (p->vmp_unusual ? "" : "!"));
5755 printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5756 p->vmp_cs_validated,
5757 p->vmp_cs_tainted,
5758 p->vmp_cs_nx,
5759 (p->vmp_no_cache ? "" : "!"));
5760
5761 printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5762 }
5763
5764 /*
5765 * Check that the list of pages is ordered by
5766 * ascending physical address and has no holes.
5767 */
5768 static int
5769 vm_page_verify_contiguous(
5770 vm_page_t pages,
5771 unsigned int npages)
5772 {
5773 vm_page_t m;
5774 unsigned int page_count;
5775 vm_offset_t prev_addr;
5776
5777 prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5778 page_count = 1;
5779 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5780 if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5781 printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5782 m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5783 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5784 panic("vm_page_verify_contiguous: not contiguous!");
5785 }
5786 prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5787 ++page_count;
5788 }
5789 if (page_count != npages) {
5790 printf("pages %p actual count 0x%x but requested 0x%x\n",
5791 pages, page_count, npages);
5792 panic("vm_page_verify_contiguous: count error");
5793 }
5794 return 1;
5795 }
5796
5797
5798 /*
5799 * Check the free lists for proper length etc.
5800 */
5801 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5802 static unsigned int
5803 vm_page_verify_free_list(
5804 vm_page_queue_head_t *vm_page_queue,
5805 unsigned int color,
5806 vm_page_t look_for_page,
5807 boolean_t expect_page)
5808 {
5809 unsigned int npages;
5810 vm_page_t m;
5811 vm_page_t prev_m;
5812 boolean_t found_page;
5813
5814 if (!vm_page_verify_this_free_list_enabled) {
5815 return 0;
5816 }
5817
5818 found_page = FALSE;
5819 npages = 0;
5820 prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5821
5822 vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5823 if (m == look_for_page) {
5824 found_page = TRUE;
5825 }
5826 if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5827 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5828 color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5829 }
5830 if (!m->vmp_busy) {
5831 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5832 color, npages, m);
5833 }
5834 if (color != (unsigned int) -1) {
5835 if (VM_PAGE_GET_COLOR(m) != color) {
5836 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5837 color, npages, m, VM_PAGE_GET_COLOR(m), color);
5838 }
5839 if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5840 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5841 color, npages, m, m->vmp_q_state);
5842 }
5843 } else {
5844 if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5845 panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5846 npages, m, m->vmp_q_state);
5847 }
5848 }
5849 ++npages;
5850 prev_m = m;
5851 }
5852 if (look_for_page != VM_PAGE_NULL) {
5853 unsigned int other_color;
5854
5855 if (expect_page && !found_page) {
5856 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5857 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5858 _vm_page_print(look_for_page);
5859 for (other_color = 0;
5860 other_color < vm_colors;
5861 other_color++) {
5862 if (other_color == color) {
5863 continue;
5864 }
5865 vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5866 other_color, look_for_page, FALSE);
5867 }
5868 if (color == (unsigned int) -1) {
5869 vm_page_verify_free_list(&vm_lopage_queue_free,
5870 (unsigned int) -1, look_for_page, FALSE);
5871 }
5872 panic("vm_page_verify_free_list(color=%u)", color);
5873 }
5874 if (!expect_page && found_page) {
5875 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5876 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5877 }
5878 }
5879 return npages;
5880 }
5881
5882 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5883 static void
5884 vm_page_verify_free_lists(void)
5885 {
5886 unsigned int color, npages, nlopages;
5887 boolean_t toggle = TRUE;
5888
5889 if (!vm_page_verify_all_free_lists_enabled) {
5890 return;
5891 }
5892
5893 npages = 0;
5894
5895 vm_free_page_lock();
5896
5897 if (vm_page_verify_this_free_list_enabled == TRUE) {
5898 /*
5899 * This variable has been set globally for extra checking of
5900 * each free list Q. Since we didn't set it, we don't own it
5901 * and we shouldn't toggle it.
5902 */
5903 toggle = FALSE;
5904 }
5905
5906 if (toggle == TRUE) {
5907 vm_page_verify_this_free_list_enabled = TRUE;
5908 }
5909
5910 for (color = 0; color < vm_colors; color++) {
5911 npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5912 color, VM_PAGE_NULL, FALSE);
5913 }
5914 nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5915 (unsigned int) -1,
5916 VM_PAGE_NULL, FALSE);
5917 if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5918 panic("vm_page_verify_free_lists: "
5919 "npages %u free_count %d nlopages %u lo_free_count %u",
5920 npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5921 }
5922
5923 if (toggle == TRUE) {
5924 vm_page_verify_this_free_list_enabled = FALSE;
5925 }
5926
5927 vm_free_page_unlock();
5928 }
5929
5930 #endif /* MACH_ASSERT */
5931
5932 /*
5933 * wrapper for pmap_enter()
5934 */
5935 kern_return_t
5936 pmap_enter_check(
5937 pmap_t pmap,
5938 vm_map_address_t virtual_address,
5939 vm_page_t page,
5940 vm_prot_t protection,
5941 vm_prot_t fault_type,
5942 unsigned int flags,
5943 boolean_t wired)
5944 {
5945 int options = 0;
5946 vm_object_t obj;
5947
5948 if (VMP_ERROR_GET(page)) {
5949 return KERN_MEMORY_FAILURE;
5950 }
5951 obj = VM_PAGE_OBJECT(page);
5952 if (obj->internal) {
5953 options |= PMAP_OPTIONS_INTERNAL;
5954 }
5955 if (page->vmp_reusable || obj->all_reusable) {
5956 options |= PMAP_OPTIONS_REUSABLE;
5957 }
5958 return pmap_enter_options(pmap,
5959 virtual_address,
5960 VM_PAGE_GET_PHYS_PAGE(page),
5961 protection,
5962 fault_type,
5963 flags,
5964 wired,
5965 options,
5966 NULL,
5967 PMAP_MAPPING_TYPE_INFER);
5968 }
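/*
 * Usage sketch: enter a wired, read/write kernel mapping for 'page',
 * bailing out if the page was marked with an error (the check above
 * returns KERN_MEMORY_FAILURE in that case).  'vaddr' is assumed to be
 * a kernel virtual address reserved by the caller.
 *
 *	kern_return_t kr;
 *
 *	kr = pmap_enter_check(kernel_pmap, vaddr, page,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
 *	if (kr == KERN_MEMORY_FAILURE) {
 *		... don't map or touch this page ...
 *	}
 */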
5969
5970
5971 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5972
5973 /*
5974 * CONTIGUOUS PAGE ALLOCATION AND HELPER FUNCTIONS
5975 */
5976
5977 /*
5978 * Helper function used to determine if a page can be relocated
5979 * A page is relocatable if it is in a stable non-transient state
5980 */
5981 static inline boolean_t
5982 vm_page_is_relocatable(vm_page_t m)
5983 {
5984
5985 if (VM_PAGE_WIRED(m) || m->vmp_gobbled || m->vmp_laundry || m->vmp_wanted ||
5986 m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5987 /*
5988 * Page is in a transient state
5989 * or a state we don't want to deal with.
5990 */
5991 return FALSE;
5992 } else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5993 (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5994 (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5995 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5996 /*
5997 * Page needs to be on one of our queues (other than the pageout or special
5998 * free queues) or it needs to belong to the compressor pool (which is now
5999 * indicated by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out from
6000 * the check for VM_PAGE_NOT_ON_Q) in order for it to be stable behind the
6001 * locks we hold at this point...
6002 */
6003 return FALSE;
6004 } else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) &&
6005 (!m->vmp_tabled || m->vmp_busy)) {
6006 /*
6007 * pages on the free list are always 'busy'
6008 * so we couldn't test for 'busy' in the check
6009 * for the transient states... pages that are
6010 * 'free' are never 'tabled', so we also couldn't
6011 * test for 'tabled'. So we check here to make
6012 * sure that a non-free page is not busy and is
6013 * tabled on an object...
6014 */
6015 return FALSE;
6016 }
6017 return TRUE;
6018 }
6019
6020 /*
6021 * Free up the given page by possibly relocating its contents to a new page.
6022 * If the page is on an object, the object lock must be held.
6023 */
6024 static kern_return_t
6025 vm_page_relocate(vm_page_t m1, int *compressed_pages)
6026 {
6027 int refmod = 0;
6028 vm_object_t object = VM_PAGE_OBJECT(m1);
6029 kern_return_t kr;
6030
6031 if (object == VM_OBJECT_NULL) {
6032 return KERN_FAILURE;
6033 }
6034
6035 vm_object_lock_assert_held(object);
6036
6037 if (VM_PAGE_WIRED(m1) ||
6038 m1->vmp_gobbled ||
6039 m1->vmp_laundry ||
6040 m1->vmp_wanted ||
6041 m1->vmp_cleaning ||
6042 m1->vmp_overwriting ||
6043 m1->vmp_free_when_done ||
6044 m1->vmp_busy ||
6045 m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6046 return KERN_FAILURE;
6047 }
6048
6049 boolean_t disconnected = FALSE;
6050 boolean_t reusable = FALSE;
6051
6052 /*
6053 * Pages from reusable objects can be reclaimed directly.
6054 */
6055 if ((m1->vmp_reusable || object->all_reusable) &&
6056 m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q && !m1->vmp_dirty &&
6057 !m1->vmp_reference) {
6058 /*
6059 * reusable page...
6060 */
6061
6062 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6063 disconnected = TRUE;
6064 if (refmod == 0) {
6065 /*
6066 * ... not reused: can steal without relocating contents.
6067 */
6068 reusable = TRUE;
6069 }
6070 }
6071
6072 if ((m1->vmp_pmapped && !reusable) || m1->vmp_dirty || m1->vmp_precious) {
6073 vm_object_offset_t offset;
6074
6075 /* page is not reusable, we need to allocate a new page
6076 * and move its contents there.
6077 */
6078 vm_page_t m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6079
6080 if (m2 == VM_PAGE_NULL) {
6081 return KERN_RESOURCE_SHORTAGE;
6082 }
6083
6084 if (!disconnected) {
6085 if (m1->vmp_pmapped) {
6086 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6087 } else {
6088 refmod = 0;
6089 }
6090 }
6091
6092 /* copy the page's contents */
6093 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6094
6095 /* copy the page's state */
6096 assert(!VM_PAGE_WIRED(m1));
6097 assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6098 assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6099 assert(!m1->vmp_laundry);
6100 m2->vmp_reference = m1->vmp_reference;
6101 assert(!m1->vmp_gobbled);
6102 assert(!m1->vmp_private);
6103 m2->vmp_no_cache = m1->vmp_no_cache;
6104 m2->vmp_xpmapped = 0;
6105 assert(!m1->vmp_busy);
6106 assert(!m1->vmp_wanted);
6107 assert(!m1->vmp_fictitious);
6108 m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6109 m2->vmp_wpmapped = m1->vmp_wpmapped;
6110 assert(!m1->vmp_free_when_done);
6111 m2->vmp_absent = m1->vmp_absent;
6112 m2->vmp_error = VMP_ERROR_GET(m1);
6113 m2->vmp_dirty = m1->vmp_dirty;
6114 assert(!m1->vmp_cleaning);
6115 m2->vmp_precious = m1->vmp_precious;
6116 m2->vmp_clustered = m1->vmp_clustered;
6117 assert(!m1->vmp_overwriting);
6118 m2->vmp_restart = m1->vmp_restart;
6119 m2->vmp_unusual = m1->vmp_unusual;
6120 m2->vmp_cs_validated = m1->vmp_cs_validated;
6121 m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6122 m2->vmp_cs_nx = m1->vmp_cs_nx;
6123
6124 m2->vmp_realtime = m1->vmp_realtime;
6125 m1->vmp_realtime = false;
6126
6127 /*
6128 * If m1 had really been reusable,
6129 * we would have just stolen it, so
6130 * let's not propagate its "reusable"
6131 * bit and assert that m2 is not
6132 * marked as "reusable".
6133 */
6134 // m2->vmp_reusable = m1->vmp_reusable;
6135 assert(!m2->vmp_reusable);
6136
6137 // assert(!m1->vmp_lopage);
6138
6139 if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6140 m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6141 /*
6142 * We just grabbed m2 up above and so it isn't
6143 * going to be on any special Q as yet and so
6144 * we don't need to 'remove' it from the special
6145 * queues. Just resetting the state should be enough.
6146 */
6147 m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6148 }
6149
6150 /*
6151 * page may need to be flushed if
6152 * it is marshalled into a UPL
6153 * that is going to be used by a device
6154 * that doesn't support coherency
6155 */
6156 m2->vmp_written_by_kernel = TRUE;
6157
6158 /*
6159 * make sure we clear the ref/mod state
6160 * from the pmap layer... else we risk
6161 * inheriting state from the last time
6162 * this page was used...
6163 */
6164 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6165 VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6166
6167 if (refmod & VM_MEM_REFERENCED) {
6168 m2->vmp_reference = TRUE;
6169 }
6170 if (refmod & VM_MEM_MODIFIED) {
6171 SET_PAGE_DIRTY(m2, TRUE);
6172 }
6173 offset = m1->vmp_offset;
6174
6175 /*
6176 * completely cleans up the state
6177 * of the page so that it is ready
6178 * to be put onto the free list, or
6179 * for this purpose it looks like it
6180 * just came off of the free list
6181 */
6182 vm_page_free_prepare(m1);
6183
6184 /*
6185 * now put the substitute page on the object
6186 */
6187 vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6188 TRUE, FALSE, FALSE, NULL);
6189
6190 if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6191 m2->vmp_pmapped = TRUE;
6192 m2->vmp_wpmapped = TRUE;
6193
6194 kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6195 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
6196
6197 assert(kr == KERN_SUCCESS);
6198
6199 if (compressed_pages) {
6200 ++*compressed_pages;
6201 }
6202 } else {
6203 /* relocated page was not used by the compressor
6204 * put it on either the active or inactive lists */
6205 if (m2->vmp_reference) {
6206 vm_page_activate(m2);
6207 } else {
6208 vm_page_deactivate(m2);
6209 }
6210 }
6211
6212 /* unset the busy flag (pages on the free queue are busy) and notify if wanted */
6213 vm_page_wakeup_done(object, m2);
6214
6215 return KERN_SUCCESS;
6216 } else {
6217 assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6218
6219 /*
6220 * completely cleans up the state
6221 * of the page so that it is ready
6222 * to be put onto the free list, or
6223 * for this purpose it looks like it
6224 * just came off of the free list
6225 */
6226 vm_page_free_prepare(m1);
6227
6228 /* we're done here */
6229 return KERN_SUCCESS;
6230 }
6231
6232 return KERN_FAILURE;
6233 }
6234
6235 /*
6236 * CONTIGUOUS PAGE ALLOCATION
6237 *
6238 * Find a region large enough to contain at least n pages
6239 * of contiguous physical memory.
6240 *
6241 * This is done by traversing the vm_page_t array in a linear fashion
6242 * we assume that the vm_page_t array has the available physical pages in an
6243 * ordered, ascending list... this is currently true of all our implementations
6244 * and must remain so... there can be 'holes' in the array... we also can
6245 * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
6246 * which used to happen via 'vm_page_convert'... that function was no longer
6247 * being called and was removed...
6248 *
6249 * The basic flow consists of stabilizing some of the interesting state of
6250 * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
6251 * sweep at the beginning of the array looking for pages that meet our criteria
6252 * for a 'stealable' page... currently we are pretty conservative... if the page
6253 * meets these criteria and is physically contiguous to the previous page in the 'run'
6254 * we keep developing it. If we hit a page that doesn't fit, we reset our state
6255 * and start to develop a new run... if at this point we've already considered
6256 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
6257 * and mutex_pause (which will yield the processor), to keep the latency low w/r
6258 * to other threads trying to acquire free pages (or move pages from q to q),
6259 * and then continue from the spot we left off... we only make 1 pass through the
6260 * array. Once we have a 'run' that is long enough, we'll go into the loop
6261 * which steals the pages from the queues they're currently on... pages on the free
6262 * queue can be stolen directly... pages that are on any of the other queues
6263 * must be removed from the object they are tabled on... this requires taking the
6264 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
6265 * or if the state of the page behind the vm_object lock is no longer viable, we'll
6266 * dump the pages we've currently stolen back to the free list, and pick up our
6267 * scan from the point where we aborted the 'current' run.
6268 *
6269 *
6270 * Requirements:
6271 * - neither vm_page_queue nor vm_free_list lock can be held on entry
6272 *
6273 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
6276 */
6277
6278 #define MAX_CONSIDERED_BEFORE_YIELD 1000
6279
6280
6281 #define RESET_STATE_OF_RUN() \
6282 MACRO_BEGIN \
6283 prevcontaddr = -2; \
6284 start_pnum = -1; \
6285 free_considered = 0; \
6286 substitute_needed = 0; \
6287 npages = 0; \
6288 MACRO_END
6289
6290 /*
6291 * Can we steal in-use (i.e. not free) pages when searching for
6292 * physically-contiguous pages ?
6293 */
6294 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
6295
6296 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
6297 #if DEBUG
6298 int vm_page_find_contig_debug = 0;
6299 #endif
6300
6301 static vm_page_t
6302 vm_page_find_contiguous(
6303 unsigned int contig_pages,
6304 ppnum_t max_pnum,
6305 ppnum_t pnum_mask,
6306 boolean_t wire,
6307 int flags)
6308 {
6309 vm_page_t m = NULL;
6310 ppnum_t prevcontaddr = 0;
6311 ppnum_t start_pnum = 0;
6312 unsigned int npages = 0, considered = 0, scanned = 0;
6313 unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
6314 unsigned int idx_last_contig_page_found = 0;
6315 int free_considered = 0, free_available = 0;
6316 int substitute_needed = 0;
6317 int zone_gc_called = 0;
6318 boolean_t wrapped;
6319 kern_return_t kr;
6320 #if DEBUG
6321 clock_sec_t tv_start_sec = 0, tv_end_sec = 0;
6322 clock_usec_t tv_start_usec = 0, tv_end_usec = 0;
6323 #endif
6324
6325 int yielded = 0;
6326 int dumped_run = 0;
6327 int stolen_pages = 0;
6328 int compressed_pages = 0;
6329
6330
6331 if (contig_pages == 0) {
6332 return VM_PAGE_NULL;
6333 }
6334
6335 full_scan_again:
6336
6337 #if MACH_ASSERT
6338 vm_page_verify_free_lists();
6339 #endif
6340 #if DEBUG
6341 clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
6342 #endif
6343 PAGE_REPLACEMENT_ALLOWED(TRUE);
6344
6345 /*
6346 * If there are still delayed pages, try to free up some that match.
6347 */
6348 if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
6349 vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
6350 }
6351
6352 vm_page_lock_queues();
6353 vm_free_page_lock();
6354
6355 RESET_STATE_OF_RUN();
6356
6357 scanned = 0;
6358 considered = 0;
6359 free_available = vm_page_free_count - vm_page_free_reserved;
6360
6361 wrapped = FALSE;
6362
6363 if (flags & KMA_LOMEM) {
6364 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
6365 } else {
6366 idx_last_contig_page_found = vm_page_find_contiguous_last_idx;
6367 }
6368
6369 orig_last_idx = idx_last_contig_page_found;
6370 last_idx = orig_last_idx;
6371
6372 for (page_idx = last_idx, start_idx = last_idx;
6373 npages < contig_pages && page_idx < vm_pages_count;
6374 page_idx++) {
6375 retry:
6376 if (wrapped &&
6377 npages == 0 &&
6378 page_idx >= orig_last_idx) {
6379 /*
6380 * We're back where we started and we haven't
6381 * found any suitable contiguous range. Let's
6382 * give up.
6383 */
6384 break;
6385 }
6386 scanned++;
6387 m = &vm_pages[page_idx];
6388
6389 assert(!m->vmp_fictitious);
6390 assert(!m->vmp_private);
6391
6392 if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
6393 /* no more low pages... */
6394 break;
6395 }
6396 if (npages == 0 && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
6397 /*
6398 * not aligned
6399 */
6400 RESET_STATE_OF_RUN();
6401 } else if (!vm_page_is_relocatable(m)) {
6402 /*
6403 * page is not relocatable */
6404 RESET_STATE_OF_RUN();
6405 } else {
6406 if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6407 if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6408 RESET_STATE_OF_RUN();
6409 goto did_consider;
6410 } else {
6411 npages = 1;
6412 start_idx = page_idx;
6413 start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6414 }
6415 } else {
6416 npages++;
6417 }
6418 prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6419
6420 VM_PAGE_CHECK(m);
6421 if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6422 free_considered++;
6423 } else {
6424 /*
6425 * This page is not free.
6426 * If we can't steal used pages,
6427 * we have to give up this run
6428 * and keep looking.
6429 * Otherwise, we might need to
6430 * move the contents of this page
6431 * into a substitute page.
6432 */
6433 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6434 if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6435 substitute_needed++;
6436 }
6437 #else
6438 RESET_STATE_OF_RUN();
6439 #endif
6440 }
6441
6442 if ((free_considered + substitute_needed) > free_available) {
6443 /*
6444 * if we let this run continue
6445 * we will end up dropping the vm_page_free_count
6446 * below the reserve limit... we need to abort
6447 * this run, but we can at least re-consider this
6448 * page... thus the jump back to 'retry'
6449 */
6450 RESET_STATE_OF_RUN();
6451
6452 if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6453 considered++;
6454 goto retry;
6455 }
6456 /*
6457 * free_available == 0
6458 * so can't consider any free pages... if
6459 * we went to retry in this case, we'd
6460 * get stuck looking at the same page
6461 * w/o making any forward progress
6462 * we also want to take this path if we've already
6463 * reached our limit that controls the lock latency
6464 */
6465 }
6466 }
6467 did_consider:
6468 if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6469 PAGE_REPLACEMENT_ALLOWED(FALSE);
6470
6471 vm_free_page_unlock();
6472 vm_page_unlock_queues();
6473
6474 mutex_pause(0);
6475
6476 PAGE_REPLACEMENT_ALLOWED(TRUE);
6477
6478 vm_page_lock_queues();
6479 vm_free_page_lock();
6480
6481 RESET_STATE_OF_RUN();
6482 /*
6483 * reset our free page limit since we
6484 * dropped the lock protecting the vm_page_free_queue
6485 */
6486 free_available = vm_page_free_count - vm_page_free_reserved;
6487 considered = 0;
6488
6489 yielded++;
6490
6491 goto retry;
6492 }
6493 considered++;
6494 } /* main for-loop end */
6495
6496 m = VM_PAGE_NULL;
6497
6498 if (npages != contig_pages) {
6499 if (!wrapped) {
6500 /*
6501 * We didn't find a contiguous range but we didn't
6502 * start from the very first page.
6503 * Start again from the very first page.
6504 */
6505 RESET_STATE_OF_RUN();
6506 if (flags & KMA_LOMEM) {
6507 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0;
6508 } else {
6509 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6510 }
6511 last_idx = 0;
6512 page_idx = last_idx;
6513 wrapped = TRUE;
6514 goto retry;
6515 }
6516 vm_free_page_unlock();
6517 } else {
6518 vm_page_t m1;
6519 unsigned int cur_idx;
6520 unsigned int tmp_start_idx;
6521 vm_object_t locked_object = VM_OBJECT_NULL;
6522 boolean_t abort_run = FALSE;
6523
6524 assert(page_idx - start_idx == contig_pages);
6525
6526 tmp_start_idx = start_idx;
6527
6528 /*
6529 * first pass through to pull the free pages
6530 * off of the free queue so that in case we
6531 * need substitute pages, we won't grab any
6532 * of the free pages in the run... we'll clear
6533 * the 'free' bit in the 2nd pass, and even in
6534 * an abort_run case, we'll collect all of the
6535 * free pages in this run and return them to the free list
6536 */
6537 while (start_idx < page_idx) {
6538 m1 = &vm_pages[start_idx++];
6539
6540 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6541 assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6542 #endif
6543
6544 if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6545 unsigned int color;
6546
6547 color = VM_PAGE_GET_COLOR(m1);
6548 #if MACH_ASSERT
6549 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6550 #endif
6551 vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6552
6553 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6554 #if MACH_ASSERT
6555 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6556 #endif
6557 /*
6558 * Clear the "free" bit so that this page
6559 * does not get considered for another
6560 * concurrent physically-contiguous allocation.
6561 */
6562 m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6563 assert(m1->vmp_busy);
6564
6565 vm_page_free_count--;
6566 }
6567 }
6568 if (flags & KMA_LOMEM) {
6569 vm_page_lomem_find_contiguous_last_idx = page_idx;
6570 } else {
6571 vm_page_find_contiguous_last_idx = page_idx;
6572 }
6573
6574 /*
6575 * we can drop the free queue lock at this point since
6576 * we've pulled any 'free' candidates off of the list
6577 * we need it dropped so that we can do a vm_page_grab
6578 * when substituting for pmapped/dirty pages
6579 */
6580 vm_free_page_unlock();
6581
6582 start_idx = tmp_start_idx;
6583 cur_idx = page_idx - 1;
6584
6585 while (start_idx++ < page_idx) {
6586 /*
6587 * must go through the list from back to front
6588 * so that the page list is created in the
6589 * correct order - low -> high phys addresses
6590 */
6591 m1 = &vm_pages[cur_idx--];
6592
6593 if (m1->vmp_object == 0) {
6594 /*
6595 * page has already been removed from
6596 * the free list in the 1st pass
6597 */
6598 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6599 assert(m1->vmp_offset == (vm_object_offset_t) -1);
6600 assert(m1->vmp_busy);
6601 assert(!m1->vmp_wanted);
6602 assert(!m1->vmp_laundry);
6603 } else {
6604 /*
6605 * try to relocate/steal the page
6606 */
6607 if (abort_run == TRUE) {
6608 continue;
6609 }
6610
6611 assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6612
6613 vm_object_t object = VM_PAGE_OBJECT(m1);
6614
6615 if (object != locked_object) {
6616 if (locked_object) {
6617 vm_object_unlock(locked_object);
6618 locked_object = VM_OBJECT_NULL;
6619 }
6620 if (vm_object_lock_try(object)) {
6621 locked_object = object;
6622 } else {
6623 /* object must be locked to relocate its pages */
6624 tmp_start_idx = cur_idx;
6625 abort_run = TRUE;
6626 continue;
6627 }
6628 }
6629
6630 kr = vm_page_relocate(m1, &compressed_pages);
6631 if (kr != KERN_SUCCESS) {
6632 if (locked_object) {
6633 vm_object_unlock(locked_object);
6634 locked_object = VM_OBJECT_NULL;
6635 }
6636 tmp_start_idx = cur_idx;
6637 abort_run = TRUE;
6638 continue;
6639 }
6640
6641 stolen_pages++;
6642 }
6643
6644 /* m1 is ours at this point ... */
6645
6646 if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6647 /*
6648 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6649 * change it for pages marked as used-by-compressor.
6650 */
6651 vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6652 }
6653 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6654 m1->vmp_snext = m;
6655 m = m1;
6656 }
6657
6658 if (locked_object) {
6659 vm_object_unlock(locked_object);
6660 locked_object = VM_OBJECT_NULL;
6661 }
6662
6663 if (abort_run == TRUE) {
6664 /*
6665 * want the index of the last
6666 * page in this run that was
6667 * successfully 'stolen', so back
6668 * it up 1 for the auto-decrement on use
6669 * and 1 more to bump back over this page
6670 */
6671 page_idx = tmp_start_idx + 2;
6672 if (page_idx >= vm_pages_count) {
6673 if (wrapped) {
6674 if (m != VM_PAGE_NULL) {
6675 vm_page_unlock_queues();
6676 vm_page_free_list(m, FALSE);
6677 vm_page_lock_queues();
6678 m = VM_PAGE_NULL;
6679 }
6680 dumped_run++;
6681 goto done_scanning;
6682 }
6683 page_idx = last_idx = 0;
6684 wrapped = TRUE;
6685 }
6686 abort_run = FALSE;
6687
6688 /*
6689 * This run was aborted; reset the run state and
6690 * resume the scan just past the page that forced
6691 * the abort.
6692 */
6693 RESET_STATE_OF_RUN();
6694
6695 if (flags & KMA_LOMEM) {
6696 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx;
6697 } else {
6698 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6699 }
6700
6701 last_idx = page_idx;
6702
6703 if (m != VM_PAGE_NULL) {
6704 vm_page_unlock_queues();
6705 vm_page_free_list(m, FALSE);
6706 vm_page_lock_queues();
6707 m = VM_PAGE_NULL;
6708 }
6709 dumped_run++;
6710
6711 vm_free_page_lock();
6712 /*
6713 * reset our free page limit since we
6714 * dropped the lock protecting the vm_page_free_queue
6715 */
6716 free_available = vm_page_free_count - vm_page_free_reserved;
6717 goto retry;
6718 }
6719
6720 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6721 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6722 assert(m1->vmp_wire_count == 0);
6723
6724 if (wire == TRUE) {
6725 m1->vmp_wire_count++;
6726 m1->vmp_q_state = VM_PAGE_IS_WIRED;
6727 } else {
6728 m1->vmp_gobbled = TRUE;
6729 }
6730 }
6731 if (wire == FALSE) {
6732 vm_page_gobble_count += npages;
6733 }
6734
6735 /*
6736 * gobbled pages are also counted as wired pages
6737 */
6738 vm_page_wire_count += npages;
6739
6740 assert(vm_page_verify_contiguous(m, npages));
6741 }
6742 done_scanning:
6743 PAGE_REPLACEMENT_ALLOWED(FALSE);
6744
6745 vm_page_unlock_queues();
6746
6747 #if DEBUG
6748 clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6749
6750 tv_end_sec -= tv_start_sec;
6751 if (tv_end_usec < tv_start_usec) {
6752 tv_end_sec--;
6753 tv_end_usec += 1000000;
6754 }
6755 tv_end_usec -= tv_start_usec;
6756 if (tv_end_usec >= 1000000) {
6757 tv_end_sec++;
6758 tv_end_usec -= 1000000;
6759 }
6760 if (vm_page_find_contig_debug) {
6761 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
6762 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6763 (long)tv_end_sec, tv_end_usec, orig_last_idx,
6764 scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6765 }
6766
6767 #endif
6768 #if MACH_ASSERT
6769 vm_page_verify_free_lists();
6770 #endif
6771 if (m == NULL && zone_gc_called < 2) {
6772 printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6773 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6774 scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6775
6776 if (consider_buffer_cache_collect != NULL) {
6777 (void)(*consider_buffer_cache_collect)(1);
6778 }
6779
6780 zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6781
6782 zone_gc_called++;
6783
6784 printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6785 goto full_scan_again;
6786 }
6787
6788 return m;
6789 }
6790
6791 /*
6792 * Allocate a list of contiguous, wired pages.
6793 */
6794 kern_return_t
6795 cpm_allocate(
6796 vm_size_t size,
6797 vm_page_t *list,
6798 ppnum_t max_pnum,
6799 ppnum_t pnum_mask,
6800 boolean_t wire,
6801 int flags)
6802 {
6803 vm_page_t pages;
6804 unsigned int npages;
6805
6806 if (size % PAGE_SIZE != 0) {
6807 return KERN_INVALID_ARGUMENT;
6808 }
6809
6810 npages = (unsigned int) (size / PAGE_SIZE);
6811 if (npages != size / PAGE_SIZE) {
6812 /* 32-bit overflow */
6813 return KERN_INVALID_ARGUMENT;
6814 }
6815
6816 /*
6817 * Obtain a pointer to a subset of the free
6818 * list large enough to satisfy the request;
6819 * the region will be physically contiguous.
6820 */
6821 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6822
6823 if (pages == VM_PAGE_NULL) {
6824 return KERN_NO_SPACE;
6825 }
6826 /*
6827 * determine need for wakeups
6828 */
6829 if (vm_page_free_count < vm_page_free_min) {
6830 vm_free_page_lock();
6831 if (vm_pageout_running == FALSE) {
6832 vm_free_page_unlock();
6833 thread_wakeup((event_t) &vm_page_free_wanted);
6834 } else {
6835 vm_free_page_unlock();
6836 }
6837 }
6838
6839 VM_CHECK_MEMORYSTATUS;
6840
6841 /*
6842 * The CPM pages should now be available and
6843 * ordered by ascending physical address.
6844 */
6845 assert(vm_page_verify_contiguous(pages, npages));
6846
6847 if (flags & KMA_ZERO) {
6848 for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
6849 vm_page_zero_fill(m);
6850 }
6851 }
6852
6853 *list = pages;
6854 return KERN_SUCCESS;
6855 }
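/*
 * Usage sketch (hypothetical DMA buffer): allocate 16 wired, zero-filled,
 * physically contiguous pages whose physical addresses sit below 4GB.
 * The caller still has to map the run somewhere.
 *
 *	vm_page_t pages;
 *	kern_return_t kr;
 *
 *	kr = cpm_allocate(16 * PAGE_SIZE, &pages,
 *	    (ppnum_t)atop(1ULL << 32) - 1,	// max_pnum: stay below 4GB
 *	    0,					// pnum_mask: no alignment constraint
 *	    TRUE,				// wire the pages
 *	    KMA_ZERO);				// zero-fill the run
 *	if (kr == KERN_SUCCESS) {
 *		ppnum_t first = vm_page_get_phys_page(pages);
 *		...
 *	}
 */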
6856
6857
6858 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6859
6860 /*
6861 * when working on a 'run' of pages, it is necessary to hold
6862 * the vm_page_queue_lock (a hot global lock) for certain operations
6863 * on the page... however, the majority of the work can be done
6864 * while merely holding the object lock... in fact there are certain
6865 * collections of pages that don't require any work brokered by the
6866 * vm_page_queue_lock... to mitigate the time spent behind the global
6867 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6868 * while doing all of the work that doesn't require the vm_page_queue_lock...
6869 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6870 * necessary work for each page... we will grab the busy bit on the page
6871 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6872 * if it can't immediately take the vm_page_queue_lock in order to compete
6873 * for the locks in the same order that vm_pageout_scan takes them.
6874 * the operation names are modeled after the names of the routines that
6875 * need to be called in order to make the changes very obvious in the
6876 * original loop
6877 */
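/*
 * Illustrative sketch of the two-pass pattern described above; the flag
 * choice is an example only, and real callers size their arrays from
 * vm_max_delayed_work_limit.  Pass 1 runs under the object lock alone;
 * vm_page_do_delayed_work() then takes the vm_page_queue_lock once for
 * the whole batch:
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *
 *	for each interesting page 'm' in 'object' {
 *		... work requiring only the object lock ...
 *		dwp->dw_m = m;
 *		dwp->dw_mask = DW_vm_page_activate | DW_PAGE_WAKEUP;
 *		dwp++;
 *		if (++dw_count >= vm_max_delayed_work_limit) {
 *			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *			dwp = &dw_array[0];
 *			dw_count = 0;
 *		}
 *	}
 *	if (dw_count) {
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *	}
 */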
6878
6879 void
6880 vm_page_do_delayed_work(
6881 vm_object_t object,
6882 vm_tag_t tag,
6883 struct vm_page_delayed_work *dwp,
6884 int dw_count)
6885 {
6886 int j;
6887 vm_page_t m;
6888 vm_page_t local_free_q = VM_PAGE_NULL;
6889
6890 /*
6891 * pageout_scan takes the vm_page_lock_queues first
6892 * then tries for the object lock... to avoid what
6893 * is effectively a lock inversion, we'll go to the
6894 * trouble of taking them in that same order... otherwise
6895 * if this object contains the majority of the pages resident
6896 * in the UBC (or a small set of large objects actively being
6897 * worked on contain the majority of the pages), we could
6898 * cause the pageout_scan thread to 'starve' in its attempt
6899 * to find pages to move to the free queue, since it has to
6900 * successfully acquire the object lock of any candidate page
6901 * before it can steal/clean it.
6902 */
6903 if (!vm_page_trylock_queues()) {
6904 vm_object_unlock(object);
6905
6906 /*
6907 * "Turnstile enabled vm_pageout_scan" can be runnable
6908 * for a very long time without getting on a core.
6909 * If this is a higher priority thread it could be
6910 * waiting here for a very long time respecting the fact
6911 * that pageout_scan would like its object after VPS does
6912 * a mutex_pause(0).
6913 * So we cap the number of yields in the vm_object_lock_avoid()
6914 * case to a single mutex_pause(0) which will give vm_pageout_scan
6915 * 10us to run and grab the object if needed.
6916 */
6917 vm_page_lock_queues();
6918
6919 for (j = 0;; j++) {
6920 if ((!vm_object_lock_avoid(object) ||
6921 (vps_dynamic_priority_enabled && (j > 0))) &&
6922 _vm_object_lock_try(object)) {
6923 break;
6924 }
6925 vm_page_unlock_queues();
6926 mutex_pause(j);
6927 vm_page_lock_queues();
6928 }
6929 }
6930 for (j = 0; j < dw_count; j++, dwp++) {
6931 m = dwp->dw_m;
6932
6933 if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6934 vm_pageout_throttle_up(m);
6935 }
6936 #if CONFIG_PHANTOM_CACHE
6937 if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6938 vm_phantom_cache_update(m);
6939 }
6940 #endif
6941 if (dwp->dw_mask & DW_vm_page_wire) {
6942 vm_page_wire(m, tag, FALSE);
6943 } else if (dwp->dw_mask & DW_vm_page_unwire) {
6944 boolean_t queueit;
6945
6946 queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6947
6948 vm_page_unwire(m, queueit);
6949 }
6950 if (dwp->dw_mask & DW_vm_page_free) {
6951 vm_page_free_prepare_queues(m);
6952
6953 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6954 /*
6955 * Add this page to our list of reclaimed pages,
6956 * to be freed later.
6957 */
6958 m->vmp_snext = local_free_q;
6959 local_free_q = m;
6960 } else {
6961 if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6962 vm_page_deactivate_internal(m, FALSE);
6963 } else if (dwp->dw_mask & DW_vm_page_activate) {
6964 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6965 vm_page_activate(m);
6966 }
6967 } else if (dwp->dw_mask & DW_vm_page_speculate) {
6968 vm_page_speculate(m, TRUE);
6969 } else if (dwp->dw_mask & DW_enqueue_cleaned) {
6970 /*
6971 * if we didn't hold the object lock and did this,
6972 * we might disconnect the page, then someone might
6973 * soft fault it back in, then we would put it on the
6974 * cleaned queue, and so we would have a referenced (maybe even dirty)
6975 * page on that queue, which we don't want
6976 */
6977 int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6978
6979 if ((refmod_state & VM_MEM_REFERENCED)) {
6980 /*
6981 * this page has been touched since it got cleaned; let's activate it
6982 * if it hasn't already been
6983 */
6984 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6985 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6986
6987 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6988 vm_page_activate(m);
6989 }
6990 } else {
6991 m->vmp_reference = FALSE;
6992 vm_page_enqueue_cleaned(m);
6993 }
6994 } else if (dwp->dw_mask & DW_vm_page_lru) {
6995 vm_page_lru(m);
6996 } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6997 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6998 vm_page_queues_remove(m, TRUE);
6999 }
7000 }
7001 if (dwp->dw_mask & DW_set_reference) {
7002 m->vmp_reference = TRUE;
7003 } else if (dwp->dw_mask & DW_clear_reference) {
7004 m->vmp_reference = FALSE;
7005 }
7006
7007 if (dwp->dw_mask & DW_move_page) {
7008 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
7009 vm_page_queues_remove(m, FALSE);
7010
7011 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
7012
7013 vm_page_enqueue_inactive(m, FALSE);
7014 }
7015 }
7016 if (dwp->dw_mask & DW_clear_busy) {
7017 m->vmp_busy = FALSE;
7018 }
7019
7020 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7021 vm_page_wakeup(object, m);
7022 }
7023 }
7024 }
7025 vm_page_unlock_queues();
7026
7027 if (local_free_q) {
7028 vm_page_free_list(local_free_q, TRUE);
7029 }
7030
7031 VM_CHECK_MEMORYSTATUS;
7032 }
7033
7034 __abortlike
7035 static void
7036 __vm_page_alloc_list_failed_panic(
7037 vm_size_t page_count,
7038 kma_flags_t flags,
7039 kern_return_t kr)
7040 {
7041 panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
7042 (size_t)page_count, flags, kr);
7043 }
7044
7045 kern_return_t
7046 vm_page_alloc_list(
7047 vm_size_t page_count,
7048 kma_flags_t flags,
7049 vm_page_t *list)
7050 {
7051 vm_page_t page_list = VM_PAGE_NULL;
7052 vm_page_t mem;
7053 kern_return_t kr = KERN_SUCCESS;
7054 int page_grab_count = 0;
7055 #if DEVELOPMENT || DEBUG
7056 task_t task;
7057 #endif /* DEVELOPMENT || DEBUG */
7058
7059 for (vm_size_t i = 0; i < page_count; i++) {
7060 for (;;) {
7061 if (flags & KMA_LOMEM) {
7062 mem = vm_page_grablo();
7063 } else {
7064 uint_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7065 mem = vm_page_grab_options(options);
7066 }
7067
7068 if (mem != VM_PAGE_NULL) {
7069 break;
7070 }
7071
7072 if (flags & KMA_NOPAGEWAIT) {
7073 kr = KERN_RESOURCE_SHORTAGE;
7074 goto out;
7075 }
7076 if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
7077 kr = KERN_RESOURCE_SHORTAGE;
7078 goto out;
7079 }
7080
7081 /* VM privileged threads should have waited in vm_page_grab() and not get here. */
7082 assert(!(current_thread()->options & TH_OPT_VMPRIV));
7083
7084 if ((flags & KMA_NOFAIL) == 0 && ptoa_64(page_count) > max_mem / 4) {
7085 uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
7086 if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
7087 kr = KERN_RESOURCE_SHORTAGE;
7088 goto out;
7089 }
7090 }
7091 VM_PAGE_WAIT();
7092 }
7093
7094 page_grab_count++;
7095 mem->vmp_snext = page_list;
7096 page_list = mem;
7097 }
7098
7099 if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
7100 for (mem = page_list; mem; mem = mem->vmp_snext) {
7101 vm_page_zero_fill(mem);
7102 }
7103 }
7104
7105 out:
7106 #if DEBUG || DEVELOPMENT
7107 task = current_task_early();
7108 if (task != NULL) {
7109 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
7110 }
7111 #endif
7112
7113 if (kr == KERN_SUCCESS) {
7114 *list = page_list;
7115 } else if (flags & KMA_NOFAIL) {
7116 __vm_page_alloc_list_failed_panic(page_count, flags, kr);
7117 } else {
7118 vm_page_free_list(page_list, FALSE);
7119 }
7120
7121 return kr;
7122 }
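/*
 * Usage sketch: grab 8 zero-filled pages as a singly-linked list
 * (vmp_snext chaining); without KMA_NOPAGEWAIT the loop above blocks
 * in VM_PAGE_WAIT() on shortage rather than failing.
 *
 *	vm_page_t list = VM_PAGE_NULL;
 *	kern_return_t kr;
 *
 *	kr = vm_page_alloc_list(8, KMA_ZERO, &list);
 *	if (kr == KERN_SUCCESS) {
 *		for (vm_page_t p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
 *			... use VM_PAGE_GET_PHYS_PAGE(p) ...
 *		}
 *	}
 */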
7123
7124 void
7125 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
7126 {
7127 page->vmp_offset = offset;
7128 }
7129
7130 vm_page_t
7131 vm_page_get_next(vm_page_t page)
7132 {
7133 return page->vmp_snext;
7134 }
7135
7136 vm_object_offset_t
7137 vm_page_get_offset(vm_page_t page)
7138 {
7139 return page->vmp_offset;
7140 }
7141
7142 ppnum_t
7143 vm_page_get_phys_page(vm_page_t page)
7144 {
7145 return VM_PAGE_GET_PHYS_PAGE(page);
7146 }
7147
7148
7149 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
7150
7151 #if HIBERNATION
7152
7153 static vm_page_t hibernate_gobble_queue;
7154
7155 static int hibernate_drain_pageout_queue(struct vm_pageout_queue *);
7156 static int hibernate_flush_dirty_pages(int);
7157 static int hibernate_flush_queue(vm_page_queue_head_t *, int);
7158
7159 void hibernate_flush_wait(void);
7160 void hibernate_mark_in_progress(void);
7161 void hibernate_clear_in_progress(void);
7162
7163 void hibernate_free_range(int, int);
7164 void hibernate_hash_insert_page(vm_page_t);
7165 uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
7166 uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
7167 ppnum_t hibernate_lookup_paddr(unsigned int);
7168
7169 struct hibernate_statistics {
7170 int hibernate_considered;
7171 int hibernate_reentered_on_q;
7172 int hibernate_found_dirty;
7173 int hibernate_skipped_cleaning;
7174 int hibernate_skipped_transient;
7175 int hibernate_skipped_precious;
7176 int hibernate_skipped_external;
7177 int hibernate_queue_nolock;
7178 int hibernate_queue_paused;
7179 int hibernate_throttled;
7180 int hibernate_throttle_timeout;
7181 int hibernate_drained;
7182 int hibernate_drain_timeout;
7183 int cd_lock_failed;
7184 int cd_found_precious;
7185 int cd_found_wired;
7186 int cd_found_busy;
7187 int cd_found_unusual;
7188 int cd_found_cleaning;
7189 int cd_found_laundry;
7190 int cd_found_dirty;
7191 int cd_found_xpmapped;
7192 int cd_skipped_xpmapped;
7193 int cd_local_free;
7194 int cd_total_free;
7195 int cd_vm_page_wire_count;
7196 int cd_vm_struct_pages_unneeded;
7197 int cd_pages;
7198 int cd_discarded;
7199 int cd_count_wire;
7200 } hibernate_stats;
7201
7202 #if CONFIG_SPTM
7203 /**
7204 * On SPTM-based systems don't save any executable pages into the hibernation
7205 * image. The SPTM has stronger guarantees around not allowing write access to
7206 * the executable pages than on older systems, which prevents XNU from being
7207 * able to restore any pages mapped as executable.
7208 */
7209 #define HIBERNATE_XPMAPPED_LIMIT 0ULL
7210 #else /* CONFIG_SPTM */
7211 /*
7212 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
7213 * so that we don't overrun the estimated image size, which would
7214 * result in a hibernation failure.
7215 *
7216 * We use a size value instead of pages because we don't want to take up more space
7217 * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7218 * to have that additional space available.
7219 *
7220 * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7221 * xpmapped size.
7222 */
7223 #define HIBERNATE_XPMAPPED_LIMIT ((160 * 1024 * 1024ULL) / PAGE_SIZE)
7224 #endif /* CONFIG_SPTM */
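/*
 * Worked example of the clamp above: 160MB / 4K = 40960 pages (close to
 * the historical 40000-page x86 limit), while 160MB / 16K = 10240 pages,
 * so the on-disk image budget stays the same across page sizes.
 */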
7225
7226 static int
7227 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7228 {
7229 wait_result_t wait_result;
7230
7231 vm_page_lock_queues();
7232
7233 while (!vm_page_queue_empty(&q->pgo_pending)) {
7234 q->pgo_draining = TRUE;
7235
7236 assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7237
7238 vm_page_unlock_queues();
7239
7240 wait_result = thread_block(THREAD_CONTINUE_NULL);
7241
7242 if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7243 hibernate_stats.hibernate_drain_timeout++;
7244
7245 if (q == &vm_pageout_queue_external) {
7246 return 0;
7247 }
7248
7249 return 1;
7250 }
7251 vm_page_lock_queues();
7252
7253 hibernate_stats.hibernate_drained++;
7254 }
7255 vm_page_unlock_queues();
7256
7257 return 0;
7258 }
7259
7260
7261 boolean_t hibernate_skip_external = FALSE;
7262
7263 static int
7264 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7265 {
7266 vm_page_t m;
7267 vm_object_t l_object = NULL;
7268 vm_object_t m_object = NULL;
7269 int refmod_state = 0;
7270 int try_failed_count = 0;
7271 int retval = 0;
7272 int current_run = 0;
7273 struct vm_pageout_queue *iq;
7274 struct vm_pageout_queue *eq;
7275 struct vm_pageout_queue *tq;
7276
7277 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7278 VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7279
7280 iq = &vm_pageout_queue_internal;
7281 eq = &vm_pageout_queue_external;
7282
7283 vm_page_lock_queues();
7284
7285 while (qcount && !vm_page_queue_empty(q)) {
7286 if (current_run++ == 1000) {
7287 if (hibernate_should_abort()) {
7288 retval = 1;
7289 break;
7290 }
7291 current_run = 0;
7292 }
7293
7294 m = (vm_page_t) vm_page_queue_first(q);
7295 m_object = VM_PAGE_OBJECT(m);
7296
7297 /*
7298 * check to see if we currently are working
7299 * with the same object... if so, we've
7300 * already got the lock
7301 */
7302 if (m_object != l_object) {
7303 /*
7304 * the object associated with candidate page is
7305 * different from the one we were just working
7306 * with... dump the lock if we still own it
7307 */
7308 if (l_object != NULL) {
7309 vm_object_unlock(l_object);
7310 l_object = NULL;
7311 }
7312 /*
7313 * Try to lock the object; since we've already got the
7314 * page queues lock, we can only 'try' for this one.
7315 * If the 'try' fails, we need to do a mutex_pause
7316 * to allow the owner of the object lock a chance to
7317 * run...
7318 */
7319 if (!vm_object_lock_try_scan(m_object)) {
7320 if (try_failed_count > 20) {
7321 hibernate_stats.hibernate_queue_nolock++;
7322
7323 goto reenter_pg_on_q;
7324 }
7325
7326 vm_page_unlock_queues();
7327 mutex_pause(try_failed_count++);
7328 vm_page_lock_queues();
7329
7330 hibernate_stats.hibernate_queue_paused++;
7331 continue;
7332 } else {
7333 l_object = m_object;
7334 }
7335 }
7336 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7337 /*
7338 * page is not to be cleaned
7339 * re-enter it at the tail of its queue
7340 */
7341 if (m->vmp_cleaning) {
7342 hibernate_stats.hibernate_skipped_cleaning++;
7343 } else {
7344 hibernate_stats.hibernate_skipped_transient++;
7345 }
7346
7347 goto reenter_pg_on_q;
7348 }
7349 if (m_object->vo_copy == VM_OBJECT_NULL) {
7350 if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7351 /*
7352 * let the normal hibernate image path
7353 * deal with these
7354 */
7355 goto reenter_pg_on_q;
7356 }
7357 }
7358 if (!m->vmp_dirty && m->vmp_pmapped) {
7359 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7360
7361 if ((refmod_state & VM_MEM_MODIFIED)) {
7362 SET_PAGE_DIRTY(m, FALSE);
7363 }
7364 } else {
7365 refmod_state = 0;
7366 }
7367
7368 if (!m->vmp_dirty) {
7369 /*
7370 * page is not to be cleaned
7371 * re-enter it at the tail of its queue
7372 */
7373 if (m->vmp_precious) {
7374 hibernate_stats.hibernate_skipped_precious++;
7375 }
7376
7377 goto reenter_pg_on_q;
7378 }
7379
7380 if (hibernate_skip_external == TRUE && !m_object->internal) {
7381 hibernate_stats.hibernate_skipped_external++;
7382
7383 goto reenter_pg_on_q;
7384 }
7385 tq = NULL;
7386
7387 if (m_object->internal) {
7388 if (VM_PAGE_Q_THROTTLED(iq)) {
7389 tq = iq;
7390 }
7391 } else if (VM_PAGE_Q_THROTTLED(eq)) {
7392 tq = eq;
7393 }
7394
7395 if (tq != NULL) {
7396 wait_result_t wait_result;
7397 int wait_count = 5;
7398
7399 if (l_object != NULL) {
7400 vm_object_unlock(l_object);
7401 l_object = NULL;
7402 }
7403
7404 while (retval == 0) {
7405 tq->pgo_throttled = TRUE;
7406
7407 assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7408
7409 vm_page_unlock_queues();
7410
7411 wait_result = thread_block(THREAD_CONTINUE_NULL);
7412
7413 vm_page_lock_queues();
7414
7415 if (wait_result != THREAD_TIMED_OUT) {
7416 break;
7417 }
7418 if (!VM_PAGE_Q_THROTTLED(tq)) {
7419 break;
7420 }
7421
7422 if (hibernate_should_abort()) {
7423 retval = 1;
7424 }
7425
7426 if (--wait_count == 0) {
7427 hibernate_stats.hibernate_throttle_timeout++;
7428
7429 if (tq == eq) {
7430 hibernate_skip_external = TRUE;
7431 break;
7432 }
7433 retval = 1;
7434 }
7435 }
7436 if (retval) {
7437 break;
7438 }
7439
7440 hibernate_stats.hibernate_throttled++;
7441
7442 continue;
7443 }
7444 /*
7445 * we've already factored out pages in the laundry which
7446 * means this page can't be on the pageout queue so it's
7447 * safe to do the vm_page_queues_remove
7448 */
7449 vm_page_queues_remove(m, TRUE);
7450
7451 if (m_object->internal == TRUE) {
7452 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7453 }
7454
7455 vm_pageout_cluster(m);
7456
7457 hibernate_stats.hibernate_found_dirty++;
7458
7459 goto next_pg;
7460
7461 reenter_pg_on_q:
7462 vm_page_queue_remove(q, m, vmp_pageq);
7463 vm_page_queue_enter(q, m, vmp_pageq);
7464
7465 hibernate_stats.hibernate_reentered_on_q++;
7466 next_pg:
7467 hibernate_stats.hibernate_considered++;
7468
7469 qcount--;
7470 try_failed_count = 0;
7471 }
7472 if (l_object != NULL) {
7473 vm_object_unlock(l_object);
7474 l_object = NULL;
7475 }
7476
7477 vm_page_unlock_queues();
7478
7479 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7480
7481 return retval;
7482 }
7483
7484
7485 static int
7486 hibernate_flush_dirty_pages(int pass)
7487 {
7488 struct vm_speculative_age_q *aq;
7489 uint32_t i;
7490
7491 if (vm_page_local_q) {
7492 zpercpu_foreach_cpu(lid) {
7493 vm_page_reactivate_local(lid, TRUE, FALSE);
7494 }
7495 }
7496
7497 for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
7498 int qcount;
7499 vm_page_t m;
7500
7501 aq = &vm_page_queue_speculative[i];
7502
7503 if (vm_page_queue_empty(&aq->age_q)) {
7504 continue;
7505 }
7506 qcount = 0;
7507
7508 vm_page_lockspin_queues();
7509
7510 vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7511 qcount++;
7512 }
7513 vm_page_unlock_queues();
7514
7515 if (qcount) {
7516 if (hibernate_flush_queue(&aq->age_q, qcount)) {
7517 return 1;
7518 }
7519 }
7520 }
7521 if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7522 return 1;
7523 }
7524 /* XXX FBDP TODO: flush secluded queue */
7525 if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7526 return 1;
7527 }
7528 if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7529 return 1;
7530 }
7531 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7532 return 1;
7533 }
7534
7535 if (pass == 1) {
7536 vm_compressor_record_warmup_start();
7537 }
7538
7539 if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7540 if (pass == 1) {
7541 vm_compressor_record_warmup_end();
7542 }
7543 return 1;
7544 }
7545 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7546 if (pass == 1) {
7547 vm_compressor_record_warmup_end();
7548 }
7549 return 1;
7550 }
7551 if (pass == 1) {
7552 vm_compressor_record_warmup_end();
7553 }
7554
7555 if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7556 return 1;
7557 }
7558
7559 return 0;
7560 }
7561
7562
7563 void
7564 hibernate_reset_stats()
7565 {
7566 bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7567 }
7568
7569
7570 int
7571 hibernate_flush_memory()
7572 {
7573 int retval;
7574
7575 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7576
7577 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7578
7579 hibernate_cleaning_in_progress = TRUE;
7580 hibernate_skip_external = FALSE;
7581
7582 if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7583 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7584
7585 vm_compressor_flush();
7586
7587 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7588
7589 if (consider_buffer_cache_collect != NULL) {
7590 unsigned int orig_wire_count;
7591
7592 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7593 orig_wire_count = vm_page_wire_count;
7594
7595 (void)(*consider_buffer_cache_collect)(1);
7596 zone_gc(ZONE_GC_DRAIN);
7597
7598 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7599
7600 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7601 }
7602 }
7603 hibernate_cleaning_in_progress = FALSE;
7604
7605 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7606
7607 if (retval) {
7608 HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7609 }
7610
7611
7612 HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7613 hibernate_stats.hibernate_considered,
7614 hibernate_stats.hibernate_reentered_on_q,
7615 hibernate_stats.hibernate_found_dirty);
7616 HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7617 hibernate_stats.hibernate_skipped_cleaning,
7618 hibernate_stats.hibernate_skipped_transient,
7619 hibernate_stats.hibernate_skipped_precious,
7620 hibernate_stats.hibernate_skipped_external,
7621 hibernate_stats.hibernate_queue_nolock);
7622 HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7623 hibernate_stats.hibernate_queue_paused,
7624 hibernate_stats.hibernate_throttled,
7625 hibernate_stats.hibernate_throttle_timeout,
7626 hibernate_stats.hibernate_drained,
7627 hibernate_stats.hibernate_drain_timeout);
7628
7629 return retval;
7630 }
7631
7632
7633 static void
7634 hibernate_page_list_zero(hibernate_page_list_t *list)
7635 {
7636 uint32_t bank;
7637 hibernate_bitmap_t * bitmap;
7638
7639 bitmap = &list->bank_bitmap[0];
7640 for (bank = 0; bank < list->bank_count; bank++) {
7641 uint32_t last_bit;
7642
7643 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7644 // set out-of-bound bits at end of bitmap.
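// For example, a bank of 40 pages leaves last_bit = 8; with the
// MSB-first bit numbering used by hibernate_page_bitset, the low
// 24 bits of the final word (0xFFFFFFFF >> 8) cover no real pages,
// so they are set here to mean "no save needed".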
7645 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7646 if (last_bit) {
7647 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7648 }
7649
7650 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7651 }
7652 }
7653
7654 void
7655 hibernate_free_gobble_pages(void)
7656 {
7657 vm_page_t m, next;
7658 uint32_t count = 0;
7659
7660 m = (vm_page_t) hibernate_gobble_queue;
7661 while (m) {
7662 next = m->vmp_snext;
7663 vm_page_free(m);
7664 count++;
7665 m = next;
7666 }
7667 hibernate_gobble_queue = VM_PAGE_NULL;
7668
7669 if (count) {
7670 HIBLOG("Freed %d pages\n", count);
7671 }
7672 }
7673
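/*
 * Decide whether page 'm' can be dropped on wakeup instead of being
 * written into the image: it must be clean (or belong to a volatile or
 * empty purgeable object) and must not be wired, busy, precious, or in
 * the middle of cleaning/laundry. Statistics are only recorded on the
 * real pass (preflight == FALSE).
 */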
7674 static boolean_t
7675 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7676 {
7677 vm_object_t object = NULL;
7678 int refmod_state;
7679 boolean_t discard = FALSE;
7680
7681 do {
7682 if (m->vmp_private) {
7683 panic("hibernate_consider_discard: private");
7684 }
7685
7686 object = VM_PAGE_OBJECT(m);
7687
7688 if (!vm_object_lock_try(object)) {
7689 object = NULL;
7690 if (!preflight) {
7691 hibernate_stats.cd_lock_failed++;
7692 }
7693 break;
7694 }
7695 if (VM_PAGE_WIRED(m)) {
7696 if (!preflight) {
7697 hibernate_stats.cd_found_wired++;
7698 }
7699 break;
7700 }
7701 if (m->vmp_precious) {
7702 if (!preflight) {
7703 hibernate_stats.cd_found_precious++;
7704 }
7705 break;
7706 }
7707 if (m->vmp_busy || !object->alive) {
7708 /*
7709 * Somebody is playing with this page.
7710 */
7711 if (!preflight) {
7712 hibernate_stats.cd_found_busy++;
7713 }
7714 break;
7715 }
7716 if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7717 /*
7718 * If it's unusual in any way, ignore it
7719 */
7720 if (!preflight) {
7721 hibernate_stats.cd_found_unusual++;
7722 }
7723 break;
7724 }
7725 if (m->vmp_cleaning) {
7726 if (!preflight) {
7727 hibernate_stats.cd_found_cleaning++;
7728 }
7729 break;
7730 }
7731 if (m->vmp_laundry) {
7732 if (!preflight) {
7733 hibernate_stats.cd_found_laundry++;
7734 }
7735 break;
7736 }
7737 if (!m->vmp_dirty) {
7738 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7739
7740 if (refmod_state & VM_MEM_REFERENCED) {
7741 m->vmp_reference = TRUE;
7742 }
7743 if (refmod_state & VM_MEM_MODIFIED) {
7744 SET_PAGE_DIRTY(m, FALSE);
7745 }
7746 }
7747
7748 /*
7749 * If it's clean or purgeable we can discard the page on wakeup.
7750 */
7751 discard = (!m->vmp_dirty)
7752 || (VM_PURGABLE_VOLATILE == object->purgable)
7753 || (VM_PURGABLE_EMPTY == object->purgable);
7754
7755
7756 if (discard == FALSE) {
7757 if (!preflight) {
7758 hibernate_stats.cd_found_dirty++;
7759 }
7760 } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7761 if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7762 if (!preflight) {
7763 hibernate_stats.cd_found_xpmapped++;
7764 }
7765 discard = FALSE;
7766 } else {
7767 if (!preflight) {
7768 hibernate_stats.cd_skipped_xpmapped++;
7769 }
7770 }
7771 }
7772 } while (FALSE);
7773
7774 if (object) {
7775 vm_object_unlock(object);
7776 }
7777
7778 return discard;
7779 }
7780
7781
7782 static void
7783 hibernate_discard_page(vm_page_t m)
7784 {
7785 vm_object_t m_object;
7786
7787 if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7788 /*
7789 * If it's unusual in any way, ignore it
7790 */
7791 return;
7792 }
7793
7794 m_object = VM_PAGE_OBJECT(m);
7795
7796 #if MACH_ASSERT || DEBUG
7797 if (!vm_object_lock_try(m_object)) {
7798 panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7799 }
7800 #else
7801 /* No need to lock page queue for token delete, hibernate_vm_unlock()
7802 * makes sure these locks are uncontended before sleep */
7803 #endif /* MACH_ASSERT || DEBUG */
7804
7805 if (m->vmp_pmapped == TRUE) {
7806 __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7807 }
7808
7809 if (m->vmp_laundry) {
7810 panic("hibernate_discard_page(%p) laundry", m);
7811 }
7812 if (m->vmp_private) {
7813 panic("hibernate_discard_page(%p) private", m);
7814 }
7815 if (m->vmp_fictitious) {
7816 panic("hibernate_discard_page(%p) fictitious", m);
7817 }
7818
7819 if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7820 /* object should be on a queue */
7821 assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7822 purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7823 assert(old_queue);
7824 if (m_object->purgeable_when_ripe) {
7825 vm_purgeable_token_delete_first(old_queue);
7826 }
7827 vm_object_lock_assert_exclusive(m_object);
7828 VM_OBJECT_SET_PURGABLE(m_object, VM_PURGABLE_EMPTY);
7829
7830 /*
7831 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
7832 * accounted in the "volatile" ledger, so no change here.
7833 * We have to update vm_page_purgeable_count, though, since we're
7834 * effectively purging this object.
7835 */
7836 unsigned int delta;
7837 assert(m_object->resident_page_count >= m_object->wired_page_count);
7838 delta = (m_object->resident_page_count - m_object->wired_page_count);
7839 assert(vm_page_purgeable_count >= delta);
7840 assert(delta > 0);
7841 OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7842 }
7843
7844 vm_page_free(m);
7845
7846 #if MACH_ASSERT || DEBUG
7847 vm_object_unlock(m_object);
7848 #endif /* MACH_ASSERT || DEBUG */
7849 }
7850
7851 /*
7852 * Grab locks for hibernate_page_list_setall()
7853 */
7854 void
7855 hibernate_vm_lock_queues(void)
7856 {
7857 vm_object_lock(compressor_object);
7858 vm_page_lock_queues();
7859 vm_free_page_lock();
7860 lck_mtx_lock(&vm_purgeable_queue_lock);
7861
7862 if (vm_page_local_q) {
7863 zpercpu_foreach(lq, vm_page_local_q) {
7864 VPL_LOCK(&lq->vpl_lock);
7865 }
7866 }
7867 }
7868
7869 void
7870 hibernate_vm_unlock_queues(void)
7871 {
7872 if (vm_page_local_q) {
7873 zpercpu_foreach(lq, vm_page_local_q) {
7874 VPL_UNLOCK(&lq->vpl_lock);
7875 }
7876 }
7877 lck_mtx_unlock(&vm_purgeable_queue_lock);
7878 vm_free_page_unlock();
7879 vm_page_unlock_queues();
7880 vm_object_unlock(compressor_object);
7881 }
7882
7883 #if CONFIG_SPTM
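/*
 * Pages whose SPTM frame type is user JIT, user debug, or internal user
 * executable are forced into the wired page list: per the
 * HIBERNATE_XPMAPPED_LIMIT comment above, XNU can't restore pages mapped
 * executable on SPTM systems, so these can't go through the normal
 * pageable path.
 */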
7884 static bool
7885 hibernate_sptm_should_force_page_to_wired_pagelist(vm_page_t vmp)
7886 {
7887 const sptm_paddr_t paddr = ptoa_64(VM_PAGE_GET_PHYS_PAGE(vmp));
7888 const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
7889 const vm_object_t vmp_objp = VM_PAGE_OBJECT(vmp);
7890
7891 return frame_type == XNU_USER_JIT || frame_type == XNU_USER_DEBUG ||
7892 (frame_type == XNU_USER_EXEC && vmp_objp->internal == TRUE);
7893 }
7894 #endif
7895
7896 /*
7897 * Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
7898 * pages known to VM to not need saving are subtracted.
7899 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
7900 */
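/*
 * For example, hibernate_page_bitset(page_list, TRUE, ppn) sets the bit
 * for physical page 'ppn', marking it as NOT needing to be saved.
 */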
7901
7902 void
7903 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7904 hibernate_page_list_t * page_list_wired,
7905 hibernate_page_list_t * page_list_pal,
7906 boolean_t preflight,
7907 boolean_t will_discard,
7908 uint32_t * pagesOut)
7909 {
7910 uint64_t start, end, nsec;
7911 vm_page_t m;
7912 vm_page_t next;
7913 uint32_t pages = page_list->page_count;
7914 uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7915 uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7916 uint32_t count_wire = pages;
7917 uint32_t count_discard_active = 0;
7918 uint32_t count_discard_inactive = 0;
7919 uint32_t count_retired = 0;
7920 uint32_t count_discard_cleaned = 0;
7921 uint32_t count_discard_purgeable = 0;
7922 uint32_t count_discard_speculative = 0;
7923 uint32_t count_discard_vm_struct_pages = 0;
7924 uint32_t i;
7925 uint32_t bank;
7926 hibernate_bitmap_t * bitmap;
7927 hibernate_bitmap_t * bitmap_wired;
7928 boolean_t discard_all;
7929 boolean_t discard = FALSE;
7930
7931 HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7932
7933 if (preflight) {
7934 page_list = NULL;
7935 page_list_wired = NULL;
7936 page_list_pal = NULL;
7937 discard_all = FALSE;
7938 } else {
7939 discard_all = will_discard;
7940 }
7941
7942 #if MACH_ASSERT || DEBUG
7943 if (!preflight) {
7944 assert(hibernate_vm_locks_are_safe());
7945 vm_page_lock_queues();
7946 if (vm_page_local_q) {
7947 zpercpu_foreach(lq, vm_page_local_q) {
7948 VPL_LOCK(&lq->vpl_lock);
7949 }
7950 }
7951 }
7952 #endif /* MACH_ASSERT || DEBUG */
7953
7954
7955 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7956
7957 clock_get_uptime(&start);
7958
7959 if (!preflight) {
7960 hibernate_page_list_zero(page_list);
7961 hibernate_page_list_zero(page_list_wired);
7962 hibernate_page_list_zero(page_list_pal);
7963
7964 hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7965 hibernate_stats.cd_pages = pages;
7966 }
7967
7968 if (vm_page_local_q) {
7969 zpercpu_foreach_cpu(lid) {
7970 vm_page_reactivate_local(lid, TRUE, !preflight);
7971 }
7972 }
7973
7974 if (preflight) {
7975 vm_object_lock(compressor_object);
7976 vm_page_lock_queues();
7977 vm_free_page_lock();
7978 }
7979
7980 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7981
7982 hibernation_vmqueues_inspection = TRUE;
7983
7984 m = (vm_page_t) hibernate_gobble_queue;
7985 while (m) {
7986 pages--;
7987 count_wire--;
7988 if (!preflight) {
7989 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7990 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7991 }
7992 m = m->vmp_snext;
7993 }
7994
7995 if (!preflight) {
7996 percpu_foreach(free_pages_head, free_pages) {
7997 for (m = *free_pages_head; m; m = m->vmp_snext) {
7998 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7999
8000 pages--;
8001 count_wire--;
8002 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8003 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8004
8005 hibernate_stats.cd_local_free++;
8006 hibernate_stats.cd_total_free++;
8007 }
8008 }
8009 }
8010
8011 for (i = 0; i < vm_colors; i++) {
8012 vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
8013 assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
8014
8015 pages--;
8016 count_wire--;
8017 if (!preflight) {
8018 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8019 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8020
8021 hibernate_stats.cd_total_free++;
8022 }
8023 }
8024 }
8025
8026 vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
8027 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
8028
8029 pages--;
8030 count_wire--;
8031 if (!preflight) {
8032 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8033 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8034
8035 hibernate_stats.cd_total_free++;
8036 }
8037 }
8038
8039 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
8040 while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
8041 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
8042
8043 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8044 discard = FALSE;
8045 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
8046 && hibernate_consider_discard(m, preflight)) {
8047 if (!preflight) {
8048 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8049 }
8050 count_discard_inactive++;
8051 discard = discard_all;
8052 } else {
8053 count_throttled++;
8054 }
8055 count_wire--;
8056 if (!preflight) {
8057 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8058 }
8059
8060 if (discard) {
8061 hibernate_discard_page(m);
8062 }
8063 m = next;
8064 }
8065
8066 m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
8067 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8068 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8069 bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
8070 #if CONFIG_SPTM
8071 force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8072 #endif
8073 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8074 discard = FALSE;
8075 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8076 hibernate_consider_discard(m, preflight)) {
8077 if (!preflight) {
8078 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8079 }
8080 if (m->vmp_dirty) {
8081 count_discard_purgeable++;
8082 } else {
8083 count_discard_inactive++;
8084 }
8085 discard = discard_all;
8086 } else {
8087 /*
8088 * If the page must be force-added to the wired page list, prevent it from appearing
8089 * in the unwired page list.
8090 */
8091 if (force_to_wired_list) {
8092 if (!preflight) {
8093 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8094 }
8095 } else {
8096 count_anonymous++;
8097 }
8098 }
8099 /*
8100 * If the page is NOT being forced into the wired page list, remove it from the
8101 * wired page list here.
8102 */
8103 if (!force_to_wired_list) {
8104 count_wire--;
8105 if (!preflight) {
8106 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8107 }
8108 }
8109 if (discard) {
8110 hibernate_discard_page(m);
8111 }
8112 m = next;
8113 }
8114
8115 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8116 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8117 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8118
8119 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8120 discard = FALSE;
8121 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8122 hibernate_consider_discard(m, preflight)) {
8123 if (!preflight) {
8124 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8125 }
8126 if (m->vmp_dirty) {
8127 count_discard_purgeable++;
8128 } else {
8129 count_discard_cleaned++;
8130 }
8131 discard = discard_all;
8132 } else {
8133 count_cleaned++;
8134 }
8135 count_wire--;
8136 if (!preflight) {
8137 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8138 }
8139 if (discard) {
8140 hibernate_discard_page(m);
8141 }
8142 m = next;
8143 }
8144
8145 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8146 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8147 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8148 bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
8149 #if CONFIG_SPTM
8150 force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8151 #endif
8152 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8153 discard = FALSE;
8154 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
8155 hibernate_consider_discard(m, preflight)) {
8156 if (!preflight) {
8157 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8158 }
8159 if (m->vmp_dirty) {
8160 count_discard_purgeable++;
8161 } else {
8162 count_discard_active++;
8163 }
8164 discard = discard_all;
8165 } else {
8166 /*
8167 * If the page must be force-added to the wired page list, prevent it from appearing
8168 * in the unwired page list.
8169 */
8170 if (force_to_wired_list) {
8171 if (!preflight) {
8172 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8173 }
8174 } else {
8175 count_active++;
8176 }
8177 }
8178 /*
8179 * If the page is NOT being forced into the wired page list, remove it from the
8180 * wired page list here.
8181 */
8182 if (!force_to_wired_list) {
8183 count_wire--;
8184 if (!preflight) {
8185 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8186 }
8187 }
8188 if (discard) {
8189 hibernate_discard_page(m);
8190 }
8191 m = next;
8192 }
8193
8194 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8195 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8196 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8197 bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
8198 #if CONFIG_SPTM
8199 force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8200 #endif
8201 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8202 discard = FALSE;
8203 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8204 hibernate_consider_discard(m, preflight)) {
8205 if (!preflight) {
8206 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8207 }
8208 if (m->vmp_dirty) {
8209 count_discard_purgeable++;
8210 } else {
8211 count_discard_inactive++;
8212 }
8213 discard = discard_all;
8214 } else {
8215 /*
8216 * If the page must be force-added to the wired page list, prevent it from appearing
8217 * in the unwired page list.
8218 */
8219 if (force_to_wired_list) {
8220 if (!preflight) {
8221 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8222 }
8223 } else {
8224 count_inactive++;
8225 }
8226 }
8227 /*
8228 * If the page is NOT being forced into the wired page list, remove it from the
8229 * wired page list here.
8230 */
8231 if (!force_to_wired_list) {
8232 count_wire--;
8233 if (!preflight) {
8234 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8235 }
8236 }
8237 if (discard) {
8238 hibernate_discard_page(m);
8239 }
8240 m = next;
8241 }
8242 /* XXX FBDP TODO: secluded queue */
8243
8244 for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8245 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8246 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8247 assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
8248 "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
8249 m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
8250
8251 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8252 discard = FALSE;
8253 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8254 hibernate_consider_discard(m, preflight)) {
8255 if (!preflight) {
8256 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8257 }
8258 count_discard_speculative++;
8259 discard = discard_all;
8260 } else {
8261 count_speculative++;
8262 }
8263 count_wire--;
8264 if (!preflight) {
8265 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8266 }
8267 if (discard) {
8268 hibernate_discard_page(m);
8269 }
8270 m = next;
8271 }
8272 }
8273
8274 vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
8275 assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
8276
8277 count_compressor++;
8278 count_wire--;
8279 if (!preflight) {
8280 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8281 }
8282 }
8283
8284
8285 if (preflight == FALSE && discard_all == TRUE) {
8286 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
8287
8288 HIBLOG("hibernate_teardown started\n");
8289 count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
8290 HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
8291
8292 pages -= count_discard_vm_struct_pages;
8293 count_wire -= count_discard_vm_struct_pages;
8294
8295 hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
8296
8297 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
8298 }
8299
8300 if (!preflight) {
8301 // pull wired from hibernate_bitmap
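// A zero bit means "save this page". Any page still marked for save
// in the wired bitmap (bit clear there) gets its bit forced to 1 in
// page_list, so wired pages are saved only via page_list_wired and
// never duplicated in the pageable page_list.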
8302 bitmap = &page_list->bank_bitmap[0];
8303 bitmap_wired = &page_list_wired->bank_bitmap[0];
8304 for (bank = 0; bank < page_list->bank_count; bank++) {
8305 for (i = 0; i < bitmap->bitmapwords; i++) {
8306 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
8307 }
8308 bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
8309 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
8310 }
8311 }
8312
8313 // machine dependent adjustments
8314 hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
8315
8316 if (!preflight) {
8317 hibernate_stats.cd_count_wire = count_wire;
8318 hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
8319 count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
8320 }
8321
8322 clock_get_uptime(&end);
8323 absolutetime_to_nanoseconds(end - start, &nsec);
8324 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
8325
8326 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
8327 pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
8328 discard_all ? "did" : "could",
8329 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8330
8331 if (hibernate_stats.cd_skipped_xpmapped) {
8332 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8333 }
8334
8335 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8336
8337 if (preflight && will_discard) {
8338 *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8339 /*
8340 * We try to keep at most HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
8341 * even if these are clean and so we need to size the hibernation image accordingly.
8342 *
8343 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8344 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8345 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8346 * clean xpmapped pages.
8347 *
8348 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8349 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
8350 */
8351 *pagesOut += HIBERNATE_XPMAPPED_LIMIT;
8352 }
8353
8354 hibernation_vmqueues_inspection = FALSE;
8355
8356 #if MACH_ASSERT || DEBUG
8357 if (!preflight) {
8358 if (vm_page_local_q) {
8359 zpercpu_foreach(lq, vm_page_local_q) {
8360 VPL_UNLOCK(&lq->vpl_lock);
8361 }
8362 }
8363 vm_page_unlock_queues();
8364 }
8365 #endif /* MACH_ASSERT || DEBUG */
8366
8367 if (preflight) {
8368 vm_free_page_unlock();
8369 vm_page_unlock_queues();
8370 vm_object_unlock(compressor_object);
8371 }
8372
8373 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8374 }
8375
8376 void
8377 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8378 {
8379 uint64_t start, end, nsec;
8380 vm_page_t m;
8381 vm_page_t next;
8382 uint32_t i;
8383 uint32_t count_discard_active = 0;
8384 uint32_t count_discard_inactive = 0;
8385 uint32_t count_discard_purgeable = 0;
8386 uint32_t count_discard_cleaned = 0;
8387 uint32_t count_discard_speculative = 0;
8388
8389
8390 #if MACH_ASSERT || DEBUG
8391 vm_page_lock_queues();
8392 if (vm_page_local_q) {
8393 zpercpu_foreach(lq, vm_page_local_q) {
8394 VPL_LOCK(&lq->vpl_lock);
8395 }
8396 }
8397 #endif /* MACH_ASSERT || DEBUG */
8398
8399 clock_get_uptime(&start);
8400
8401 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8402 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8403 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8404
8405 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8406 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8407 if (m->vmp_dirty) {
8408 count_discard_purgeable++;
8409 } else {
8410 count_discard_inactive++;
8411 }
8412 hibernate_discard_page(m);
8413 }
8414 m = next;
8415 }
8416
8417 for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8418 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8419 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8420 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8421
8422 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8423 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8424 count_discard_speculative++;
8425 hibernate_discard_page(m);
8426 }
8427 m = next;
8428 }
8429 }
8430
8431 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8432 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8433 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8434
8435 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8436 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8437 if (m->vmp_dirty) {
8438 count_discard_purgeable++;
8439 } else {
8440 count_discard_inactive++;
8441 }
8442 hibernate_discard_page(m);
8443 }
8444 m = next;
8445 }
8446 /* XXX FBDP TODO: secluded queue */
8447
8448 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8449 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8450 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8451
8452 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8453 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8454 if (m->vmp_dirty) {
8455 count_discard_purgeable++;
8456 } else {
8457 count_discard_active++;
8458 }
8459 hibernate_discard_page(m);
8460 }
8461 m = next;
8462 }
8463
8464 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8465 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8466 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8467
8468 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8469 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8470 if (m->vmp_dirty) {
8471 count_discard_purgeable++;
8472 } else {
8473 count_discard_cleaned++;
8474 }
8475 hibernate_discard_page(m);
8476 }
8477 m = next;
8478 }
8479
8480 #if MACH_ASSERT || DEBUG
8481 if (vm_page_local_q) {
8482 zpercpu_foreach(lq, vm_page_local_q) {
8483 VPL_UNLOCK(&lq->vpl_lock);
8484 }
8485 }
8486 vm_page_unlock_queues();
8487 #endif /* MACH_ASSERT || DEBUG */
8488
8489 clock_get_uptime(&end);
8490 absolutetime_to_nanoseconds(end - start, &nsec);
8491 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8492 nsec / 1000000ULL,
8493 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8494 }
8495
8496 boolean_t hibernate_paddr_map_inited = FALSE;
8497 unsigned int hibernate_teardown_last_valid_compact_indx = -1;
8498 vm_page_t hibernate_rebuild_hash_list = NULL;
8499
8500 unsigned int hibernate_teardown_found_tabled_pages = 0;
8501 unsigned int hibernate_teardown_found_created_pages = 0;
8502 unsigned int hibernate_teardown_found_free_pages = 0;
8503 unsigned int hibernate_teardown_vm_page_free_count;
8504
8505
8506 struct ppnum_mapping {
8507 struct ppnum_mapping *ppnm_next;
8508 ppnum_t ppnm_base_paddr;
8509 unsigned int ppnm_sindx;
8510 unsigned int ppnm_eindx;
8511 };
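/*
 * Each ppnum_mapping records one physically contiguous run of the
 * vm_pages[] array: entries [ppnm_sindx, ppnm_eindx) correspond to
 * physical pages ppnm_base_paddr + (index - ppnm_sindx).
 */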
8512
8513 struct ppnum_mapping *ppnm_head;
8514 struct ppnum_mapping *ppnm_last_found = NULL;
8515
8516
8517 void
8518 hibernate_create_paddr_map(void)
8519 {
8520 unsigned int i;
8521 ppnum_t next_ppnum_in_run = 0;
8522 struct ppnum_mapping *ppnm = NULL;
8523
8524 if (hibernate_paddr_map_inited == FALSE) {
8525 for (i = 0; i < vm_pages_count; i++) {
8526 if (ppnm) {
8527 ppnm->ppnm_eindx = i;
8528 }
8529
8530 if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8531 ppnm = zalloc_permanent_type(struct ppnum_mapping);
8532
8533 ppnm->ppnm_next = ppnm_head;
8534 ppnm_head = ppnm;
8535
8536 ppnm->ppnm_sindx = i;
8537 ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8538 }
8539 next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8540 }
8541 ppnm->ppnm_eindx = vm_pages_count;
8542
8543 hibernate_paddr_map_inited = TRUE;
8544 }
8545 }
8546
8547 ppnum_t
8548 hibernate_lookup_paddr(unsigned int indx)
8549 {
8550 struct ppnum_mapping *ppnm = NULL;
8551
8552 ppnm = ppnm_last_found;
8553
8554 if (ppnm) {
8555 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8556 goto done;
8557 }
8558 }
8559 for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8560 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8561 ppnm_last_found = ppnm;
8562 break;
8563 }
8564 }
8565 if (ppnm == NULL) {
8566 panic("hibernate_lookup_paddr of %d failed", indx);
8567 }
8568 done:
8569 return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8570 }
8571
8572
8573 uint32_t
8574 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8575 {
8576 addr64_t saddr_aligned;
8577 addr64_t eaddr_aligned;
8578 addr64_t addr;
8579 ppnum_t paddr;
8580 unsigned int mark_as_unneeded_pages = 0;
8581
8582 saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8583 eaddr_aligned = eaddr & ~PAGE_MASK_64;
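	/*
	 * saddr is rounded up and eaddr rounded down so that only pages
	 * fully contained in [saddr, eaddr) are marked; a partially
	 * covered page at either end still has to be saved.
	 */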
8584
8585 for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8586 paddr = pmap_find_phys(kernel_pmap, addr);
8587
8588 assert(paddr);
8589
8590 hibernate_page_bitset(page_list, TRUE, paddr);
8591 hibernate_page_bitset(page_list_wired, TRUE, paddr);
8592
8593 mark_as_unneeded_pages++;
8594 }
8595 return mark_as_unneeded_pages;
8596 }
8597
8598
8599 void
8600 hibernate_hash_insert_page(vm_page_t mem)
8601 {
8602 vm_page_bucket_t *bucket;
8603 int hash_id;
8604 vm_object_t m_object;
8605
8606 m_object = VM_PAGE_OBJECT(mem);
8607
8608 assert(mem->vmp_hashed);
8609 assert(m_object);
8610 assert(mem->vmp_offset != (vm_object_offset_t) -1);
8611
8612 /*
8613 * Insert it into the object/offset hash table
8614 */
8615 hash_id = vm_page_hash(m_object, mem->vmp_offset);
8616 bucket = &vm_page_buckets[hash_id];
8617
8618 mem->vmp_next_m = bucket->page_list;
8619 bucket->page_list = VM_PAGE_PACK_PTR(mem);
8620 }
8621
8622
8623 void
8624 hibernate_free_range(int sindx, int eindx)
8625 {
8626 vm_page_t mem;
8627 unsigned int color;
8628
8629 while (sindx < eindx) {
8630 mem = &vm_pages[sindx];
8631
8632 vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8633
8634 mem->vmp_lopage = FALSE;
8635 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8636
8637 color = VM_PAGE_GET_COLOR(mem);
8638 #if defined(__x86_64__)
8639 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8640 #else
8641 vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8642 #endif
8643 vm_page_free_count++;
8644
8645 sindx++;
8646 }
8647 }
8648
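/*
 * Inverse of hibernate_teardown_vm_structs(): runs on wake, moving each
 * compacted vm_page_t back to the slot recorded in vmp_next_m,
 * re-inserting hashed pages into the bucket table, and rebuilding the
 * free list from the holes left behind.
 */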
8649 void
8650 hibernate_rebuild_vm_structs(void)
8651 {
8652 int i, cindx, sindx, eindx;
8653 vm_page_t mem, tmem, mem_next;
8654 AbsoluteTime startTime, endTime;
8655 uint64_t nsec;
8656
8657 if (hibernate_rebuild_needed == FALSE) {
8658 return;
8659 }
8660
8661 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8662 HIBLOG("hibernate_rebuild started\n");
8663
8664 clock_get_uptime(&startTime);
8665
8666 pal_hib_rebuild_pmap_structs();
8667
8668 bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8669 eindx = vm_pages_count;
8670
8671 /*
8672 * Mark all the vm_pages[] that have not been initialized yet as being
8673 * transient. This is needed to ensure that the buddy page search is correct.
8674 * Without this, random data in these vm_pages[] can trip up the buddy search.
8675 */
8676 for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8677 vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8678 }
8679
8680 for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8681 mem = &vm_pages[cindx];
8682 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8683 /*
8684 * hibernate_teardown_vm_structs leaves the location where
8685 * this vm_page_t must be located in "vmp_next_m".
8686 */
8687 tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8688 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8689
8690 sindx = (int)(tmem - &vm_pages[0]);
8691
8692 if (mem != tmem) {
8693 /*
8694 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8695 * so move it back to its real location
8696 */
8697 *tmem = *mem;
8698 mem = tmem;
8699 }
8700 if (mem->vmp_hashed) {
8701 hibernate_hash_insert_page(mem);
8702 }
8703 /*
8704 * the 'hole' between this vm_page_t and the previous
8705 * vm_page_t we moved needs to be initialized as
8706 * a range of free vm_page_t's
8707 */
8708 hibernate_free_range(sindx + 1, eindx);
8709
8710 eindx = sindx;
8711 }
8712 if (sindx) {
8713 hibernate_free_range(0, sindx);
8714 }
8715
8716 assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8717
8718 /*
8719 * process the list of vm_page_t's that were entered in the hash,
8720 * but were not located in the vm_pages array... these are
8721 * vm_page_t's that were created on the fly (i.e. fictitious)
8722 */
8723 for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8724 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8725
8726 mem->vmp_next_m = 0;
8727 hibernate_hash_insert_page(mem);
8728 }
8729 hibernate_rebuild_hash_list = NULL;
8730
8731 clock_get_uptime(&endTime);
8732 SUB_ABSOLUTETIME(&endTime, &startTime);
8733 absolutetime_to_nanoseconds(endTime, &nsec);
8734
8735 HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8736
8737 hibernate_rebuild_needed = FALSE;
8738
8739 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8740 }
8741
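/*
 * Compact vm_pages[] for the image: free pages are pulled off the free
 * queues and in-use entries are slid down to fill the holes (with the
 * original location stashed in vmp_next_m for the rebuild), after which
 * the tail of vm_pages[], the page hash buckets, and any pmap scratch
 * range are all marked as not needing to be saved. Returns the number
 * of pages marked unneeded.
 */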
8742 uint32_t
8743 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8744 {
8745 unsigned int i;
8746 unsigned int compact_target_indx;
8747 vm_page_t mem, mem_next;
8748 vm_page_bucket_t *bucket;
8749 unsigned int mark_as_unneeded_pages = 0;
8750 unsigned int unneeded_vm_page_bucket_pages = 0;
8751 unsigned int unneeded_vm_pages_pages = 0;
8752 unsigned int unneeded_pmap_pages = 0;
8753 addr64_t start_of_unneeded = 0;
8754 addr64_t end_of_unneeded = 0;
8755
8756
8757 if (hibernate_should_abort()) {
8758 return 0;
8759 }
8760
8761 hibernate_rebuild_needed = TRUE;
8762
8763 HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8764 vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8765 vm_page_cleaned_count, compressor_object->resident_page_count);
8766
8767 for (i = 0; i < vm_page_bucket_count; i++) {
8768 bucket = &vm_page_buckets[i];
8769
8770 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8771 assert(mem->vmp_hashed);
8772
8773 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8774
8775 if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8776 mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8777 hibernate_rebuild_hash_list = mem;
8778 }
8779 }
8780 }
8781 unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8782 mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8783
8784 hibernate_teardown_vm_page_free_count = vm_page_free_count;
8785
8786 compact_target_indx = 0;
8787
8788 for (i = 0; i < vm_pages_count; i++) {
8789 mem = &vm_pages[i];
8790
8791 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8792 unsigned int color;
8793
8794 assert(mem->vmp_busy);
8795 assert(!mem->vmp_lopage);
8796
8797 color = VM_PAGE_GET_COLOR(mem);
8798
8799 vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8800
8801 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8802
8803 vm_page_free_count--;
8804
8805 hibernate_teardown_found_free_pages++;
8806
8807 if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8808 compact_target_indx = i;
8809 }
8810 } else {
8811 /*
8812 * record this vm_page_t's original location
8813 * we need this even if it doesn't get moved
8814 * as an indicator to the rebuild function that
8815 * we don't have to move it
8816 */
8817 mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8818
8819 if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8820 /*
8821 * we've got a hole to fill, so
8822 * move this vm_page_t to its new home
8823 */
8824 vm_pages[compact_target_indx] = *mem;
8825 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8826
8827 hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8828 compact_target_indx++;
8829 } else {
8830 hibernate_teardown_last_valid_compact_indx = i;
8831 }
8832 }
8833 }
8834 unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8835 (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8836 mark_as_unneeded_pages += unneeded_vm_pages_pages;
8837
8838 pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8839
8840 if (start_of_unneeded) {
8841 unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8842 mark_as_unneeded_pages += unneeded_pmap_pages;
8843 }
8844 HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8845
8846 return mark_as_unneeded_pages;
8847 }
8848
8849
8850 #endif /* HIBERNATION */
8851
8852 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8853
8854 #include <mach_vm_debug.h>
8855 #if MACH_VM_DEBUG
8856
8857 #include <mach_debug/hash_info.h>
8858 #include <vm/vm_debug_internal.h>
8859
8860 /*
8861 * Routine: vm_page_info
8862 * Purpose:
8863 * Return information about the global VP table.
8864 * Fills the buffer with as much information as possible
8865 * and returns the desired size of the buffer.
8866 * Conditions:
8867 * Nothing locked. The caller should provide
8868 * possibly-pageable memory.
8869 */
8870
8871 unsigned int
8872 vm_page_info(
8873 hash_info_bucket_t *info,
8874 unsigned int count)
8875 {
8876 unsigned int i;
8877 lck_spin_t *bucket_lock;
8878
8879 if (vm_page_bucket_count < count) {
8880 count = vm_page_bucket_count;
8881 }
8882
8883 for (i = 0; i < count; i++) {
8884 vm_page_bucket_t *bucket = &vm_page_buckets[i];
8885 unsigned int bucket_count = 0;
8886 vm_page_t m;
8887
8888 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8889 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8890
8891 for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8892 m != VM_PAGE_NULL;
8893 m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8894 bucket_count++;
8895 }
8896
8897 lck_spin_unlock(bucket_lock);
8898
8899 /* don't touch pageable memory while holding locks */
8900 info[i].hib_count = bucket_count;
8901 }
8902
8903 return vm_page_bucket_count;
8904 }
8905 #endif /* MACH_VM_DEBUG */
8906
8907 #if VM_PAGE_BUCKETS_CHECK
8908 void
8909 vm_page_buckets_check(void)
8910 {
8911 unsigned int i;
8912 vm_page_t p;
8913 unsigned int p_hash;
8914 vm_page_bucket_t *bucket;
8915 lck_spin_t *bucket_lock;
8916
8917 if (!vm_page_buckets_check_ready) {
8918 return;
8919 }
8920
8921 #if HIBERNATION
8922 if (hibernate_rebuild_needed ||
8923 hibernate_rebuild_hash_list) {
8924 panic("BUCKET_CHECK: hibernation in progress: "
8925 "rebuild_needed=%d rebuild_hash_list=%p\n",
8926 hibernate_rebuild_needed,
8927 hibernate_rebuild_hash_list);
8928 }
8929 #endif /* HIBERNATION */
8930
8931 #if VM_PAGE_FAKE_BUCKETS
8932 char *cp;
8933 for (cp = (char *) vm_page_fake_buckets_start;
8934 cp < (char *) vm_page_fake_buckets_end;
8935 cp++) {
8936 if (*cp != 0x5a) {
8937 panic("BUCKET_CHECK: corruption at %p in fake buckets "
8938 "[0x%llx:0x%llx]\n",
8939 cp,
8940 (uint64_t) vm_page_fake_buckets_start,
8941 (uint64_t) vm_page_fake_buckets_end);
8942 }
8943 }
8944 #endif /* VM_PAGE_FAKE_BUCKETS */
8945
8946 for (i = 0; i < vm_page_bucket_count; i++) {
8947 vm_object_t p_object;
8948
8949 bucket = &vm_page_buckets[i];
8950 if (!bucket->page_list) {
8951 continue;
8952 }
8953
8954 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8955 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8956 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8957
8958 while (p != VM_PAGE_NULL) {
8959 p_object = VM_PAGE_OBJECT(p);
8960
8961 p_hash = vm_page_hash(p_object, p->vmp_offset);
8962 if (!p->vmp_hashed) {
8963 panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8964 "hash %d in bucket %d at %p "
8965 "is not hashed\n",
8966 p, p_object, p->vmp_offset,
8967 p_hash, i, bucket);
8968 }
8969 if (p_hash != i) {
8970 panic("BUCKET_CHECK: corruption in bucket %d "
8971 "at %p: page %p object %p offset 0x%llx "
8972 "hash %d\n",
8973 i, bucket, p, p_object, p->vmp_offset,
8974 p_hash);
8975 }
8976 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8977 }
8978 lck_spin_unlock(bucket_lock);
8979 }
8980
8981 // printf("BUCKET_CHECK: checked buckets\n");
8982 }
8983 #endif /* VM_PAGE_BUCKETS_CHECK */
8984
8985 /*
8986 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8987 * local queues if they exist... it's the only spot in the system where we add pages
8988 * to those queues... once on those queues, those pages can only move to one of the
8989 * global page queues or the free queues... they NEVER move from local q to local q.
8990 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8991 * the global vm_page_queue_lock at this point... we still need to take the local lock
8992 * in case this operation is being run on a CPU other than the one the local queue belongs to,
8993 * but we don't have to worry about the page moving to a global queue or becoming wired
8994 * while we're grabbing the local lock since those operations would require the global
8995 * vm_page_queue_lock to be held, and we already own it.
8996 *
8997 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8998 * 'wired' and local are ALWAYS mutually exclusive conditions.
8999 */
9000
9001 void
9002 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
9003 {
9004 boolean_t was_pageable = TRUE;
9005 vm_object_t m_object;
9006
9007 m_object = VM_PAGE_OBJECT(mem);
9008
9009 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9010
9011 if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
9012 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9013 if (remove_from_specialq == TRUE) {
9014 vm_page_remove_from_specialq(mem);
9015 }
9016 /*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
9017 * assert(mem->vmp_specialq.next != 0);
9018 * assert(mem->vmp_specialq.prev != 0);
9019 * } else {*/
9020 if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
9021 assert(mem->vmp_specialq.next == 0);
9022 assert(mem->vmp_specialq.prev == 0);
9023 }
9024 return;
9025 }
9026
9027 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9028 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9029 assert(mem->vmp_specialq.next == 0 &&
9030 mem->vmp_specialq.prev == 0 &&
9031 mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9032 return;
9033 }
9034 if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
9035 /*
9036 * might put these guys on a list for debugging purposes
9037 * if we do, we'll need to remove this assert
9038 */
9039 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9040 assert(mem->vmp_specialq.next == 0 &&
9041 mem->vmp_specialq.prev == 0);
9042 /*
9043 * Recall that vmp_on_specialq also means a request to put
9044 * it on the special Q. So we don't want to reset that bit
9045 * just because a wiring request came in. We might want to
9046 * put it on the special queue post-unwiring.
9047 *
9048 * &&
9049 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9050 */
9051 return;
9052 }
9053
9054 assert(m_object != compressor_object);
9055 assert(!is_kernel_object(m_object));
9056 assert(!mem->vmp_fictitious);
9057
9058 switch (mem->vmp_q_state) {
9059 case VM_PAGE_ON_ACTIVE_LOCAL_Q:
9060 {
9061 struct vpl *lq;
9062
9063 lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
9064 VPL_LOCK(&lq->vpl_lock);
9065 vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
9066 mem->vmp_local_id = 0;
9067 lq->vpl_count--;
9068 if (m_object->internal) {
9069 lq->vpl_internal_count--;
9070 } else {
9071 lq->vpl_external_count--;
9072 }
9073 VPL_UNLOCK(&lq->vpl_lock);
9074 was_pageable = FALSE;
9075 break;
9076 }
9077 case VM_PAGE_ON_ACTIVE_Q:
9078 {
9079 vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
9080 vm_page_active_count--;
9081 break;
9082 }
9083
9084 case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
9085 {
9086 assert(m_object->internal == TRUE);
9087
9088 vm_page_inactive_count--;
9089 vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
9090 vm_page_anonymous_count--;
9091
9092 vm_purgeable_q_advance_all();
9093 vm_page_balance_inactive(3);
9094 break;
9095 }
9096
9097 case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
9098 {
9099 assert(m_object->internal == FALSE);
9100
9101 vm_page_inactive_count--;
9102 vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
9103 vm_purgeable_q_advance_all();
9104 vm_page_balance_inactive(3);
9105 break;
9106 }
9107
9108 case VM_PAGE_ON_INACTIVE_CLEANED_Q:
9109 {
9110 assert(m_object->internal == FALSE);
9111
9112 vm_page_inactive_count--;
9113 vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
9114 vm_page_cleaned_count--;
9115 vm_page_balance_inactive(3);
9116 break;
9117 }
9118
9119 case VM_PAGE_ON_THROTTLED_Q:
9120 {
9121 assert(m_object->internal == TRUE);
9122
9123 vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
9124 vm_page_throttled_count--;
9125 was_pageable = FALSE;
9126 break;
9127 }
9128
9129 case VM_PAGE_ON_SPECULATIVE_Q:
9130 {
9131 assert(m_object->internal == FALSE);
9132
9133 vm_page_remque(&mem->vmp_pageq);
9134 vm_page_speculative_count--;
9135 vm_page_balance_inactive(3);
9136 break;
9137 }
9138
9139 #if CONFIG_SECLUDED_MEMORY
9140 case VM_PAGE_ON_SECLUDED_Q:
9141 {
9142 vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
9143 vm_page_secluded_count--;
9144 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9145 if (m_object == VM_OBJECT_NULL) {
9146 vm_page_secluded_count_free--;
9147 was_pageable = FALSE;
9148 } else {
9149 assert(!m_object->internal);
9150 vm_page_secluded_count_inuse--;
9151 was_pageable = FALSE;
9152 // was_pageable = TRUE;
9153 }
9154 break;
9155 }
9156 #endif /* CONFIG_SECLUDED_MEMORY */
9157
9158 default:
9159 {
9160 /*
9161 * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
9162 * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
9163 * the caller is responsible for determing if the page is on that queue, and if so, must
9164 * either first remove it (it needs both the page queues lock and the object lock to do
9165 * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
9166 *
9167 * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
9168 * or any of the undefined states
9169 */
9170 panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
9171 break;
9172 }
9173 }
9174 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
9175 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
9176
9177 if (remove_from_specialq == TRUE) {
9178 vm_page_remove_from_specialq(mem);
9179 }
9180 if (was_pageable) {
9181 if (m_object->internal) {
9182 vm_page_pageable_internal_count--;
9183 } else {
9184 vm_page_pageable_external_count--;
9185 }
9186 }
9187 }
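
/*
 * Illustrative caller sketch, assuming a page currently on one of the
 * pageable queues: removal and requeueing happen back to back under the
 * page queues lock, since the vm_page_enqueue_* routines below assert
 * VM_PAGE_NOT_ON_Q on entry.
 *
 *     vm_page_lockspin_queues();
 *     vm_page_queues_remove(mem, TRUE);
 *     vm_page_enqueue_active(mem, FALSE);
 *     vm_page_unlock_queues();
 */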

void
vm_page_remove_internal(vm_page_t page)
{
    vm_object_t __object = VM_PAGE_OBJECT(page);
    if (page == __object->memq_hint) {
        vm_page_t __new_hint;
        vm_page_queue_entry_t __qe;
        __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
        if (vm_page_queue_end(&__object->memq, __qe)) {
            __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
            if (vm_page_queue_end(&__object->memq, __qe)) {
                __qe = NULL;
            }
        }
        __new_hint = (vm_page_t)((uintptr_t) __qe);
        __object->memq_hint = __new_hint;
    }
    vm_page_queue_remove(&__object->memq, page, vmp_listq);
#if CONFIG_SECLUDED_MEMORY
    if (__object->eligible_for_secluded) {
        vm_page_secluded.eligible_for_secluded--;
    }
#endif /* CONFIG_SECLUDED_MEMORY */
}

void
vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
{
    vm_object_t m_object;

    m_object = VM_PAGE_OBJECT(mem);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(!mem->vmp_fictitious);
    assert(!mem->vmp_laundry);
    assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
    vm_page_check_pageable_safe(mem);

    if (m_object->internal) {
        mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;

        if (first == TRUE) {
            vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
        } else {
            vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
        }

        vm_page_anonymous_count++;
        vm_page_pageable_internal_count++;
    } else {
        mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;

        if (first == TRUE) {
            vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
        } else {
            vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
        }

        vm_page_pageable_external_count++;
    }
    vm_page_inactive_count++;
    token_new_pagecount++;

    vm_page_add_to_specialq(mem, FALSE);
}

void
vm_page_enqueue_active(vm_page_t mem, boolean_t first)
{
    vm_object_t m_object;

    m_object = VM_PAGE_OBJECT(mem);

    LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    assert(!mem->vmp_fictitious);
    assert(!mem->vmp_laundry);
    assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
    vm_page_check_pageable_safe(mem);

    mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
    if (first == TRUE) {
        vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
    } else {
        vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
    }
    vm_page_active_count++;

    if (m_object->internal) {
        vm_page_pageable_internal_count++;
    } else {
        vm_page_pageable_external_count++;
    }

    vm_page_add_to_specialq(mem, FALSE);
    vm_page_balance_inactive(3);
}
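
/*
 * The 'first' flag in the two routines above selects head vs. tail
 * placement. A minimal sketch, assuming 'mem' is not on any queue and
 * that the pageout scan consumes from the head:
 *
 *     vm_page_enqueue_inactive(mem, TRUE);    // head: next reclaim candidate
 *     vm_page_enqueue_inactive(mem, FALSE);   // tail: full trip through queue
 */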

/*
 * Pages from special kernel objects shouldn't
 * be placed on pageable queues.
 */
void
vm_page_check_pageable_safe(vm_page_t page)
{
    vm_object_t page_object;

    page_object = VM_PAGE_OBJECT(page);

    if (is_kernel_object(page_object)) {
        panic("vm_page_check_pageable_safe: trying to add page "
            "from a kernel object to pageable queue");
    }

    if (page_object == compressor_object) {
        panic("vm_page_check_pageable_safe: trying to add page "
            "from compressor object (%p) to pageable queue", compressor_object);
    }
}

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * wired page diagnose
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <libkern/OSKextLibPrivate.h>

#define KA_SIZE(namelen, subtotalscount)        \
    (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))

#define KA_NAME(alloc)  \
    ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))

#define KA_NAME_LEN(alloc)      \
    (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
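
/*
 * Layout sketch for the macros above: a site is one variable-size block,
 * header first, then subtotals[], then the NUL-terminated name. E.g. for
 * a hypothetical site named "mysite" (namelen 6) with 2 subtotals:
 *
 *     KA_SIZE(6, 2) == sizeof(struct vm_allocation_site)
 *         + 6 + 1                                  // name + NUL
 *         + 2 * sizeof(struct vm_allocation_total);
 *
 * and KA_NAME(alloc) points just past subtotals[2], at the name bytes.
 */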

vm_tag_t
vm_tag_bt(void)
{
    uintptr_t* frameptr;
    uintptr_t* frameptr_next;
    uintptr_t retaddr;
    uintptr_t kstackb, kstackt;
    const vm_allocation_site_t * site;
    thread_t cthread;
    kern_allocation_name_t name;

    cthread = current_thread();
    if (__improbable(cthread == NULL)) {
        return VM_KERN_MEMORY_OSFMK;
    }

    if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
        if (!name->tag) {
            vm_tag_alloc(name);
        }
        return name->tag;
    }

    kstackb = cthread->kernel_stack;
    kstackt = kstackb + kernel_stack_size;

    /* Load stack frame pointer (EBP on x86) into frameptr */
    frameptr = __builtin_frame_address(0);
    site = NULL;
    while (frameptr != NULL) {
        /* Verify thread stack bounds */
        if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
            break;
        }

        /* Next frame pointer is pointed to by the previous one */
        frameptr_next = (uintptr_t*) *frameptr;
#if defined(HAS_APPLE_PAC)
        frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
#endif

        /* Pull return address from one spot above the frame pointer */
        retaddr = *(frameptr + 1);

#if defined(HAS_APPLE_PAC)
        retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
#endif

        if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
            || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
            site = OSKextGetAllocationSiteForCaller(retaddr);
            break;
        }
        frameptr = frameptr_next;
    }

    if (site) {
        return site->tag;
    }

#if MACH_ASSERT
    /*
     * Kernel tests appear here as unrecognized call sites and would get
     * no memory tag. Give them a default tag to prevent panics later.
     */
    if (thread_get_test_option(test_option_vm_prevent_wire_tag_panic)) {
        return VM_KERN_MEMORY_OSFMK;
    }
#endif

    return VM_KERN_MEMORY_NONE;
}
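
/*
 * Frame layout assumed by the walk above (standard frame-pointer chain):
 *
 *     frameptr[0]   saved frame pointer of the caller (the next link)
 *     frameptr[1]   return address into the caller
 *
 * Hence frameptr + 2 is the highest address dereferenced per frame, which
 * is exactly what the stack-bounds check compares against kstackt.
 */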

static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];

void
vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
{
    vm_tag_t tag;
    uint64_t avail;
    uint32_t idx;
    vm_allocation_site_t * prev;

    if (site->tag) {
        return;
    }

    idx = 0;
    while (TRUE) {
        avail = free_tag_bits[idx];
        if (avail) {
            tag = (vm_tag_t)__builtin_clzll(avail);
            avail &= ~(1ULL << (63 - tag));
            free_tag_bits[idx] = avail;
            tag += (idx << 6);
            break;
        }
        idx++;
        if (idx >= ARRAY_COUNT(free_tag_bits)) {
            for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
                prev = vm_allocation_sites[idx];
                if (!prev) {
                    continue;
                }
                if (!KA_NAME_LEN(prev)) {
                    continue;
                }
                if (!prev->tag) {
                    continue;
                }
                if (prev->total) {
                    continue;
                }
                if (1 != prev->refcount) {
                    continue;
                }

                assert(idx == prev->tag);
                tag = (vm_tag_t)idx;
                prev->tag = VM_KERN_MEMORY_NONE;
                *releasesiteP = prev;
                break;
            }
            if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
                tag = VM_KERN_MEMORY_ANY;
            }
            break;
        }
    }
    site->tag = tag;

    OSAddAtomic16(1, &site->refcount);

    if (VM_KERN_MEMORY_ANY != tag) {
        vm_allocation_sites[tag] = site;
    }

    if (tag > vm_allocation_tag_highest) {
        vm_allocation_tag_highest = tag;
    }
}

static void
vm_tag_free_locked(vm_tag_t tag)
{
    uint64_t avail;
    uint32_t idx;
    uint64_t bit;

    if (VM_KERN_MEMORY_ANY == tag) {
        return;
    }

    idx = (tag >> 6);
    avail = free_tag_bits[idx];
    tag &= 63;
    bit = (1ULL << (63 - tag));
    assert(!(avail & bit));
    free_tag_bits[idx] = (avail | bit);
}
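
/*
 * Worked example of the bitmap encoding used by the two routines above:
 * tag T maps to word (T >> 6) and bit (63 - (T & 63)), so the smallest
 * free tag in a word is recovered by count-leading-zeros. For tag 70:
 *
 *     idx = 70 >> 6 = 1;  bit = 1ULL << (63 - (70 & 63)) = 1ULL << 57;
 *
 * and on allocation, if bit 57 is the highest set bit of free_tag_bits[1],
 * __builtin_clzll() yields 6, giving tag (1 << 6) + 6 == 70 back.
 */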

static void
vm_tag_init(void)
{
    vm_tag_t tag;
    for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
        vm_tag_free_locked(tag);
    }

    for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
        vm_tag_free_locked(tag);
    }
}

vm_tag_t
vm_tag_alloc(vm_allocation_site_t * site)
{
    vm_allocation_site_t * releasesite;

    if (!site->tag) {
        releasesite = NULL;
        lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
        vm_tag_alloc_locked(site, &releasesite);
        lck_ticket_unlock(&vm_allocation_sites_lock);
        if (releasesite) {
            kern_allocation_name_release(releasesite);
        }
    }

    return site->tag;
}
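
/*
 * Minimal usage sketch, assuming a statically declared site (the plain
 * declaration shown is hypothetical, not a macro from this file): the tag
 * is assigned lazily on first use, and later calls just read site->tag.
 *
 *     static vm_allocation_site_t example_site;
 *     vm_tag_t tag = vm_tag_alloc(&example_site);
 */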

#if VM_BTLOG_TAGS
#define VM_KERN_MEMORY_STR_MAX_LEN (32)
TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
#define VM_TAG_BTLOG_SIZE (16u << 10)

btlog_t vmtaglog_btlog;
vm_tag_t vmtaglog_tag;

static void
vm_tag_log(vm_object_t object, int64_t delta, void *fp)
{
    if (is_kernel_object(object)) {
        /* kernel object backtraces are tracked in vm entries */
        return;
    }
    if (delta > 0) {
        btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
        btlog_record(vmtaglog_btlog, object, 0, ref);
    } else if (object->wired_page_count == 0) {
        btlog_erase(vmtaglog_btlog, object);
    }
}

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif /* ARRAY_SIZE */
#define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
const char *vm_kern_memory_strs[] = {
    VM_KERN_MEMORY_ELEM(OSFMK),
    VM_KERN_MEMORY_ELEM(BSD),
    VM_KERN_MEMORY_ELEM(IOKIT),
    VM_KERN_MEMORY_ELEM(LIBKERN),
    VM_KERN_MEMORY_ELEM(OSKEXT),
    VM_KERN_MEMORY_ELEM(KEXT),
    VM_KERN_MEMORY_ELEM(IPC),
    VM_KERN_MEMORY_ELEM(STACK),
    VM_KERN_MEMORY_ELEM(CPU),
    VM_KERN_MEMORY_ELEM(PMAP),
    VM_KERN_MEMORY_ELEM(PTE),
    VM_KERN_MEMORY_ELEM(ZONE),
    VM_KERN_MEMORY_ELEM(KALLOC),
    VM_KERN_MEMORY_ELEM(COMPRESSOR),
    VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
    VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
    VM_KERN_MEMORY_ELEM(WAITQ),
    VM_KERN_MEMORY_ELEM(DIAG),
    VM_KERN_MEMORY_ELEM(LOG),
    VM_KERN_MEMORY_ELEM(FILE),
    VM_KERN_MEMORY_ELEM(MBUF),
    VM_KERN_MEMORY_ELEM(UBC),
    VM_KERN_MEMORY_ELEM(SECURITY),
    VM_KERN_MEMORY_ELEM(MLOCK),
    VM_KERN_MEMORY_ELEM(REASON),
    VM_KERN_MEMORY_ELEM(SKYWALK),
    VM_KERN_MEMORY_ELEM(LTABLE),
    VM_KERN_MEMORY_ELEM(HV),
    VM_KERN_MEMORY_ELEM(KALLOC_DATA),
    VM_KERN_MEMORY_ELEM(RETIRED),
    VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
    VM_KERN_MEMORY_ELEM(TRIAGE),
    VM_KERN_MEMORY_ELEM(RECOUNT),
};

static vm_tag_t
vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
{
    for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
        if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
            return i;
        }
    }

    printf("Unable to find vm tag %s for btlog\n", tagstr);
    return VM_KERN_MEMORY_NONE;
}

__startup_func
static void
vm_btlog_init(void)
{
    vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);

    if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
        vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
    }
}
STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
#endif /* VM_BTLOG_TAGS */
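
/*
 * Usage note: on kernels built with VM_BTLOG_TAGS, the tag to trace is
 * chosen by name through the "vmtaglog" boot-arg declared above, e.g.
 *
 *     vmtaglog=PMAP
 *
 * resolves to VM_KERN_MEMORY_PMAP via vm_tag_str_to_idx() and creates the
 * backtrace log at startup; an unrecognized name leaves logging disabled.
 */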

void
vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
{
    assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);

    kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
}

uint64_t
vm_tag_get_size(vm_tag_t tag)
{
    vm_allocation_site_t *allocation;

    assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);

    allocation = vm_allocation_sites[tag];
    return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
}

void
kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
{
    uint64_t value;

    value = os_atomic_add(&allocation->total, delta, relaxed);
    if (delta < 0) {
        assertf(value + (uint64_t)-delta > value,
            "tag %d, site %p", allocation->tag, allocation);
    }

#if DEBUG || DEVELOPMENT
    if (value > allocation->peak) {
        os_atomic_max(&allocation->peak, value, relaxed);
    }
#endif /* DEBUG || DEVELOPMENT */

    if (value == (uint64_t)delta && !allocation->tag) {
        vm_tag_alloc(allocation);
    }

#if VM_BTLOG_TAGS
    if (vmtaglog_tag && (allocation->tag == vmtaglog_tag) && object) {
        vm_tag_log(object, delta, __builtin_frame_address(0));
    }
#endif /* VM_BTLOG_TAGS */
}
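
/*
 * Worked example for the underflow check above: os_atomic_add() returns
 * the post-add total, so for a decrement the pre-add total equals
 * value + (uint64_t)-delta. With total == 4096 and delta == -8192:
 *
 *     value == 4096 - 8192                 // wraps to a huge uint64_t
 *     value + 8192 == 4096 < value         // so the assertf fires
 *
 * while any legitimate decrement keeps value + (uint64_t)-delta > value.
 */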

#if VM_TAG_SIZECLASSES

void
vm_allocation_zones_init(void)
{
    vm_offset_t addr;
    vm_size_t size;

    const vm_tag_t early_tags[] = {
        VM_KERN_MEMORY_DIAG,
        VM_KERN_MEMORY_KALLOC,
        VM_KERN_MEMORY_KALLOC_DATA,
        VM_KERN_MEMORY_KALLOC_TYPE,
        VM_KERN_MEMORY_LIBKERN,
        VM_KERN_MEMORY_OSFMK,
        VM_KERN_MEMORY_RECOUNT,
    };

    size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **)
        + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);

    kmem_alloc(kernel_map, &addr, round_page(size),
        KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
        VM_KERN_MEMORY_DIAG);

    vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
    addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t **);

    // prepopulate early tag ranges so allocations
    // in vm_tag_update_zone_size() and early boot won't recurse
    for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
        vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
        addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
    }
}

__attribute__((noinline))
static vm_tag_t
vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
{
    vm_allocation_zone_total_t *stats;
    vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;

    flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
    stats = kalloc_data(size, flags);
    if (!stats) {
        return VM_KERN_MEMORY_NONE;
    }
    if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
        kfree_data(stats, size);
    }
    return tag;
}

vm_tag_t
vm_tag_will_update_zone(vm_tag_t tag, uint32_t zflags)
{
    assert(VM_KERN_MEMORY_NONE != tag);
    assert(tag < VM_MAX_TAG_VALUE);

    if (__probable(vm_allocation_zone_totals[tag])) {
        return tag;
    }
    return vm_tag_zone_stats_alloc(tag, zflags);
}

void
vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
{
    vm_allocation_zone_total_t *stats;
    vm_size_t value;

    assert(VM_KERN_MEMORY_NONE != tag);
    assert(tag < VM_MAX_TAG_VALUE);

    if (zidx >= VM_TAG_SIZECLASSES) {
        return;
    }

    stats = vm_allocation_zone_totals[tag];
    assert(stats);
    stats += zidx;

    value = os_atomic_add(&stats->vazt_total, delta, relaxed);
    if (delta < 0) {
        assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
        return;
    } else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
        os_atomic_max(&stats->vazt_peak, value, relaxed);
    }
}

#endif /* VM_TAG_SIZECLASSES */

void
kern_allocation_update_subtotal(kern_allocation_name_t allocation, vm_tag_t subtag, int64_t delta)
{
    kern_allocation_name_t other;
    struct vm_allocation_total * total;
    uint32_t subidx;

    assert(VM_KERN_MEMORY_NONE != subtag);
    lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
    for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
        total = &allocation->subtotals[subidx];
        if (subtag == total->tag) {
            break;
        }
    }
    if (subidx >= allocation->subtotalscount) {
        for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
            total = &allocation->subtotals[subidx];
            if ((VM_KERN_MEMORY_NONE == total->tag)
                || !total->total) {
                total->tag = (vm_tag_t)subtag;
                break;
            }
        }
    }
    assert(subidx < allocation->subtotalscount);
    if (subidx >= allocation->subtotalscount) {
        lck_ticket_unlock(&vm_allocation_sites_lock);
        return;
    }
    if (delta < 0) {
        assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
    }
    OSAddAtomic64(delta, &total->total);
    lck_ticket_unlock(&vm_allocation_sites_lock);

    other = vm_allocation_sites[subtag];
    assert(other);
    if (delta < 0) {
        assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
    }
    OSAddAtomic64(delta, &other->mapped);
}

const char *
kern_allocation_get_name(kern_allocation_name_t allocation)
{
    return KA_NAME(allocation);
}

kern_allocation_name_t
kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
{
    kern_allocation_name_t allocation;
    uint16_t namelen;

    namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);

    allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
    allocation->refcount = 1;
    allocation->subtotalscount = subtotalscount;
    allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
    strlcpy(KA_NAME(allocation), name, namelen + 1);

    vm_tag_alloc(allocation);
    return allocation;
}

void
kern_allocation_name_release(kern_allocation_name_t allocation)
{
    assert(allocation->refcount > 0);
    if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
        kfree_data(allocation,
            KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
    }
}
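
/*
 * Lifecycle sketch, assuming a hypothetical client ("com.example.pool" is
 * illustrative): allocate a named site once, charge and discharge it over
 * its lifetime, then drop the last reference.
 *
 *     kern_allocation_name_t kan;
 *
 *     kan = kern_allocation_name_allocate("com.example.pool", 0);
 *     kern_allocation_update_size(kan, (int64_t)ptoa_64(npages), VM_OBJECT_NULL);
 *     ...
 *     kern_allocation_update_size(kan, -(int64_t)ptoa_64(npages), VM_OBJECT_NULL);
 *     kern_allocation_name_release(kan);
 */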

#if !VM_TAG_ACTIVE_UPDATE
static void
vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
{
    if (!object->wired_page_count) {
        return;
    }
    if (!is_kernel_object(object)) {
        assert(object->wire_tag < num_info);
        info[object->wire_tag].size += ptoa_64(object->wired_page_count);
    }
}

typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
    unsigned int num_info, vm_object_t object);

static void
vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
    vm_page_iterate_proc proc, purgeable_q_t queue,
    int group)
{
    vm_object_t object;

    for (object = (vm_object_t) queue_first(&queue->objq[group]);
        !queue_end(&queue->objq[group], (queue_entry_t) object);
        object = (vm_object_t) queue_next(&object->objq)) {
        proc(info, num_info, object);
    }
}

static void
vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
    vm_page_iterate_proc proc)
{
    vm_object_t object;

    lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
    queue_iterate(&vm_objects_wired,
        object,
        vm_object_t,
        wired_objq)
    {
        proc(info, num_info, object);
    }
    lck_spin_unlock(&vm_objects_wired_lock);
}
#endif /* ! VM_TAG_ACTIVE_UPDATE */

static uint64_t
process_account(mach_memory_info_t * info, unsigned int num_info,
    uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
{
    size_t namelen;
    unsigned int idx, count, nextinfo;
    vm_allocation_site_t * site;
    lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);

    for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
        site = vm_allocation_sites[idx];
        if (!site) {
            continue;
        }
        info[idx].mapped = site->mapped;
        info[idx].tag = site->tag;
        if (!iterated) {
            info[idx].size = site->total;
#if DEBUG || DEVELOPMENT
            info[idx].peak = site->peak;
#endif /* DEBUG || DEVELOPMENT */
        } else {
            if (!site->subtotalscount && (site->total != info[idx].size)) {
                printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
                info[idx].size = site->total;
            }
        }
        info[idx].flags |= VM_KERN_SITE_WIRED;
        if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
            info[idx].site = idx;
            info[idx].flags |= VM_KERN_SITE_TAG;
            if (VM_KERN_MEMORY_ZONE == idx) {
                info[idx].flags |= VM_KERN_SITE_HIDE;
                info[idx].flags &= ~VM_KERN_SITE_WIRED;
                info[idx].collectable_bytes = zones_collectable_bytes;
            }
        } else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
            info[idx].site = 0;
            info[idx].flags |= VM_KERN_SITE_NAMED;
            if (namelen > sizeof(info[idx].name)) {
                namelen = sizeof(info[idx].name);
            }
            strncpy(&info[idx].name[0], KA_NAME(site), namelen);
        } else if (VM_TAG_KMOD & site->flags) {
            info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
            info[idx].flags |= VM_KERN_SITE_KMOD;
        } else {
            info[idx].site = VM_KERNEL_UNSLIDE(site);
            info[idx].flags |= VM_KERN_SITE_KERNEL;
        }
    }

    nextinfo = (vm_allocation_tag_highest + 1);
    count = nextinfo;
    if (count >= num_info) {
        count = num_info;
    }

    for (idx = 0; idx < count; idx++) {
        site = vm_allocation_sites[idx];
        if (!site) {
            continue;
        }
#if VM_TAG_SIZECLASSES
        vm_allocation_zone_total_t * zone;
        unsigned int zidx;

        if (!redact_info
            && vm_allocation_zone_totals
            && (zone = vm_allocation_zone_totals[idx])
            && (nextinfo < num_info)) {
            for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
                if (!zone[zidx].vazt_peak) {
                    continue;
                }
                info[nextinfo] = info[idx];
                info[nextinfo].zone = zone_index_from_tag_index(zidx);
                info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
                info[nextinfo].flags |= VM_KERN_SITE_ZONE;
                info[nextinfo].flags |= VM_KERN_SITE_KALLOC;
                info[nextinfo].size = zone[zidx].vazt_total;
                info[nextinfo].peak = zone[zidx].vazt_peak;
                info[nextinfo].mapped = 0;
                nextinfo++;
            }
        }
#endif /* VM_TAG_SIZECLASSES */
        if (site->subtotalscount) {
            uint64_t mapped, mapcost, take;
            uint32_t sub;
            vm_tag_t alloctag;

            info[idx].size = site->total;
            mapped = info[idx].size;
            info[idx].mapped = mapped;
            mapcost = 0;
            for (sub = 0; sub < site->subtotalscount; sub++) {
                alloctag = site->subtotals[sub].tag;
                assert(alloctag < num_info);
                if (info[alloctag].name[0]) {
                    continue;
                }
                take = site->subtotals[sub].total;
                if (take > info[alloctag].size) {
                    take = info[alloctag].size;
                }
                if (take > mapped) {
                    take = mapped;
                }
                info[alloctag].mapped -= take;
                info[alloctag].size -= take;
                mapped -= take;
                mapcost += take;
            }
            info[idx].size = mapcost;
        }
    }
    lck_ticket_unlock(&vm_allocation_sites_lock);

    return 0;
}

uint32_t
vm_page_diagnose_estimate(void)
{
    vm_allocation_site_t * site;
    uint32_t count = zone_view_count;
    uint32_t idx;

    lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
    for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
        site = vm_allocation_sites[idx];
        if (!site) {
            continue;
        }
        count++;
#if VM_TAG_SIZECLASSES
        if (vm_allocation_zone_totals) {
            vm_allocation_zone_total_t * zone;
            zone = vm_allocation_zone_totals[idx];
            if (!zone) {
                continue;
            }
            for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
                count += (zone[zidx].vazt_peak != 0);
            }
        }
#endif
    }
    lck_ticket_unlock(&vm_allocation_sites_lock);

    /* some slop for new tags created */
    count += 8;
    count += VM_KERN_COUNTER_COUNT;

    return count;
}

static void
vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
    bool percpu)
{
    zpercpu_foreach(zs, zstats) {
        info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
    }
    if (percpu) {
        info->size *= zpercpu_count();
    }
    info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
}

static void
vm_page_add_info(
    mach_memory_info_t *info,
    zone_stats_t stats,
    bool per_cpu,
    const char *parent_heap_name,
    const char *parent_zone_name,
    const char *view_name)
{
    vm_page_diagnose_zone_stats(info, stats, per_cpu);
    snprintf(info->name, sizeof(info->name),
        "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
}

static void
vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
{
    vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
        z->z_name, "raw");
}

static void
vm_page_add_view(
    mach_memory_info_t *info,
    zone_stats_t stats,
    const char *parent_heap_name,
    const char *parent_zone_name,
    const char *view_name)
{
    vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
        view_name);
}

static uint32_t
vm_page_diagnose_heap_views(
    mach_memory_info_t *info,
    kalloc_heap_t kh,
    const char *parent_heap_name,
    const char *parent_zone_name)
{
    uint32_t i = 0;

    while (kh) {
        vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
            parent_zone_name, kh->kh_name);
        kh = kh->kh_views;
        i++;
    }
    return i;
}

static uint32_t
vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
{
    uint32_t i = 0;

    for (; i < KHEAP_NUM_ZONES; i++) {
        vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
    }

    i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
        NULL);
    return i;
}

static int
vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
{
    uint32_t idx = 0;
    vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
        "", "raw");
    idx++;

    for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
        struct kheap_info heap = kalloc_type_heap_array[i];
        char heap_num_tmp[MAX_ZONE_NAME] = "";
        const char *heap_num;

        snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
        heap_num = &heap_num_tmp[0];

        for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
            ktv = (kalloc_type_var_view_t) ktv->kt_next) {
            if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
                vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
                    heap_num, ktv->kt_name);
                idx++;
            }
        }

        idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
            KHEAP_KT_VAR->kh_name, heap_num);
    }

    return idx;
}

kern_return_t
vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
{
    uint64_t wired_size;
    uint64_t wired_managed_size;
    uint64_t wired_reserved_size;
    boolean_t iterate;
    mach_memory_info_t * counts;
    uint32_t i;

    bzero(info, num_info * sizeof(mach_memory_info_t));

    if (!vm_page_wire_count_initial) {
        return KERN_ABORTED;
    }

#if !XNU_TARGET_OS_OSX
    wired_size = ptoa_64(vm_page_wire_count);
    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
#else /* !XNU_TARGET_OS_OSX */
    wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
    wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
#endif /* !XNU_TARGET_OS_OSX */
    wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);

    wired_size += booter_size;

    assert(num_info >= VM_KERN_COUNTER_COUNT);
    num_info -= VM_KERN_COUNTER_COUNT;
    counts = &info[num_info];

#define SET_COUNT(xcount, xsize, xflags)        \
    counts[xcount].tag = VM_MAX_TAG_VALUE + xcount;     \
    counts[xcount].site = (xcount);             \
    counts[xcount].size = (xsize);              \
    counts[xcount].mapped = (xsize);            \
    counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;

    SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
    SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
    SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
    SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
    SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
    SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
    SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
    SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
    SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
#if CONFIG_SPTM
    SET_COUNT(VM_KERN_COUNT_EXCLAVES_CARVEOUT, SPTMArgs->sk_carveout_size, 0);
#endif

#define SET_MAP(xcount, xsize, xfree, xlargest) \
    counts[xcount].site = (xcount);             \
    counts[xcount].size = (xsize);              \
    counts[xcount].mapped = (xsize);            \
    counts[xcount].free = (xfree);              \
    counts[xcount].largest = (xlargest);        \
    counts[xcount].flags = VM_KERN_SITE_COUNTER;

    vm_map_size_t map_size, map_free, map_largest;

    vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);

    zone_map_sizes(&map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);

    assert(num_info >= zone_view_count);
    num_info -= zone_view_count;
    counts = &info[num_info];
    i = 0;

    if (!redact_info) {
        if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
            i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
        }
        if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
            i += vm_page_diagnose_kt_heaps(counts + i);
        }
        assert(i <= zone_view_count);

        zone_index_foreach(zidx) {
            zone_t z = &zone_array[zidx];
            zone_security_flags_t zsflags = zone_security_array[zidx];
            zone_view_t zv = z->z_views;

            if (zv == NULL) {
                continue;
            }

            zone_stats_t zv_stats_head = z->z_stats;
            bool has_raw_view = false;

            for (; zv; zv = zv->zv_next) {
                /*
                 * kalloc_types that allocate from the same zone are linked
                 * as views. Only print the ones that have their own stats.
                 */
                if (zv->zv_stats == zv_stats_head) {
                    continue;
                }
                has_raw_view = true;
                vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
                    z->z_percpu);
                snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
                    zone_heap_name(z), z->z_name, zv->zv_name);
                i++;
                assert(i <= zone_view_count);
            }

            /*
             * Print raw views for non kalloc or kalloc_type zones
             */
            bool kalloc_type = zsflags.z_kalloc_type;
            if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
                (kalloc_type && has_raw_view)) {
                vm_page_diagnose_zone(counts + i, z);
                i++;
                assert(i <= zone_view_count);
            }
        }
    }

    iterate = !VM_TAG_ACTIVE_UPDATE;
    if (iterate) {
        enum { kMaxKernelDepth = 1 };
        vm_map_t maps[kMaxKernelDepth];
        vm_map_entry_t entries[kMaxKernelDepth];
        vm_map_t map;
        vm_map_entry_t entry;
        vm_object_offset_t offset;
        vm_page_t page;
        int stackIdx, count;

#if !VM_TAG_ACTIVE_UPDATE
        vm_page_iterate_objects(info, num_info, &vm_page_count_object);
#endif /* ! VM_TAG_ACTIVE_UPDATE */

        map = kernel_map;
        stackIdx = 0;
        while (map) {
            vm_map_lock(map);
            for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
                if (entry->is_sub_map) {
                    assert(stackIdx < kMaxKernelDepth);
                    maps[stackIdx] = map;
                    entries[stackIdx] = entry;
                    stackIdx++;
                    map = VME_SUBMAP(entry);
                    entry = NULL;
                    break;
                }
                if (is_kernel_object(VME_OBJECT(entry))) {
                    count = 0;
                    vm_object_lock(VME_OBJECT(entry));
                    for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
                        page = vm_page_lookup(VME_OBJECT(entry), offset);
                        if (page && VM_PAGE_WIRED(page)) {
                            count++;
                        }
                    }
                    vm_object_unlock(VME_OBJECT(entry));

                    if (count) {
                        assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
                        assert(VME_ALIAS(entry) < num_info);
                        info[VME_ALIAS(entry)].size += ptoa_64(count);
                    }
                }
                while (map && (entry == vm_map_last_entry(map))) {
                    vm_map_unlock(map);
                    if (!stackIdx) {
                        map = NULL;
                    } else {
                        --stackIdx;
                        map = maps[stackIdx];
                        entry = entries[stackIdx];
                    }
                }
            }
        }
    }

    process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);

    return KERN_SUCCESS;
}
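
/*
 * Call-pattern sketch, assuming a diagnostics consumer (allocation of the
 * array is elided): size the buffer with the estimate, then fill it; the
 * estimate deliberately pads for tags created in between the two calls.
 *
 *     unsigned int num = vm_page_diagnose_estimate();
 *     mach_memory_info_t *array = ...;   // num * sizeof(mach_memory_info_t)
 *     kern_return_t kr = vm_page_diagnose(array, num, 0, false);
 */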

#if DEBUG || DEVELOPMENT

kern_return_t
vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
{
    kern_return_t ret;
    vm_size_t zsize;
    vm_map_t map;
    vm_map_entry_t entry;

    zsize = zone_element_info((void *) addr, tag);
    if (zsize) {
        *zone_size = *size = zsize;
        return KERN_SUCCESS;
    }

    *zone_size = 0;
    ret = KERN_INVALID_ADDRESS;
    for (map = kernel_map; map;) {
        vm_map_lock(map);
        if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
            break;
        }
        if (entry->is_sub_map) {
            if (map != kernel_map) {
                break;
            }
            map = VME_SUBMAP(entry);
            continue;
        }
        if (entry->vme_start != addr) {
            break;
        }
        *tag = (vm_tag_t)VME_ALIAS(entry);
        *size = (entry->vme_end - addr);
        ret = KERN_SUCCESS;
        break;
    }
    if (map != kernel_map) {
        vm_map_unlock(map);
    }
    vm_map_unlock(kernel_map);

    return ret;
}

#endif /* DEBUG || DEVELOPMENT */

uint32_t
vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
{
    vm_allocation_site_t * site;
    uint32_t kmodId;

    kmodId = 0;
    lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
    if ((site = vm_allocation_sites[tag])) {
        if (VM_TAG_KMOD & site->flags) {
            kmodId = OSKextGetKmodIDForSite(site, name, namelen);
        }
    }
    lck_ticket_unlock(&vm_allocation_sites_lock);

    return kmodId;
}
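
/*
 * Usage sketch, assuming the caller wants the owning kext for a tag;
 * KMOD_MAX_NAME is the usual kmod name bound in xnu.
 *
 *     char kext_name[KMOD_MAX_NAME];
 *     uint32_t kmod_id = vm_tag_get_kext(tag, kext_name, sizeof(kext_name));
 *     if (kmod_id != 0) {
 *         printf("tag %u owned by kmod %u (%s)\n", tag, kmod_id, kext_name);
 *     }
 */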


#if CONFIG_SECLUDED_MEMORY
/*
 * Note that there's no locking around other accesses to vm_page_secluded_target.
 * That should be OK, since these are the only places where it can be changed after
 * initialization. Other users (like vm_pageout) may see the wrong value briefly,
 * but will eventually get the correct value. This brief mismatch is OK as pageout
 * and page freeing will auto-adjust the vm_page_secluded_count to match the target
 * over time.
 */
unsigned int vm_page_secluded_suppress_cnt = 0;
unsigned int vm_page_secluded_save_target;

LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);

void
start_secluded_suppression(task_t task)
{
    if (task->task_suppressed_secluded) {
        return;
    }
    lck_spin_lock(&secluded_suppress_slock);
    if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
        task->task_suppressed_secluded = TRUE;
        vm_page_secluded_save_target = vm_page_secluded_target;
        vm_page_secluded_target = 0;
        VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
    }
    lck_spin_unlock(&secluded_suppress_slock);
}

void
stop_secluded_suppression(task_t task)
{
    lck_spin_lock(&secluded_suppress_slock);
    if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
        task->task_suppressed_secluded = FALSE;
        vm_page_secluded_target = vm_page_secluded_save_target;
        VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
    }
    lck_spin_unlock(&secluded_suppress_slock);
}

#endif /* CONFIG_SECLUDED_MEMORY */

/*
 * Move the list of retired pages on the vm_page_queue_retired to
 * their final resting place on retired_pages_object.
 */
void
vm_retire_boot_pages(void)
{
}

/*
 * This holds the reported physical address if an ECC error leads to a panic.
 * SMC will store it in PMU SRAM under the 'sECC' key.
 */
uint64_t ecc_panic_physical_address = 0;


boolean_t
vm_page_created(vm_page_t page)
{
    return (page < &vm_pages[0]) || (page >= &vm_pages[vm_pages_count]);
}
10460