/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_OBJECT_INTERNAL_H_
#define _VM_VM_OBJECT_INTERNAL_H_

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_object_xnu.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern uint16_t vm_object_pagein_throttle;

/*
 * Object locking macros
 */

#define vm_object_lock_init(object) \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
	    (is_kernel_object(object) ? \
	    &kernel_object_lck_attr : \
	    (((object) == compressor_object) ? \
	    &compressor_object_lck_attr : \
	    &vm_object_lck_attr)))
#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check that *someone* holds the lock; the holder is not necessarily
 * the calling thread.
 */
#define vm_object_lock_assert_held(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
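
/*
 * Illustrative sketch (not part of this interface): a helper that relies on
 * its caller holding the object lock can document and enforce that with the
 * assert macros above. Per the caution above, the assertion only proves that
 * *some* thread holds the lock. "hypothetical_resident_count" is a made-up
 * name for illustration, not an XNU function.
 *
 *	static inline unsigned int
 *	hypothetical_resident_count(vm_object_t object)
 *	{
 *		vm_object_lock_assert_held(object);
 *		return object->resident_page_count;
 *	}
 */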


static inline void
VM_OBJECT_SET_PAGER_CREATED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_created = value;
}
static inline void
VM_OBJECT_SET_PAGER_INITIALIZED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_initialized = value;
}
static inline void
VM_OBJECT_SET_PAGER_READY(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_ready = value;
}
static inline void
VM_OBJECT_SET_PAGER_TRUSTED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_trusted = value;
}
static inline void
VM_OBJECT_SET_CAN_PERSIST(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->can_persist = value;
}
static inline void
VM_OBJECT_SET_INTERNAL(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->internal = value;
}
static inline void
VM_OBJECT_SET_PRIVATE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->private = value;
}
static inline void
VM_OBJECT_SET_PAGEOUT(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pageout = value;
}
static inline void
VM_OBJECT_SET_ALIVE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->alive = value;
}
static inline void
VM_OBJECT_SET_PURGABLE(
	vm_object_t object,
	unsigned int value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgable = value;
	assert3u(object->purgable, ==, value);
}
static inline void
VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgeable_only_by_kernel = value;
}
static inline void
VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgeable_when_ripe = value;
}
static inline void
VM_OBJECT_SET_SHADOWED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->shadowed = value;
}
static inline void
VM_OBJECT_SET_TRUE_SHARE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->true_share = value;
}
static inline void
VM_OBJECT_SET_TERMINATING(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->terminating = value;
}
static inline void
VM_OBJECT_SET_NAMED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->named = value;
}
static inline void
VM_OBJECT_SET_SHADOW_SEVERED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->shadow_severed = value;
}
static inline void
VM_OBJECT_SET_PHYS_CONTIGUOUS(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->phys_contiguous = value;
}
static inline void
VM_OBJECT_SET_NOPHYSCACHE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->nophyscache = value;
}
static inline void
VM_OBJECT_SET_FOR_REALTIME(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->for_realtime = value;
}
static inline void
VM_OBJECT_SET_NO_PAGER_REASON(
	vm_object_t object,
	unsigned int value)
{
	vm_object_lock_assert_exclusive(object);
	object->no_pager_reason = value;
	assert3u(object->no_pager_reason, ==, value);
}
#if FBDP_DEBUG_OBJECT_NO_PAGER
static inline void
VM_OBJECT_SET_FBDP_TRACKED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->fbdp_tracked = value;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
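
/*
 * Usage sketch for the VM_OBJECT_SET_*() accessors above (illustrative only;
 * "object" is assumed to be a valid, referenced vm_object_t): the accessors
 * do not take the lock themselves, they only assert that the caller already
 * holds it exclusively before storing into the bit-field.
 *
 *	vm_object_lock(object);
 *	VM_OBJECT_SET_TRUE_SHARE(object, TRUE);
 *	vm_object_unlock(object);
 */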

/*
 * Declare procedures that operate on VM objects.
 */

__private_extern__ void vm_object_bootstrap(void);

__private_extern__ void vm_object_reaper_init(void);

__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size);

__private_extern__ void _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

__private_extern__ void vm_object_set_size(
	vm_object_t object,
	vm_object_size_t outer_size,
	vm_object_size_t inner_size);

static inline void
vm_object_reference_locked(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	os_ref_retain_locked_raw(&object->ref_count, &vm_object_refgrp);
}

static inline void
vm_object_reference_shared(vm_object_t object)
{
	vm_object_lock_assert_shared(object);
	os_ref_retain_raw(&object->ref_count, &vm_object_refgrp);
}

__private_extern__ void vm_object_reference(
	vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object) \
MACRO_BEGIN \
	vm_object_t RObject = (object); \
	if (RObject) { \
	        vm_object_lock_shared(RObject); \
	        vm_object_reference_shared(RObject); \
	        vm_object_unlock(RObject); \
	} \
MACRO_END

#endif /* MACH_ASSERT */
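
/*
 * Illustrative sketch of taking an extra reference (assumes "object" is a
 * valid vm_object_t the caller can lock): with the lock already held, use the
 * inline variants above instead of vm_object_reference(), which acquires a
 * shared lock itself. The matching release is vm_object_deallocate(),
 * declared below.
 *
 *	vm_object_lock(object);
 *	vm_object_reference_locked(object);
 *	vm_object_unlock(object);
 *	...
 *	vm_object_deallocate(object);
 */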

__private_extern__ void vm_object_deallocate(
	vm_object_t object);

__private_extern__ void vm_object_pmap_protect(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	pmap_t pmap,
	vm_map_size_t pmap_page_size,
	vm_map_offset_t pmap_start,
	vm_prot_t prot);

__private_extern__ void vm_object_pmap_protect_options(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	pmap_t pmap,
	vm_map_size_t pmap_page_size,
	vm_map_offset_t pmap_start,
	vm_prot_t prot,
	int options);

__private_extern__ void vm_object_page_remove(
	vm_object_t object,
	vm_object_offset_t start,
	vm_object_offset_t end);

__private_extern__ void vm_object_deactivate_pages(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t kill_page,
	boolean_t reusable_page,
	boolean_t reusable_no_write,
	struct pmap *pmap,
	/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t object,
	vm_object_offset_t start_offset,
	vm_object_offset_t end_offset,
	boolean_t allow_partial_reuse);

__private_extern__ kern_return_t vm_object_zero(
	vm_object_t object,
	vm_object_offset_t cur_offset,
	vm_object_offset_t end_offset);

__private_extern__ uint64_t vm_object_purge(
	vm_object_t object,
	int flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t object,
	vm_purgable_t control,
	int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count);

__private_extern__ boolean_t vm_object_coalesce(
	vm_object_t prev_object,
	vm_object_t next_object,
	vm_object_offset_t prev_offset,
	vm_object_offset_t next_offset,
	vm_object_size_t prev_size,
	vm_object_size_t next_size);

__private_extern__ boolean_t vm_object_shadow(
	vm_object_t *object,
	vm_object_offset_t *offset,
	vm_object_size_t length,
	boolean_t always_shadow);

__private_extern__ void vm_object_collapse(
	vm_object_t object,
	vm_object_offset_t offset,
	boolean_t can_bypass);

__private_extern__ boolean_t vm_object_copy_quickly(
	vm_object_t object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t *_src_needs_copy,
	boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	bool forking,
	vm_object_t *dst_object,
	vm_object_offset_t *dst_offset,
	boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t interruptible,
	vm_object_t *_result_object);

__private_extern__ vm_object_t vm_object_copy_delayed(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t src_object_shared);

__private_extern__ kern_return_t vm_object_destroy(
	vm_object_t object,
	vm_object_destroy_reason_t reason);

__private_extern__ void vm_object_compressor_pager_create(
	vm_object_t object);

/*
 * Query whether the data for the given (object, offset) pair resides in the
 * compressor. The caller must hold the object lock and ensure that the
 * (object, offset) under inspection is not in the process of being paged
 * in or out (i.e. there is no busy backing page).
 */
__private_extern__ vm_external_state_t vm_object_compressor_pager_state_get(
	vm_object_t object,
	vm_object_offset_t offset);

/*
 * Clear the compressor slot corresponding to an (object, offset) pair. The
 * caller must hold the object lock (exclusive) and ensure that the
 * (object, offset) under inspection is not in the process of being paged
 * in or out (i.e. there is no busy backing page).
 */
__private_extern__ void vm_object_compressor_pager_state_clr(
	vm_object_t object,
	vm_object_offset_t offset);
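
/*
 * Illustrative sketch combining the two calls above (assumes "object" and
 * "offset" were resolved by the caller and that no busy backing page exists,
 * as required by the comments above). Holding the lock exclusively satisfies
 * both the _get() and the _clr() requirements.
 *
 *	vm_object_lock(object);
 *	if (vm_object_compressor_pager_state_get(object, offset) ==
 *	    VM_EXTERNAL_STATE_EXISTS) {
 *		vm_object_compressor_pager_state_clr(object, offset);
 *	}
 *	vm_object_unlock(object);
 */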

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t object,
	vm_object_offset_t offset,
	upl_size_t size,
	upl_t *upl,
	upl_page_info_t *page_info,
	unsigned int *count,
	upl_control_flags_t flags,
	vm_tag_t tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t object1,
	vm_object_t object2,
	vm_object_size_t transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t should_flush,
	boolean_t should_return,
	boolean_t should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_object_offset_t *error_offset,
	int *io_errno,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot);


__private_extern__ vm_object_t vm_object_memory_object_associate(
	memory_object_t pager,
	vm_object_t object,
	vm_object_size_t size,
	boolean_t check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t object,
	vm_object_offset_t *start,
	vm_size_t *length,
	vm_object_fault_info_t fault_info,
	uint32_t *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t object,
	vm_object_offset_t offset,
	ppnum_t phys_page,
	vm_size_t size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t object,
	unsigned int wimg_mode);

extern kern_return_t vm_object_page_op(
	vm_object_t object,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t object,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	uint32_t *range);


__private_extern__ void vm_object_reap_pages(
	vm_object_t object,
	int reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

/*
 * Event waiting handling
 */
__enum_closed_decl(vm_object_wait_reason_t, uint8_t, {
	VM_OBJECT_EVENT_PAGER_INIT = 0,
	VM_OBJECT_EVENT_PAGER_READY = 1,
	VM_OBJECT_EVENT_PAGING_IN_PROGRESS = 2,
	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS = 3,
	VM_OBJECT_EVENT_UNBLOCKED = 4,
	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS = 5,
	VM_OBJECT_EVENT_PAGEIN_THROTTLE = 6,
});
#define VM_OBJECT_EVENT_MAX VM_OBJECT_EVENT_PAGEIN_THROTTLE
/* 7 bits in "all_wanted" */
_Static_assert(VM_OBJECT_EVENT_MAX < 7,
    "vm_object_wait_reason_t must fit in all_wanted");
/*
 * @c vm_object_sleep uses (object + wait_reason) as the wait event; ensure
 * this does not collide with the object lock.
 */
_Static_assert(VM_OBJECT_EVENT_MAX < offsetof(struct vm_object, Lock),
    "Wait reason collides with vm_object->Lock");

extern wait_result_t vm_object_sleep(
	vm_object_t object,
	vm_object_wait_reason_t reason,
	wait_interrupt_t interruptible,
	lck_sleep_action_t action);


static inline void
vm_object_set_wanted(
	vm_object_t object,
	vm_object_wait_reason_t reason)
{
	vm_object_lock_assert_exclusive(object);
	assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << reason);
}

static inline bool
vm_object_wanted(
	vm_object_t object,
	vm_object_wait_reason_t event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}

extern void vm_object_wakeup(
	vm_object_t object,
	vm_object_wait_reason_t reason);
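
/*
 * Illustrative wait/wakeup sketch (hypothetical waiter, not a definitive
 * recipe): since vm_object_sleep() uses (object + reason) as the wait event,
 * a thread that needs the pager to become ready might, with the object
 * locked, loop as below, while the thread that flips pager_ready issues the
 * matching vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY).
 *
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *		    THREAD_UNINT, LCK_SLEEP_DEFAULT);
 *	}
 */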

/*
 * Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object) \
	MACRO_BEGIN \
	int pip = ((object)->paging_in_progress + \
	    (object)->activity_in_progress); \
	if (pip < VM_PIP_DEBUG_MAX_REFS) { \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	            VM_PIP_DEBUG_STACK_FRAMES); \
	} \
	MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

static inline void
vm_object_activity_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN(object);
	if (os_inc_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_begin(%p): overflow\n", object);
	}
}

static inline void
vm_object_activity_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_end(%p): underflow\n", object);
	}
	if (object->paging_in_progress == 0 &&
	    object->activity_in_progress == 0) {
		vm_object_wakeup((object),
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
	}
}

static inline void
vm_object_paging_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN((object));
	if (os_inc_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_begin(%p): overflow\n", object);
	}
}

static inline void
vm_object_paging_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_end(%p): underflow\n", object);
	}
	/*
	 * NB: This broadcast can be noisy, especially because all threads
	 * receiving the wakeup are given a priority floor. In the future, it
	 * would be great to utilize a primitive which can arbitrate
	 * the priority of all waiters and only issue as many wakeups as can be
	 * serviced.
	 */
	if (object->paging_in_progress == vm_object_pagein_throttle - 1) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGEIN_THROTTLE);
	}
	if (object->paging_in_progress == 0) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);
		if (object->activity_in_progress == 0) {
			vm_object_wakeup((object),
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
		}
	}
}

/* Wait for *all* paging and activities on this object to complete */
extern wait_result_t vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible);
/* Wait for *all* paging on this object to complete */
extern wait_result_t vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible);
/* Wait for the number of page-ins on this object to fall below the throttle limit */
extern wait_result_t vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible);
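
/*
 * Illustrative pairing sketch (assumes "object" is locked exclusively):
 * paging activity is bracketed by vm_object_paging_begin()/_end(), and a
 * thread that must let in-flight paging drain can use one of the waiters
 * above, e.g.:
 *
 *	vm_object_paging_begin(object);
 *	... start the page-in or page-out ...
 *	vm_object_paging_end(object);
 *
 *	(void) vm_object_paging_wait(object, THREAD_UNINT);
 *
 * This sketches the intended protocol only; real callers may drop and retake
 * the lock between these steps.
 */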

static inline void
vm_object_mapping_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(!object->mapping_in_progress);
	object->mapping_in_progress = TRUE;
}

static inline void
vm_object_mapping_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(object->mapping_in_progress);
	object->mapping_in_progress = FALSE;
	vm_object_wakeup(object,
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);
}

extern wait_result_t vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible);

#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
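
/*
 * Worked example (assuming a hypothetical 16KB page, i.e. PAGE_MASK 0x3FFF):
 *
 *	vm_object_trunc_page(0x4001) == 0x4000   rounds down to a page boundary
 *	vm_object_round_page(0x4001) == 0x8000   rounds up to the next boundary
 */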

extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object) \
	((object == VM_OBJECT_NULL || \
	((object)->purgable == VM_PURGABLE_DENY && \
	(object)->vo_ledger_tag == 0) || \
	(object)->vo_owner == TASK_NULL) \
	? TASK_NULL             /* not owned */ \
	: (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \
	? kernel_task           /* disowned -> kernel */ \
	: (object)->vo_owner))  /* explicit owner */


extern void vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	int *ledger_idx_composite,
	int *ledger_idx_external_wired,
	boolean_t *do_footprint);

extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32-bit, so this obviously never worked
// for 64-bit pointers and should probably be a real 32-bit ID rather than a
// hashed pointer. Current users only check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((o)))

static inline void
VM_OBJECT_COPY_SET(
	vm_object_t object,
	vm_object_t copy)
{
	vm_object_lock_assert_exclusive(object);
	object->vo_copy = copy;
	if (copy != VM_OBJECT_NULL) {
		object->vo_copy_version++;
	}
}

#endif /* XNU_KERNEL_PRIVATE */

#endif /* _VM_VM_OBJECT_INTERNAL_H_ */