/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object_xnu.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_XNU_H_
#define _VM_VM_OBJECT_XNU_H_

#ifdef XNU_KERNEL_PRIVATE

#include <kern/queue.h>

#ifdef MACH_KERNEL_PRIVATE

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <vm/pmap.h>
#include <vm/vm_external.h>
#include <vm/vm_options.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/vm_page.h>


struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ csm_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	/* boolean_t */ fi_xnu_user_debug:1,
	/* boolean_t */ fi_used_for_tpro:1,
	    __vm_object_fault_info_unused_bits:21;
	int             pmap_options;
};
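
/*
 * Initialization sketch (illustrative, not from the original interface):
 * callers typically zero the structure and then set just the fields
 * relevant to the fault being handled, e.g.
 *
 *	struct vm_object_fault_info fault_info = { 0 };
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 */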


#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq,
	 * and the packed pointers are required to be on a 64-byte
	 * boundary.  This means two things for the vm_object: (1) the
	 * memq struct has to be the first element of the structure so
	 * that we can control its alignment; (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects this
	 * is accomplished via the 'aligned' attribute; for vm_objects
	 * in the zone pool, it is accomplished by rounding the size of
	 * the vm_object element up to the nearest 64-byte size before
	 * creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	os_ref_atomic_t         ref_count;        /* Number of references */
	unsigned int            resident_page_count;
	/* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages
	                                           *  use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *vo_copy;       /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	uint32_t                vo_copy_version;
	uint32_t                vo_inherit_copy_none:1,
	    __vo_unused_padding:31;
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	uint16_t                paging_in_progress;
	uint16_t                vo_size_delta;
	uint32_t                activity_in_progress;

	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:7,     /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                        * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,         /* Purgeable when a token
	                                                * becomes ripe.
	                                                */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	/* This object is mapped
	 * in more than one place
	 * and hence cannot be
	 * coalesced */
	/* boolean_t */ terminating:1,
	/* Allows vm_object_lookup
	 * and vm_object_deallocate
	 * to special case their
	 * behavior when they are
	 * called as a result of
	 * page cleaning during
	 * object termination
	 */
	/* boolean_t */ named:1,                /* A named reference enforces
	                                         * an internal naming
	                                         * convention: by calling the
	                                         * right routines for
	                                         * allocation and destruction,
	                                         * UBC references against the
	                                         * vm_object are checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	/* When a permanent object
	 * backing a COW goes away
	 * unexpectedly.  This bit
	 * allows vm_fault to return
	 * an error rather than a
	 * zero filled page.
	 */
	/* boolean_t */ phys_contiguous:1,
	/* Memory is wired and
	 * guaranteed physically
	 * contiguous.  However
	 * it is not device memory
	 * and obeys normal virtual
	 * memory rules w.r.t pmap
	 * access bits.
	 */
	/* boolean_t */ nophyscache:1,
	/* When mapped at the
	 * pmap level, don't allow
	 * primary caching. (for
	 * I/O)
	 */
	/* boolean_t */ for_realtime:1,
	/* Might be needed for realtime code path */
	/* vm_object_destroy_reason_t */ no_pager_reason:3,
	/* differentiate known and unknown causes */
#if FBDP_DEBUG_OBJECT_NO_PAGER
	/* boolean_t */ fbdp_tracked:1;
#else /* FBDP_DEBUG_OBJECT_NO_PAGER */
	__object1_unused_bits:1;
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
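
/*
 * A minimal compile-time sketch of the first layout rule described in
 * the comment atop struct vm_object (illustrative; not part of the
 * original interface):
 *
 *	_Static_assert(offsetof(struct vm_object, memq) == 0,
 *	    "memq must lead the structure so its alignment can be controlled");
 */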

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))

extern const vm_object_t kernel_object_default;  /* the default kernel object */

extern const vm_object_t compressor_object;      /* the single compressor object, allocates pages for compressed
                                                  * buffers (not the segments) */

extern const vm_object_t retired_pages_object;   /* pages retired due to ECC, should never be used */


#define is_kernel_object(object) ((object) == kernel_object_default)


extern const vm_object_t exclaves_object;        /* holds VM pages owned by exclaves */

# define        VM_MSYNC_INITIALIZED                    0
# define        VM_MSYNC_SYNCHRONIZING                  1
# define        VM_MSYNC_DONE                           2


extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

/** os_refgrp_t for vm_objects */
os_refgrp_decl_extern(vm_object_refgrp);

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count), (object)); \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->internal &&                                                  \
	        (object)->vo_ledger_tag &&                                              \
	        VM_OBJECT_OWNER((object)) != NULL) {                                    \
	            vm_object_wired_page_update_ledgers(object, __wireddelta);          \
	    }                                                                           \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
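
/*
 * Usage sketch for the wired-page accounting macros above (illustrative;
 * "mem" stands for a vm_page_t being wired into "object"):
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 *
 * START opens the C block and declares the __wireddelta accumulator,
 * ADD/REMOVE/COUNT adjust it, and END applies the accumulated delta to
 * wired_page_count, the ledgers and the wire tag, then closes the block.
 */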

#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);
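
/*
 * Locking sketch (illustrative): take the object lock shared for
 * read-mostly paths and upgrade only when a mutation turns out to be
 * needed.  If the upgrade fails, the shared hold was dropped and the
 * lock must be retaken exclusive; callers typically re-validate the
 * object's state after retaking it.
 *
 *	vm_object_lock_shared(object);
 *	... inspect object ...
 *	if (!vm_object_lock_upgrade(object)) {
 *	        vm_object_lock(object);
 *	}
 *	... mutate object; lock is now held exclusive ...
 *	vm_object_unlock(object);
 */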

extern void             kdp_vm_object_sleep_find_owner(
	event64_t          wait_event,
	block_hint_t       wait_type,
	thread_waitinfo_t *waitinfo);

#endif /* MACH_KERNEL_PRIVATE */

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	struct mpsc_queue_chain iorr_elm;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif /* CONFIG_IOSCHED */

extern void page_worker_init(void);


#endif /* XNU_KERNEL_PRIVATE */

#endif  /* _VM_VM_OBJECT_XNU_H_ */