/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>

#include <sys/kdebug_triage.h>
/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t map)
{
	return map->hdr.nentries;
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	.memory_object_reference = vnode_pager_reference,
	.memory_object_deallocate = vnode_pager_deallocate,
	.memory_object_init = vnode_pager_init,
	.memory_object_terminate = vnode_pager_terminate,
	.memory_object_data_request = vnode_pager_data_request,
	.memory_object_data_return = vnode_pager_data_return,
	.memory_object_data_initialize = vnode_pager_data_initialize,
	.memory_object_map = vnode_pager_map,
	.memory_object_last_unmap = vnode_pager_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "vnode pager"
};

typedef struct vnode_pager {
	/* mandatory generic header */
	struct memory_object vn_pgr_hdr;

	/* pager-specific */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define vn_pgr_hdr_ref vn_pgr_hdr.mo_ref
#else
	os_ref_atomic_t vn_pgr_hdr_ref;
#endif
	struct vnode *vnode_handle; /* vnode handle */
} *vnode_pager_t;


kern_return_t
vnode_pager_cluster_read( /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write( /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create( /* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup( /* forward */
	memory_object_t);

struct vnode *
vnode_pager_lookup_vnode( /* forward */
	memory_object_t);

ZONE_DEFINE_TYPE(vnode_pager_zone, "vnode pager structures",
    struct vnode_pager, ZC_NOENCRYPT);

#define VNODE_PAGER_NULL ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT 1


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL 0xffffffff
#define PAGER_INIT 0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

extern int proc_resetpcontrol(int);


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32

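/*
 * memory_object_control_uiomove:
 *
 * Fast path for copying between a uio and pages that are already
 * resident in the file-backed VM object identified by "control"
 * (typically reached from the cluster I/O layer for cached reads
 * and writes).  Pages are collected in batches of up to MAX_RUN,
 * marked "busy" under the object lock, and copied to or from user
 * space with the object unlocked.  Returns 0 on success or an
 * errno-style value (e.g. EIO); if it returns 0 before consuming
 * all of "io_requested" (a "hole" in the cache), the caller is
 * expected to finish the transfer through the regular pager path.
 */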
int
memory_object_control_uiomove(
	memory_object_control_t control,
	memory_object_offset_t offset,
	void * uio,
	int start_offset,
	int io_requested,
	int mark_dirty,
	int take_reference)
{
	vm_object_t object;
	vm_page_t dst_page;
	int xsize;
	int retval = 0;
	int cur_run;
	int cur_needed;
	int i;
	int orig_offset;
	vm_page_t page_run[MAX_RUN];
	int dirty_count; /* keeps track of the number of pages dirtied as part of this uiomove */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->vo_copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	dirty_count = 0;
	while (io_requested && retval == 0) {
		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN) {
			cur_needed = MAX_RUN;
		}

		for (cur_run = 0; cur_run < cur_needed;) {
			if (mark_dirty && object->vo_copy != VM_OBJECT_NULL) {
				/*
				 * We checked that this file-backed object did not have
				 * a copy object when we entered this routine but it now has
				 * one, so we can't stay on this optimized path.
				 * We can finish processing the pages we have already grabbed
				 * because they were made "busy" before the copy object was
				 * created, so they can't have been seen through that copy
				 * object yet.
				 */
				break;
			}

			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
				break;
			}

			if (__improbable(dst_page->vmp_error)) {
				retval = EIO;
				break;
			}
			if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process them now since we can't block on this
				 * page while holding other pages in the BUSY state;
				 * otherwise, wait for the page
				 */
				if (cur_run) {
					break;
				}
				vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
				continue;
			}
			if (dst_page->vmp_laundry) {
				vm_pageout_steal_laundry(dst_page, FALSE);
			}
			if (__improbable(dst_page->vmp_absent)) {
				printf("absent page %p (obj %p offset 0x%llx) -> EIO",
				    dst_page, object, offset);
				retval = EIO;
				break;
			}

			if (mark_dirty) {
				if (dst_page->vmp_dirty == FALSE) {
					dirty_count++;
				}
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->vmp_cs_validated &&
				    !dst_page->vmp_cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidation
					 */
					dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE;

					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}
			}
			dst_page->vmp_busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0) {
			/*
			 * we hit a 'hole' in the cache or
			 * a page we don't want to try to handle,
			 * so bail at this point;
			 * we'll unlock the object below
			 */
			break;
		}
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
				xsize = io_requested;
			}

			if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
				break;
			}

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {
			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++) {
				vm_page_lru(page_run[i]);
			}

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->vmp_clustered) {
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
			}

			vm_page_wakeup_done(object, dst_page);
		}
		orig_offset = 0;
	}
	vm_object_unlock(object);
	return retval;
}


bool
memory_object_is_vnode_pager(
	memory_object_t mem_obj)
{
	if (mem_obj != NULL &&
	    mem_obj->mo_pager_ops == &vnode_pager_ops) {
		return true;
	}
	return false;
}

/*
 * vnode_pager_setup:
 *
 * Create a vnode pager for the given vnode and return it as a
 * memory object.
 */
memory_object_t
vnode_pager_setup(
	struct vnode *vp,
	__unused memory_object_t pager)
{
	vnode_pager_t vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL) {
		panic("vnode_pager_setup: vnode_object_create() failed");
	}
	return (memory_object_t)vnode_object;
}

/*
 * vnode_pager_init:
 *
 * Attach the pager to its memory object "control" port and set the
 * object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
	vnode_pager_t vnode_object;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->vn_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("vnode_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * vnode_pager_data_return:
 *
 * Push dirty data back to the file: hand the specified range of the
 * memory object to the cluster-write path.
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t data_cnt,
	memory_object_offset_t *resid_offset,
	int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	int upl_flags)
{
	vnode_pager_t vnode_object;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

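/*
 * vnode_pager_data_initialize:
 *
 * The VM system is never expected to call this entry point for a
 * vnode pager, so reaching it is a bug and we panic.
 */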
kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

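/*
 * vnode_pager_dirtied:
 *
 * Notify the VFS layer that the byte range [s_offset, e_offset) of
 * the memory object's backing vnode has been dirtied.  Requests on
 * objects not backed by the vnode pager are silently ignored.
 */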
void
vnode_pager_dirtied(
	memory_object_t mem_obj,
	vm_object_offset_t s_offset,
	vm_object_offset_t e_offset)
{
	vnode_pager_t vnode_object;

	if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
		vnode_object = vnode_pager_lookup(mem_obj);
		vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
	}
}

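/*
 * vnode_pager_get_isinuse:
 *
 * Report whether the backing vnode is currently in use beyond this
 * pager's own reference.  For memory objects not backed by the vnode
 * pager, claim "in use" and return KERN_INVALID_ARGUMENT.
 */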
kern_return_t
vnode_pager_get_isinuse(
	memory_object_t mem_obj,
	uint32_t *isinuse)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

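/*
 * vnode_pager_get_throttle_io_limit:
 *
 * Fetch the throttled I/O limit associated with the device backing
 * this pager's vnode.
 */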
kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t mem_obj,
	uint32_t *limit)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

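/*
 * vnode_pager_get_isSSD:
 *
 * Report whether the backing vnode resides on a solid-state device.
 */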
kern_return_t
vnode_pager_get_isSSD(
	memory_object_t mem_obj,
	boolean_t *isSSD)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

#if FBDP_DEBUG_OBJECT_NO_PAGER
kern_return_t
vnode_pager_get_forced_unmount(
	memory_object_t mem_obj,
	bool *forced_unmount)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*forced_unmount = vnode_pager_forced_unmount(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

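/*
 * vnode_pager_get_object_size:
 *
 * Return the current size of the file backing this memory object,
 * or 0 and KERN_INVALID_ARGUMENT if it is not a vnode pager.
 */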
kern_return_t
vnode_pager_get_object_size(
	memory_object_t mem_obj,
	memory_object_offset_t *length)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

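/*
 * vnode_pager_get_object_name:
 *
 * Copy the path and file name of the backing vnode into the
 * caller-supplied buffers; *truncated_path_p reports whether the
 * path had to be truncated to fit in "pathname_len" bytes.
 */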
kern_return_t
vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
	           pathname,
	           pathname_len,
	           filename,
	           filename_len,
	           truncated_path_p);
}

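/*
 * vnode_pager_get_object_mtime:
 *
 * Return the modification time of the backing vnode and, if
 * requested, the modification time recorded when its code
 * signature was registered ("cs_mtime").
 */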
kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
	           mtime,
	           cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
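/*
 * vnode_pager_cs_check_validation_bitmap:
 *
 * Query or update the code-signing validation bitmap kept on the
 * backing vnode for the page at "offset"; "optype" selects the
 * bitmap operation.
 */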
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	int optype)
{
	vnode_pager_t vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * vnode_pager_data_request:
 *
 * Page in data from the file: pick a cluster size around the faulting
 * offset and hand the request to the cluster-read path.
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	__unused memory_object_cluster_size_t length,
	__unused vm_prot_t desired_access,
	memory_object_fault_info_t fault_info)
{
	vnode_pager_t vnode_object;
	memory_object_offset_t base_offset;
	vm_size_t size;
	uint32_t io_streaming = 0;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
	    &base_offset, &size, &io_streaming,
	    fault_info) != KERN_SUCCESS) {
		size = PAGE_SIZE;
	}

	assert(offset >= base_offset &&
	    offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 * vnode_pager_reference:
 *
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t mem_obj)
{
	vnode_pager_t vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	os_ref_retain_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
}

/*
 * vnode_pager_deallocate:
 *
 * Drop a reference on the pager; on the last release, drop the
 * vnode reference and free the pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t mem_obj)
{
	vnode_pager_t vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (os_ref_release_raw(&vnode_object->vn_pgr_hdr_ref, NULL) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
}

/*
 * vnode_pager_terminate:
 *
 * No teardown needed here; the vnode reference is released in
 * vnode_pager_deallocate() instead.
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * vnode_pager_map:
 *
 * Called when the memory object is mapped; register the mapping
 * with the UBC layer via ubc_map().
 */
kern_return_t
vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t prot)
{
	vnode_pager_t vnode_object;
	int ret;
	kern_return_t kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

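/*
 * vnode_pager_last_unmap:
 *
 * Called when the last mapping of this memory object goes away;
 * tell the UBC layer so it can tear down the state set up by
 * ubc_map().
 */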
kern_return_t
vnode_pager_last_unmap(
	memory_object_t mem_obj)
{
	vnode_pager_t vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}



/*
 * vnode_pager_cluster_write:
 *
 * Push the given range out to the file via vnode_pageout(), splitting
 * it into chunks of at most MAX_UPL_TRANSFER_BYTES.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t vnode_object,
	vm_object_offset_t offset,
	vm_size_t cnt,
	vm_object_offset_t * resid_offset,
	int * io_error,
	int upl_flags)
{
	vm_size_t size;
	int errno;

	if (upl_flags & UPL_MSYNC) {
		upl_flags |= UPL_VNODE_PAGER;

		if ((upl_flags & UPL_IOSYNC) && io_error) {
			upl_flags |= UPL_KEEPCACHED;
		}

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
			    NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ((upl_flags & UPL_KEEPCACHED)) {
				if ((*io_error = errno)) {
					break;
				}
			}
			cnt -= size;
			offset += size;
		}
		if (resid_offset) {
			*resid_offset = offset;
		}
	} else {
		vm_object_offset_t vnode_size;
		vm_object_offset_t base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size) {
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
			}
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
		    NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
		    (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}


/*
 * vnode_pager_cluster_read:
 *
 * Page in "cnt" bytes starting at "base_offset" via vnode_pagein().
 * If the pagein reports the data absent, abort the pages so the
 * fault path can handle the error.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t vnode_object,
	vm_object_offset_t base_offset,
	vm_object_offset_t offset,
	uint32_t io_streaming,
	vm_size_t cnt)
{
	int local_error = 0;
	int kret;
	int flags = 0;

	assert(!(cnt & PAGE_MASK));

	if (io_streaming) {
		flags |= UPL_IOSTREAMING;
	}

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
	    (upl_t) NULL,
	    (upl_offset_t) (offset - base_offset),
	    base_offset,
	    (upl_size_t) cnt,
	    flags,
	    &local_error);
	/*
	 * The value 1 corresponds to PAGER_ABSENT, which is defined in
	 * bsd/vm/vm_pager_xnu.h; we can't include that header here
	 * without a layering violation, so compare against the literal.
	 */
	if (kret == 1) {
		int uplflags;
		upl_t upl = NULL;
		unsigned int count = 0;
		kern_return_t kr;

		uplflags = (UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE |
		    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
		    base_offset, (upl_size_t) cnt,
		    &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example).  Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_VNODEPAGER_CLREAD_NO_UPL), 0 /* arg */);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * vnode_object_create:
 *
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	vnode_pager_t vnode_object;

	vnode_object = zalloc_flags(vnode_pager_zone, Z_WAITOK | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
	vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
	vnode_object->vnode_handle = vp;

	return vnode_object;
}

/*
 * vnode_pager_lookup:
 *
 * Convert a memory object back to its vnode pager, asserting that it
 * really is one.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t name)
{
	vnode_pager_t vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
	return vnode_object;
}


struct vnode *
vnode_pager_lookup_vnode(
	memory_object_t name)
{
	vnode_pager_t vnode_object;
	vnode_object = (vnode_pager_t)name;
	if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
		return vnode_object->vnode_handle;
	} else {
		return NULL;
	}
}

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr(vm_map_entry_t entry, uintptr_t *vnodeaddr, uint32_t *vid, bool *is_map_shared);

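/*
 * fill_procregioninfo:
 *
 * Fill "pinfo" with the attributes of the memory region of "task"
 * containing (or following) address "arg"; when "vnodeaddr" is
 * non-NULL, also report the backing vnode and vid for file-backed
 * regions.  Returns 1 if a region was found, 0 otherwise.
 */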
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;
	vm_map_offset_t start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;
	boolean_t do_region_footprint;
	int effective_page_shift, effective_page_size;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}

	effective_page_shift = vm_self_region_page_shift(map);
	effective_page_size = (1 << effective_page_shift);

	vm_map_reference(map);
	task_unlock(task);

	do_region_footprint = task_self_region_footprint();

	vm_map_lock_read(map);

	start = address;

	if (!vm_map_lookup_entry_allow_pgz(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			if (do_region_footprint &&
			    address == tmp_entry->vme_end) {
				ledger_amount_t ledger_resident;
				ledger_amount_t ledger_compressed;

				/*
				 * This request is right after the last valid
				 * memory region; instead of reporting the
				 * end of the address space, report a fake
				 * memory region to account for non-volatile
				 * purgeable and/or ledger-tagged memory
				 * owned by this task.
				 */
				task_ledgers_footprint(task->ledger,
				    &ledger_resident,
				    &ledger_compressed);
				if (ledger_resident + ledger_compressed == 0) {
					/* nothing to report */
					vm_map_unlock_read(map);
					vm_map_deallocate(map);
					return 0;
				}

				/* provide fake region for purgeable */
				pinfo->pri_offset = address;
				pinfo->pri_protection = VM_PROT_DEFAULT;
				pinfo->pri_max_protection = VM_PROT_DEFAULT;
				pinfo->pri_inheritance = VM_INHERIT_NONE;
				pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
				pinfo->pri_user_wired_count = 0;
				pinfo->pri_user_tag = -1;
				pinfo->pri_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out =
				    (uint32_t) (ledger_compressed / effective_page_size);
				pinfo->pri_pages_dirtied =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_ref_count = 1;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = SM_PRIVATE;
				pinfo->pri_private_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
				pinfo->pri_address = address;
				pinfo->pri_size =
				    (uint64_t) (ledger_resident + ledger_compressed);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared) {
			pinfo->pri_flags |= PROC_REGION_SHARED;
		}
	}


	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);


	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid, NULL) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 1;
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 1;
}

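/*
 * fill_procregioninfo_onlymappedvnodes:
 *
 * Like fill_procregioninfo(), but scan forward from "arg" to the
 * first vnode-backed mapping and report only that one; the page
 * counts and object statistics are left zeroed.
 */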
int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry_allow_pgz(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid, NULL)) {
				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared) {
					pinfo->pri_flags |= PROC_REGION_SHARED;
				}

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 0;
}

extern int vnode_get(struct vnode *vp);
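/*
 * task_find_region_details:
 *
 * Find a vnode-backed mapping in the task's map, either exactly at
 * "offset" (FIND_REGION_DETAILS_AT_OFFSET) or scanning forward from
 * it, and return the vnode, vid, sharing state and bounds.  With
 * FIND_REGION_DETAILS_GET_VNODE, an iocount is taken on the vnode
 * via vnode_get() and must be released by the caller.  Returns 1 on
 * success, 0 otherwise.
 */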
int
task_find_region_details(
	task_t task,
	vm_map_offset_t offset,
	find_region_details_options_t options,
	uintptr_t *vp_p,
	uint32_t *vid_p,
	bool *is_map_shared_p,
	uint64_t *start_p,
	uint64_t *len_p)
{
	vm_map_t map;
	vm_map_entry_t entry;
	int rc;

	rc = 0;
	*vp_p = 0;
	*vid_p = 0;
	*is_map_shared_p = false;
	*start_p = 0;
	*len_p = 0;
	if (options & ~FIND_REGION_DETAILS_OPTIONS_ALL) {
		return 0;
	}

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry_allow_pgz(map, offset, &entry)) {
		if (options & FIND_REGION_DETAILS_AT_OFFSET) {
			/* no mapping at this offset */
			goto ret;
		}
		/* check next entry */
		entry = entry->vme_next;
		if (entry == vm_map_to_entry(map)) {
			/* no next entry */
			goto ret;
		}
	}

	for (;
	    entry != vm_map_to_entry(map);
	    entry = entry->vme_next) {
		if (entry->is_sub_map) {
			/* fallthru to check next entry */
		} else if (fill_vnodeinfoforaddr(entry, vp_p, vid_p, is_map_shared_p)) {
			if ((options & FIND_REGION_DETAILS_GET_VNODE) &&
			    vnode_get((struct vnode *)*vp_p)) {
				/* tried but could not get an iocount */
				*vp_p = 0;
				*vid_p = 0;
				if (options & FIND_REGION_DETAILS_AT_OFFSET) {
					/* done */
					break;
				}
				/* check next entry */
				continue;
			}
			*start_p = entry->vme_start;
			*len_p = entry->vme_end - entry->vme_start;
			rc = 1; /* success */
			break;
		}
		if (options & FIND_REGION_DETAILS_AT_OFFSET) {
			/* no file mapping at this offset: done */
			break;
		}
		/* check next entry */
	}

ret:
	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return rc;
}

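/*
 * fill_vnodeinfoforaddr:
 *
 * Walk to the bottom of the map entry's shadow chain and, if the
 * terminal object is paged by the vnode pager, return its vnode and
 * vid.  When "is_map_shared" is non-NULL, it is set to true only if
 * no shadow objects were traversed (shadow depth 0), i.e. the
 * mapping still references the pager's object directly.  Returns 1
 * on success, 0 otherwise.
 */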
static int
fill_vnodeinfoforaddr(
	vm_map_entry_t entry,
	uintptr_t * vnodeaddr,
	uint32_t * vid,
	bool *is_map_shared)
{
	vm_object_t top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;
	kern_return_t kr;
	int shadow_depth;


	if (entry->is_sub_map) {
		return 0;
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
			for (object = top_object, shadow_depth = 0;
			    object->shadow != VM_OBJECT_NULL;
			    object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return 0;
	} else if (object->internal) {
		vm_object_unlock(object);
		return 0;
	} else if (!object->pager_ready ||
	    object->terminating ||
	    !object->alive ||
	    object->pager == NULL) {
		vm_object_unlock(object);
		return 0;
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return 0;
			}
		} else {
			vm_object_unlock(object);
			return 0;
		}
	}
	if (is_map_shared) {
		*is_map_shared = (shadow_depth == 0);
	}
	vm_object_unlock(object);
	return 1;
}

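/*
 * vnode_pager_get_object_vnode:
 *
 * Return the vnode handle and vnode id ("vid") backing the given
 * memory object; fails if the pager has already dropped its vnode.
 */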
kern_return_t
vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t * vnodeaddr,
	uint32_t * vid)
{
	vnode_pager_t vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t mem_obj,
	uintptr_t *devvp)
{
	struct vnode *vp;
	uint32_t vid;

	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
		return KERN_FAILURE;
	}
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry.  If found, return with the
 * object locked, otherwise return VM_OBJECT_NULL with nothing locked.
 */

vm_object_t
find_vnode_object(
	vm_map_entry_t entry)
{
	vm_object_t top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;

	if (!entry->is_sub_map) {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */

		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object &&
			    !object->internal &&
			    object->pager_ready &&
			    !object->terminating &&
			    object->alive &&
			    object->pager != NULL) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for.  Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return VM_OBJECT_NULL.
				 */

				if (pager_ops == &vnode_pager_ops) {
					return object; /* we return with the object locked */
				}
			}

			vm_object_unlock(object);
		}
	}

	return VM_OBJECT_NULL;
}