Lines matching refs:object — cross-reference hits for the identifier "object" in FreeBSD's sys/vm/vm_object.c. Each hit gives the source line number, the matching line, and, where known, the enclosing function and the identifier's role (local, argument).

115 static int	vm_object_page_collect_flush(vm_object_t object, vm_page_t p,
120 static void vm_object_backing_remove(vm_object_t object);
153 static SYSCTL_NODE(_vm_stats, OID_AUTO, object, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
181 vm_object_t object; in vm_object_zdtor() local
183 object = (vm_object_t)mem; in vm_object_zdtor()
184 KASSERT(object->ref_count == 0, in vm_object_zdtor()
185 ("object %p ref_count = %d", object, object->ref_count)); in vm_object_zdtor()
186 KASSERT(TAILQ_EMPTY(&object->memq), in vm_object_zdtor()
187 ("object %p has resident pages in its memq", object)); in vm_object_zdtor()
188 KASSERT(vm_radix_is_empty(&object->rtree), in vm_object_zdtor()
189 ("object %p has resident pages in its trie", object)); in vm_object_zdtor()
191 KASSERT(LIST_EMPTY(&object->rvq), in vm_object_zdtor()
193 object)); in vm_object_zdtor()
195 KASSERT(!vm_object_busied(object), in vm_object_zdtor()
196 ("object %p busy = %d", object, blockcount_read(&object->busy))); in vm_object_zdtor()
197 KASSERT(object->resident_page_count == 0, in vm_object_zdtor()
199 object, object->resident_page_count)); in vm_object_zdtor()
200 KASSERT(object->shadow_count == 0, in vm_object_zdtor()
202 object, object->shadow_count)); in vm_object_zdtor()
203 KASSERT(object->type == OBJT_DEAD, in vm_object_zdtor()
205 object, object->type)); in vm_object_zdtor()
212 vm_object_t object; in vm_object_zinit() local
214 object = (vm_object_t)mem; in vm_object_zinit()
215 rw_init_flags(&object->lock, "vm object", RW_DUPOK | RW_NEW); in vm_object_zinit()
218 object->type = OBJT_DEAD; in vm_object_zinit()
219 vm_radix_init(&object->rtree); in vm_object_zinit()
220 refcount_init(&object->ref_count, 0); in vm_object_zinit()
221 blockcount_init(&object->paging_in_progress); in vm_object_zinit()
222 blockcount_init(&object->busy); in vm_object_zinit()
223 object->resident_page_count = 0; in vm_object_zinit()
224 object->shadow_count = 0; in vm_object_zinit()
225 object->flags = OBJ_DEAD; in vm_object_zinit()
228 TAILQ_INSERT_TAIL(&vm_object_list, object, object_list); in vm_object_zinit()
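The zone hooks above split setup from teardown checking: vm_object_zinit() primes state that survives reuse of the allocation (lock, OBJT_DEAD type, zeroed counters), while vm_object_zdtor() asserts that an object going back to the zone is inert. A minimal user-space sketch of that shape, assuming nothing beyond pthreads; struct obj, obj_zinit() and obj_zdtor() are made-up names and a pthread rwlock stands in for the kernel object lock.

#include <assert.h>
#include <pthread.h>
#include <stdlib.h>

struct obj {
        pthread_rwlock_t lock;          /* analogue of object->lock */
        int ref_count;                  /* analogue of object->ref_count */
        int resident_pages;             /* analogue of resident_page_count */
        int dead;                       /* analogue of type == OBJT_DEAD */
};

static void
obj_zinit(struct obj *o)
{
        pthread_rwlock_init(&o->lock, NULL);
        o->ref_count = 0;
        o->resident_pages = 0;
        o->dead = 1;                    /* fresh objects start out "dead" */
}

static void
obj_zdtor(struct obj *o)
{
        /* Mirror the KASSERTs: a freed object must carry no state. */
        assert(o->ref_count == 0);
        assert(o->resident_pages == 0);
        assert(o->dead);
        pthread_rwlock_destroy(&o->lock);
}

int
main(void)
{
        struct obj *o = malloc(sizeof(*o));

        if (o == NULL)
                return (1);
        obj_zinit(o);
        obj_zdtor(o);
        free(o);
        return (0);
}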
235 vm_object_t object, void *handle) in _vm_object_allocate() argument
238 TAILQ_INIT(&object->memq); in _vm_object_allocate()
239 LIST_INIT(&object->shadow_head); in _vm_object_allocate()
241 object->type = type; in _vm_object_allocate()
243 pctrie_init(&object->un_pager.swp.swp_blks); in _vm_object_allocate()
252 object->pg_color = 0; in _vm_object_allocate()
253 object->flags = flags; in _vm_object_allocate()
254 object->size = size; in _vm_object_allocate()
255 object->domain.dr_policy = NULL; in _vm_object_allocate()
256 object->generation = 1; in _vm_object_allocate()
257 object->cleangeneration = 1; in _vm_object_allocate()
258 refcount_init(&object->ref_count, 1); in _vm_object_allocate()
259 object->memattr = VM_MEMATTR_DEFAULT; in _vm_object_allocate()
260 object->cred = NULL; in _vm_object_allocate()
261 object->charge = 0; in _vm_object_allocate()
262 object->handle = handle; in _vm_object_allocate()
263 object->backing_object = NULL; in _vm_object_allocate()
264 object->backing_object_offset = (vm_ooffset_t) 0; in _vm_object_allocate()
266 LIST_INIT(&object->rvq); in _vm_object_allocate()
268 umtx_shm_object_init(object); in _vm_object_allocate()
311 vm_object_clear_flag(vm_object_t object, u_short bits) in vm_object_clear_flag() argument
314 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_clear_flag()
315 object->flags &= ~bits; in vm_object_clear_flag()
328 vm_object_set_memattr(vm_object_t object, vm_memattr_t memattr) in vm_object_set_memattr() argument
331 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_set_memattr()
332 switch (object->type) { in vm_object_set_memattr()
340 if (!TAILQ_EMPTY(&object->memq)) in vm_object_set_memattr()
347 object); in vm_object_set_memattr()
349 object->memattr = memattr; in vm_object_set_memattr()
354 vm_object_pip_add(vm_object_t object, short i) in vm_object_pip_add() argument
358 blockcount_acquire(&object->paging_in_progress, i); in vm_object_pip_add()
362 vm_object_pip_wakeup(vm_object_t object) in vm_object_pip_wakeup() argument
365 vm_object_pip_wakeupn(object, 1); in vm_object_pip_wakeup()
369 vm_object_pip_wakeupn(vm_object_t object, short i) in vm_object_pip_wakeupn() argument
373 blockcount_release(&object->paging_in_progress, i); in vm_object_pip_wakeupn()
382 vm_object_pip_sleep(vm_object_t object, const char *waitid) in vm_object_pip_sleep() argument
385 (void)blockcount_sleep(&object->paging_in_progress, &object->lock, in vm_object_pip_sleep()
390 vm_object_pip_wait(vm_object_t object, const char *waitid) in vm_object_pip_wait() argument
393 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_pip_wait()
395 blockcount_wait(&object->paging_in_progress, &object->lock, waitid, in vm_object_pip_wait()
400 vm_object_pip_wait_unlocked(vm_object_t object, const char *waitid) in vm_object_pip_wait_unlocked() argument
403 VM_OBJECT_ASSERT_UNLOCKED(object); in vm_object_pip_wait_unlocked()
405 blockcount_wait(&object->paging_in_progress, NULL, waitid, PVM); in vm_object_pip_wait_unlocked()
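vm_object_pip_add(), vm_object_pip_wakeupn() and the pip wait routines above track in-flight paging with a blockcount: each operation bumps the counter, releases it when done, and waiters sleep until the count drains to zero. A simplified analogue using a mutex and condition variable; pip_add(), pip_wakeupn() and pip_wait() here are illustrative names, not the blockcount(9) API.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pip_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t pip_cv = PTHREAD_COND_INITIALIZER;
static int pip_count;

static void
pip_add(int n)
{
        pthread_mutex_lock(&pip_lock);
        pip_count += n;
        pthread_mutex_unlock(&pip_lock);
}

static void
pip_wakeupn(int n)
{
        pthread_mutex_lock(&pip_lock);
        pip_count -= n;
        if (pip_count == 0)             /* last paging op: wake waiters */
                pthread_cond_broadcast(&pip_cv);
        pthread_mutex_unlock(&pip_lock);
}

static void
pip_wait(void)
{
        pthread_mutex_lock(&pip_lock);
        while (pip_count != 0)          /* sleep until all ops complete */
                pthread_cond_wait(&pip_cv, &pip_lock);
        pthread_mutex_unlock(&pip_lock);
}

int
main(void)
{
        pip_add(1);
        pip_wakeupn(1);
        pip_wait();                     /* returns at once: count is zero */
        printf("paging drained\n");
        return (0);
}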
416 vm_object_t object; in vm_object_allocate() local
442 object = (vm_object_t)uma_zalloc(obj_zone, M_WAITOK); in vm_object_allocate()
443 _vm_object_allocate(type, size, flags, object, NULL); in vm_object_allocate()
445 return (object); in vm_object_allocate()
459 vm_object_t handle, object; in vm_object_allocate_anon() local
467 object = uma_zalloc(obj_zone, M_WAITOK); in vm_object_allocate_anon()
469 object, handle); in vm_object_allocate_anon()
470 object->cred = cred; in vm_object_allocate_anon()
471 object->charge = cred != NULL ? charge : 0; in vm_object_allocate_anon()
472 return (object); in vm_object_allocate_anon()
476 vm_object_reference_vnode(vm_object_t object) in vm_object_reference_vnode() argument
484 if (!refcount_acquire_if_gt(&object->ref_count, 0)) { in vm_object_reference_vnode()
485 VM_OBJECT_RLOCK(object); in vm_object_reference_vnode()
486 old = refcount_acquire(&object->ref_count); in vm_object_reference_vnode()
487 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_vnode()
488 vref(object->handle); in vm_object_reference_vnode()
489 VM_OBJECT_RUNLOCK(object); in vm_object_reference_vnode()
499 vm_object_reference(vm_object_t object) in vm_object_reference() argument
502 if (object == NULL) in vm_object_reference()
505 if (object->type == OBJT_VNODE) in vm_object_reference()
506 vm_object_reference_vnode(object); in vm_object_reference()
508 refcount_acquire(&object->ref_count); in vm_object_reference()
509 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference()
521 vm_object_reference_locked(vm_object_t object) in vm_object_reference_locked() argument
525 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_reference_locked()
526 old = refcount_acquire(&object->ref_count); in vm_object_reference_locked()
527 if (object->type == OBJT_VNODE && old == 0) in vm_object_reference_locked()
528 vref(object->handle); in vm_object_reference_locked()
529 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_reference_locked()
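vm_object_reference_vnode() and vm_object_reference() above take a lock-free fast path first: refcount_acquire_if_gt() bumps the count only if a reference already exists, and the 0 -> 1 transition falls back to the object lock (where a vnode-backed object also gets vref()ed). A sketch of the acquire-if-greater primitive in C11 atomics; the names are hypothetical and the locked slow path is reduced to a comment.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool
refcount_acquire_if_gt(atomic_int *count, int n)
{
        int old = atomic_load(count);

        do {
                if (old <= n)
                        return (false); /* caller must use the slow path */
        } while (!atomic_compare_exchange_weak(count, &old, old + 1));
        return (true);
}

int
main(void)
{
        atomic_int refs = 0;

        if (!refcount_acquire_if_gt(&refs, 0)) {
                /*
                 * Slow path: the real code takes the object lock here and
                 * handles the first reference (e.g. vref() on the vnode).
                 */
                atomic_fetch_add(&refs, 1);
        }
        printf("refs = %d\n", atomic_load(&refs));
        return (0);
}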
537 vm_object_deallocate_vnode(vm_object_t object) in vm_object_deallocate_vnode() argument
539 struct vnode *vp = (struct vnode *) object->handle; in vm_object_deallocate_vnode()
542 KASSERT(object->type == OBJT_VNODE, in vm_object_deallocate_vnode()
547 last = refcount_release(&object->ref_count); in vm_object_deallocate_vnode()
548 VM_OBJECT_RUNLOCK(object); in vm_object_deallocate_vnode()
554 umtx_shm_object_terminated(object); in vm_object_deallocate_vnode()
568 vm_object_t object; in vm_object_deallocate_anon() local
571 object = LIST_FIRST(&backing_object->shadow_head); in vm_object_deallocate_anon()
572 KASSERT(object != NULL && backing_object->shadow_count == 1, in vm_object_deallocate_anon()
575 KASSERT((object->flags & (OBJ_TMPFS_NODE | OBJ_ANON)) == OBJ_ANON, in vm_object_deallocate_anon()
576 ("invalid shadow object %p", object)); in vm_object_deallocate_anon()
578 if (!VM_OBJECT_TRYWLOCK(object)) { in vm_object_deallocate_anon()
583 vm_object_pip_add(object, 1); in vm_object_deallocate_anon()
585 VM_OBJECT_WLOCK(object); in vm_object_deallocate_anon()
586 vm_object_pip_wakeup(object); in vm_object_deallocate_anon()
593 if ((object->flags & (OBJ_DEAD | OBJ_COLLAPSING)) != 0 || in vm_object_deallocate_anon()
594 !refcount_acquire_if_not_zero(&object->ref_count)) { in vm_object_deallocate_anon()
595 VM_OBJECT_WUNLOCK(object); in vm_object_deallocate_anon()
598 backing_object = object->backing_object; in vm_object_deallocate_anon()
600 vm_object_collapse(object); in vm_object_deallocate_anon()
601 VM_OBJECT_WUNLOCK(object); in vm_object_deallocate_anon()
603 return (object); in vm_object_deallocate_anon()
618 vm_object_deallocate(vm_object_t object) in vm_object_deallocate() argument
623 while (object != NULL) { in vm_object_deallocate()
631 if ((object->flags & OBJ_ANON) == 0) in vm_object_deallocate()
632 released = refcount_release_if_gt(&object->ref_count, 1); in vm_object_deallocate()
634 released = refcount_release_if_gt(&object->ref_count, 2); in vm_object_deallocate()
638 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
639 VM_OBJECT_RLOCK(object); in vm_object_deallocate()
640 if (object->type == OBJT_VNODE) { in vm_object_deallocate()
641 vm_object_deallocate_vnode(object); in vm_object_deallocate()
644 VM_OBJECT_RUNLOCK(object); in vm_object_deallocate()
647 VM_OBJECT_WLOCK(object); in vm_object_deallocate()
648 KASSERT(object->ref_count > 0, in vm_object_deallocate()
650 object->type)); in vm_object_deallocate()
656 if (!refcount_release(&object->ref_count)) { in vm_object_deallocate()
657 if (object->ref_count > 1 || in vm_object_deallocate()
658 object->shadow_count == 0) { in vm_object_deallocate()
659 if ((object->flags & OBJ_ANON) != 0 && in vm_object_deallocate()
660 object->ref_count == 1) in vm_object_deallocate()
661 vm_object_set_flag(object, in vm_object_deallocate()
663 VM_OBJECT_WUNLOCK(object); in vm_object_deallocate()
668 object = vm_object_deallocate_anon(object); in vm_object_deallocate()
676 umtx_shm_object_terminated(object); in vm_object_deallocate()
677 temp = object->backing_object; in vm_object_deallocate()
679 KASSERT((object->flags & OBJ_TMPFS_NODE) == 0, in vm_object_deallocate()
680 ("shadowed tmpfs v_object 2 %p", object)); in vm_object_deallocate()
681 vm_object_backing_remove(object); in vm_object_deallocate()
684 KASSERT((object->flags & OBJ_DEAD) == 0, in vm_object_deallocate()
686 vm_object_set_flag(object, OBJ_DEAD); in vm_object_deallocate()
687 vm_object_terminate(object); in vm_object_deallocate()
688 object = temp; in vm_object_deallocate()
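vm_object_deallocate() above mirrors that on release: refcount_release_if_gt() drops a reference without the lock whenever it cannot be the release that matters (threshold 1, or 2 for anonymous objects), and only the possibly-final release takes the write lock so termination and collapse can be serialized. A user-space sketch of the same fast-path/slow-path split; main() and its printouts are invented.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool
refcount_release_if_gt(atomic_int *count, int n)
{
        int old = atomic_load(count);

        do {
                if (old <= n)
                        return (false); /* may be the last ref: slow path */
        } while (!atomic_compare_exchange_weak(count, &old, old - 1));
        return (true);
}

int
main(void)
{
        atomic_int refs = 2;

        if (refcount_release_if_gt(&refs, 1))
                printf("fast path, refs now %d\n", atomic_load(&refs));

        if (!refcount_release_if_gt(&refs, 1)) {
                /*
                 * Slow path: lock the object, drop the last reference and
                 * terminate it if the count really reached zero.
                 */
                if (atomic_fetch_sub(&refs, 1) == 1)
                        printf("last reference dropped\n");
        }
        return (0);
}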
697 vm_object_destroy(vm_object_t object) in vm_object_destroy() argument
703 if (object->cred != NULL) { in vm_object_destroy()
704 swap_release_by_cred(object->charge, object->cred); in vm_object_destroy()
705 object->charge = 0; in vm_object_destroy()
706 crfree(object->cred); in vm_object_destroy()
707 object->cred = NULL; in vm_object_destroy()
713 uma_zfree(obj_zone, object); in vm_object_destroy()
717 vm_object_backing_remove_locked(vm_object_t object) in vm_object_backing_remove_locked() argument
721 backing_object = object->backing_object; in vm_object_backing_remove_locked()
722 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_remove_locked()
725 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_backing_remove_locked()
728 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove_locked()
729 LIST_REMOVE(object, shadow_list); in vm_object_backing_remove_locked()
731 object->flags &= ~OBJ_SHADOWLIST; in vm_object_backing_remove_locked()
733 object->backing_object = NULL; in vm_object_backing_remove_locked()
737 vm_object_backing_remove(vm_object_t object) in vm_object_backing_remove() argument
741 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_remove()
743 if ((object->flags & OBJ_SHADOWLIST) != 0) { in vm_object_backing_remove()
744 backing_object = object->backing_object; in vm_object_backing_remove()
746 vm_object_backing_remove_locked(object); in vm_object_backing_remove()
749 object->backing_object = NULL; in vm_object_backing_remove()
753 vm_object_backing_insert_locked(vm_object_t object, vm_object_t backing_object) in vm_object_backing_insert_locked() argument
756 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_insert_locked()
760 LIST_INSERT_HEAD(&backing_object->shadow_head, object, in vm_object_backing_insert_locked()
763 object->flags |= OBJ_SHADOWLIST; in vm_object_backing_insert_locked()
765 object->backing_object = backing_object; in vm_object_backing_insert_locked()
769 vm_object_backing_insert(vm_object_t object, vm_object_t backing_object) in vm_object_backing_insert() argument
772 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_insert()
776 vm_object_backing_insert_locked(object, backing_object); in vm_object_backing_insert()
779 object->backing_object = backing_object; in vm_object_backing_insert()
787 vm_object_backing_insert_ref(vm_object_t object, vm_object_t backing_object) in vm_object_backing_insert_ref() argument
790 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_insert_ref()
797 vm_object_backing_insert_locked(object, backing_object); in vm_object_backing_insert_ref()
802 object->backing_object = backing_object; in vm_object_backing_insert_ref()
810 vm_object_backing_transfer(vm_object_t object, vm_object_t backing_object) in vm_object_backing_transfer() argument
818 vm_object_backing_remove_locked(object); in vm_object_backing_transfer()
825 vm_object_backing_insert_locked(object, new_backing_object); in vm_object_backing_transfer()
828 object->backing_object = new_backing_object; in vm_object_backing_transfer()
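The shadow_head/shadow_list manipulation in the backing-object helpers above is ordinary <sys/queue.h> LIST usage. A stand-alone example of the same macros, assuming a platform that ships the BSD <sys/queue.h>; struct shadow and its fields are invented for the example.

#include <sys/queue.h>
#include <stdio.h>

struct shadow {
        int id;
        LIST_ENTRY(shadow) shadow_list;         /* linkage, as in vm_object */
};

LIST_HEAD(shadow_head, shadow);

int
main(void)
{
        struct shadow_head head;
        struct shadow a = { .id = 1 }, b = { .id = 2 }, *s;

        LIST_INIT(&head);
        LIST_INSERT_HEAD(&head, &a, shadow_list);
        LIST_INSERT_HEAD(&head, &b, shadow_list);
        LIST_REMOVE(&a, shadow_list);           /* drop one shadow */

        LIST_FOREACH(s, &head, shadow_list)
                printf("shadow %d\n", s->id);
        return (0);
}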
837 vm_object_collapse_wait(vm_object_t object) in vm_object_collapse_wait() argument
840 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse_wait()
842 while ((object->flags & OBJ_COLLAPSING) != 0) { in vm_object_collapse_wait()
843 vm_object_pip_wait(object, "vmcolwait"); in vm_object_collapse_wait()
853 vm_object_backing_collapse_wait(vm_object_t object) in vm_object_backing_collapse_wait() argument
857 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_backing_collapse_wait()
860 backing_object = object->backing_object; in vm_object_backing_collapse_wait()
867 VM_OBJECT_WUNLOCK(object); in vm_object_backing_collapse_wait()
870 VM_OBJECT_WLOCK(object); in vm_object_backing_collapse_wait()
880 vm_object_terminate_pages(vm_object_t object) in vm_object_terminate_pages() argument
884 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_terminate_pages()
892 TAILQ_FOREACH_SAFE(p, &object->memq, listq, p_next) { in vm_object_terminate_pages()
894 KASSERT(p->object == object && in vm_object_terminate_pages()
898 p->object = NULL; in vm_object_terminate_pages()
910 if (object->resident_page_count != 0) { in vm_object_terminate_pages()
911 vm_radix_reclaim_allnodes(&object->rtree); in vm_object_terminate_pages()
912 TAILQ_INIT(&object->memq); in vm_object_terminate_pages()
913 object->resident_page_count = 0; in vm_object_terminate_pages()
914 if (object->type == OBJT_VNODE) in vm_object_terminate_pages()
915 vdrop(object->handle); in vm_object_terminate_pages()
927 vm_object_terminate(vm_object_t object) in vm_object_terminate() argument
930 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_terminate()
931 KASSERT((object->flags & OBJ_DEAD) != 0, in vm_object_terminate()
932 ("terminating non-dead obj %p", object)); in vm_object_terminate()
933 KASSERT((object->flags & OBJ_COLLAPSING) == 0, in vm_object_terminate()
934 ("terminating collapsing obj %p", object)); in vm_object_terminate()
935 KASSERT(object->backing_object == NULL, in vm_object_terminate()
936 ("terminating shadow obj %p", object)); in vm_object_terminate()
945 vm_object_pip_wait(object, "objtrm"); in vm_object_terminate()
947 KASSERT(object->ref_count == 0, in vm_object_terminate()
949 object->ref_count)); in vm_object_terminate()
951 if ((object->flags & OBJ_PG_DTOR) == 0) in vm_object_terminate()
952 vm_object_terminate_pages(object); in vm_object_terminate()
955 if (__predict_false(!LIST_EMPTY(&object->rvq))) in vm_object_terminate()
956 vm_reserv_break_all(object); in vm_object_terminate()
959 KASSERT(object->cred == NULL || object->type == OBJT_DEFAULT || in vm_object_terminate()
960 object->type == OBJT_SWAP, in vm_object_terminate()
961 ("%s: non-swap obj %p has cred", __func__, object)); in vm_object_terminate()
966 vm_pager_deallocate(object); in vm_object_terminate()
967 VM_OBJECT_WUNLOCK(object); in vm_object_terminate()
969 vm_object_destroy(object); in vm_object_terminate()
1021 vm_object_page_clean(vm_object_t object, vm_ooffset_t start, vm_ooffset_t end, in vm_object_page_clean() argument
1029 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_page_clean()
1031 if (!vm_object_mightbedirty(object) || object->resident_page_count == 0) in vm_object_page_clean()
1039 tend = (end == 0) ? object->size : OFF_TO_IDX(end + PAGE_MASK); in vm_object_page_clean()
1040 allclean = tstart == 0 && tend >= object->size; in vm_object_page_clean()
1044 curgeneration = object->generation; in vm_object_page_clean()
1046 for (p = vm_page_find_least(object, tstart); p != NULL; p = np) { in vm_object_page_clean()
1054 if (object->generation != curgeneration && in vm_object_page_clean()
1057 np = vm_page_find_least(object, pi); in vm_object_page_clean()
1064 if (object->type == OBJT_VNODE) { in vm_object_page_clean()
1065 n = vm_object_page_collect_flush(object, p, pagerflags, in vm_object_page_clean()
1071 if (object->generation != curgeneration && in vm_object_page_clean()
1095 np = vm_page_find_least(object, pi + n); in vm_object_page_clean()
1106 if (allclean && object->type == OBJT_VNODE) in vm_object_page_clean()
1107 object->cleangeneration = curgeneration; in vm_object_page_clean()
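vm_object_page_clean() above copes with concurrent modification by snapshotting object->generation; whenever the generation has moved after the scan may have dropped the lock, it re-looks-up its position in the page list and continues from there. A toy version of that check-generation-and-rescan loop; the structure, the simulated writer and the counters are invented.

#include <stdio.h>

struct obj {
        unsigned generation;            /* bumped on any modification */
};

static void
modify(struct obj *o)
{
        o->generation++;
}

static int
scan(struct obj *o)
{
        unsigned curgeneration;
        int restarts = 0;

restart:
        curgeneration = o->generation;

        /* ... work that may let concurrent writers in ... */
        if (restarts == 0)
                modify(o);              /* simulate a racing writer */

        if (o->generation != curgeneration) {
                restarts++;
                goto restart;           /* state changed: rescan */
        }
        return (restarts);
}

int
main(void)
{
        struct obj o = { 1 };

        printf("restarted %d time(s)\n", scan(&o));
        return (0);
}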
1112 vm_object_page_collect_flush(vm_object_t object, vm_page_t p, int pagerflags, in vm_object_page_collect_flush() argument
1120 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_page_collect_flush()
1169 vm_object_sync(vm_object_t object, vm_ooffset_t offset, vm_size_t size, in vm_object_sync() argument
1178 if (object == NULL) in vm_object_sync()
1182 VM_OBJECT_WLOCK(object); in vm_object_sync()
1183 while ((backing_object = object->backing_object) != NULL) { in vm_object_sync()
1185 offset += object->backing_object_offset; in vm_object_sync()
1186 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1187 object = backing_object; in vm_object_sync()
1188 if (object->size < OFF_TO_IDX(offset + size)) in vm_object_sync()
1189 size = IDX_TO_OFF(object->size) - offset; in vm_object_sync()
1203 if (object->type == OBJT_VNODE && in vm_object_sync()
1204 vm_object_mightbedirty(object) != 0 && in vm_object_sync()
1205 ((vp = object->handle)->v_vflag & VV_NOSYNC) == 0) { in vm_object_sync()
1206 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1210 atop(size) == object->size) { in vm_object_sync()
1224 VM_OBJECT_WLOCK(object); in vm_object_sync()
1225 res = vm_object_page_clean(object, offset, offset + size, in vm_object_sync()
1227 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
1234 VM_OBJECT_WLOCK(object); in vm_object_sync()
1236 if ((object->type == OBJT_VNODE || in vm_object_sync()
1237 object->type == OBJT_DEVICE) && invalidate) { in vm_object_sync()
1238 if (object->type == OBJT_DEVICE) in vm_object_sync()
1249 vm_object_page_remove(object, OFF_TO_IDX(offset), in vm_object_sync()
1252 VM_OBJECT_WUNLOCK(object); in vm_object_sync()
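vm_object_sync() first descends the backing-object chain, folding each backing_object_offset into the caller's offset (and clipping the size) before acting on the terminal object. A simplified, lock-free illustration of that walk; struct obj here is a stand-in, not the kernel vm_object.

#include <stdio.h>

struct obj {
        struct obj *backing_object;
        long backing_object_offset;
        long size;                      /* in the same units as offset */
};

static struct obj *
chase_backing(struct obj *o, long *offset)
{
        while (o->backing_object != NULL) {
                *offset += o->backing_object_offset;
                o = o->backing_object;  /* move one level down the chain */
        }
        return (o);
}

int
main(void)
{
        struct obj bottom = { NULL, 0, 100 };
        struct obj middle = { &bottom, 8, 50 };
        struct obj top = { &middle, 4, 25 };
        long offset = 0;
        struct obj *o;

        o = chase_backing(&top, &offset);
        printf("terminal object size %ld, adjusted offset %ld\n",
            o->size, offset);
        return (0);
}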
1263 vm_object_advice_applies(vm_object_t object, int advice) in vm_object_advice_applies() argument
1266 if ((object->flags & OBJ_UNMANAGED) != 0) in vm_object_advice_applies()
1270 return ((object->flags & (OBJ_ONEMAPPING | OBJ_ANON)) == in vm_object_advice_applies()
1275 vm_object_madvise_freespace(vm_object_t object, int advice, vm_pindex_t pindex, in vm_object_madvise_freespace() argument
1279 if (advice == MADV_FREE && object->type == OBJT_SWAP) in vm_object_madvise_freespace()
1280 swap_pager_freespace(object, pindex, size); in vm_object_madvise_freespace()
1305 vm_object_madvise(vm_object_t object, vm_pindex_t pindex, vm_pindex_t end, in vm_object_madvise() argument
1312 if (object == NULL) in vm_object_madvise()
1316 VM_OBJECT_WLOCK(object); in vm_object_madvise()
1317 if (!vm_object_advice_applies(object, advice)) { in vm_object_madvise()
1318 VM_OBJECT_WUNLOCK(object); in vm_object_madvise()
1321 for (m = vm_page_find_least(object, pindex); pindex < end; pindex++) { in vm_object_madvise()
1322 tobject = object; in vm_object_madvise()
1336 if (object->backing_object == NULL) { in vm_object_madvise()
1339 vm_object_madvise_freespace(object, advice, in vm_object_madvise()
1360 if (tobject != object) in vm_object_madvise()
1384 if (object != tobject) in vm_object_madvise()
1385 VM_OBJECT_WUNLOCK(object); in vm_object_madvise()
1401 if (tobject != object) in vm_object_madvise()
1404 VM_OBJECT_WUNLOCK(object); in vm_object_madvise()
1418 vm_object_shadow(vm_object_t *object, vm_ooffset_t *offset, vm_size_t length, in vm_object_shadow() argument
1424 source = *object; in vm_object_shadow()
1487 *object = result; in vm_object_shadow()
1505 orig_object = entry->object.vm_object; in vm_object_split()
1642 entry->object.vm_object = new_object; in vm_object_split()
1649 vm_object_collapse_scan_wait(vm_object_t object, vm_page_t p) in vm_object_collapse_scan_wait() argument
1653 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse_scan_wait()
1654 backing_object = object->backing_object; in vm_object_collapse_scan_wait()
1657 KASSERT(p == NULL || p->object == object || p->object == backing_object, in vm_object_collapse_scan_wait()
1658 ("invalid ownership %p %p %p", p, object, backing_object)); in vm_object_collapse_scan_wait()
1661 VM_OBJECT_WUNLOCK(object); in vm_object_collapse_scan_wait()
1665 if (p->object == object) in vm_object_collapse_scan_wait()
1668 VM_OBJECT_WUNLOCK(object); in vm_object_collapse_scan_wait()
1671 VM_OBJECT_WLOCK(object); in vm_object_collapse_scan_wait()
1677 vm_object_scan_all_shadowed(vm_object_t object) in vm_object_scan_all_shadowed() argument
1683 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_scan_all_shadowed()
1684 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); in vm_object_scan_all_shadowed()
1686 backing_object = object->backing_object; in vm_object_scan_all_shadowed()
1691 pi = backing_offset_index = OFF_TO_IDX(object->backing_object_offset); in vm_object_scan_all_shadowed()
1712 if (new_pindex >= object->size) in vm_object_scan_all_shadowed()
1742 pp = vm_page_lookup(object, new_pindex); in vm_object_scan_all_shadowed()
1750 !vm_pager_has_page(object, new_pindex, NULL, NULL)) in vm_object_scan_all_shadowed()
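vm_object_scan_all_shadowed() appears to check whether every backing-object page inside the shadowed range is also provided by the front object, so that a collapse cannot expose stale data. A toy, presence-only version over plain arrays; the real routine also consults the pager, page validity and the object size, all of which this sketch omits.

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8

static bool
all_shadowed(const bool backing[], const bool front[], int backing_offset)
{
        int pi;

        for (pi = backing_offset; pi < NPAGES; pi++) {
                /* A backing page with no front page would show through. */
                if (backing[pi] && !front[pi - backing_offset])
                        return (false);
        }
        return (true);
}

int
main(void)
{
        bool backing[NPAGES] = { [2] = true, [3] = true };
        bool front[NPAGES] = { [0] = true, [1] = true };

        printf("%s\n", all_shadowed(backing, front, 2) ?
            "fully shadowed" : "not fully shadowed");
        return (0);
}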
1764 vm_object_collapse_scan(vm_object_t object) in vm_object_collapse_scan() argument
1770 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse_scan()
1771 VM_OBJECT_ASSERT_WLOCKED(object->backing_object); in vm_object_collapse_scan()
1773 backing_object = object->backing_object; in vm_object_collapse_scan()
1774 backing_offset_index = OFF_TO_IDX(object->backing_object_offset); in vm_object_collapse_scan()
1787 next = vm_object_collapse_scan_wait(object, p); in vm_object_collapse_scan()
1791 KASSERT(object->backing_object == backing_object, in vm_object_collapse_scan()
1793 object->backing_object, backing_object)); in vm_object_collapse_scan()
1794 KASSERT(p->object == backing_object, in vm_object_collapse_scan()
1796 p->object, backing_object)); in vm_object_collapse_scan()
1799 new_pindex >= object->size) { in vm_object_collapse_scan()
1819 pp = vm_page_lookup(object, new_pindex); in vm_object_collapse_scan()
1828 next = vm_object_collapse_scan_wait(object, pp); in vm_object_collapse_scan()
1843 if (pp != NULL || vm_pager_has_page(object, new_pindex, NULL, in vm_object_collapse_scan()
1870 if (vm_page_rename(p, object, new_pindex)) { in vm_object_collapse_scan()
1872 next = vm_object_collapse_scan_wait(object, NULL); in vm_object_collapse_scan()
1885 vm_reserv_rename(p, object, backing_object, in vm_object_collapse_scan()
1901 vm_object_collapse(vm_object_t object) in vm_object_collapse() argument
1905 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_collapse()
1908 KASSERT((object->flags & (OBJ_DEAD | OBJ_ANON)) == OBJ_ANON, in vm_object_collapse()
1916 backing_object = vm_object_backing_collapse_wait(object); in vm_object_collapse()
1920 KASSERT(object->ref_count > 0 && in vm_object_collapse()
1921 object->ref_count > object->shadow_count, in vm_object_collapse()
1923 object->ref_count, object->shadow_count)); in vm_object_collapse()
1927 KASSERT((object->flags & (OBJ_COLLAPSING | OBJ_DEAD)) == 0, in vm_object_collapse()
1940 vm_object_pip_add(object, 1); in vm_object_collapse()
1941 vm_object_set_flag(object, OBJ_COLLAPSING); in vm_object_collapse()
1949 vm_object_collapse_scan(object); in vm_object_collapse()
1973 object, in vm_object_collapse()
1974 OFF_TO_IDX(object->backing_object_offset), TRUE); in vm_object_collapse()
1980 vm_object_clear_flag(object, OBJ_COLLAPSING); in vm_object_collapse()
1981 vm_object_backing_transfer(object, backing_object); in vm_object_collapse()
1982 object->backing_object_offset += in vm_object_collapse()
1984 VM_OBJECT_WUNLOCK(object); in vm_object_collapse()
1985 vm_object_pip_wakeup(object); in vm_object_collapse()
2001 VM_OBJECT_WLOCK(object); in vm_object_collapse()
2010 if (!vm_object_scan_all_shadowed(object)) { in vm_object_collapse()
2020 vm_object_backing_remove_locked(object); in vm_object_collapse()
2023 vm_object_backing_insert_ref(object, in vm_object_collapse()
2025 object->backing_object_offset += in vm_object_collapse()
2074 vm_object_page_remove(vm_object_t object, vm_pindex_t start, vm_pindex_t end, in vm_object_page_remove() argument
2079 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_page_remove()
2080 KASSERT((object->flags & OBJ_UNMANAGED) == 0 || in vm_object_page_remove()
2082 ("vm_object_page_remove: illegal options for object %p", object)); in vm_object_page_remove()
2083 if (object->resident_page_count == 0) in vm_object_page_remove()
2085 vm_object_pip_add(object, 1); in vm_object_page_remove()
2087 p = vm_page_find_least(object, start); in vm_object_page_remove()
2111 object->ref_count != 0) in vm_object_page_remove()
2125 object->ref_count != 0 && in vm_object_page_remove()
2134 object->ref_count != 0 && !vm_page_try_remove_all(p)) in vm_object_page_remove()
2138 vm_object_pip_wakeup(object); in vm_object_page_remove()
2140 if (object->type == OBJT_SWAP) { in vm_object_page_remove()
2142 end = object->size; in vm_object_page_remove()
2143 swap_pager_freespace(object, start, end - start); in vm_object_page_remove()
2164 vm_object_page_noreuse(vm_object_t object, vm_pindex_t start, vm_pindex_t end) in vm_object_page_noreuse() argument
2168 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_page_noreuse()
2169 KASSERT((object->flags & (OBJ_FICTITIOUS | OBJ_UNMANAGED)) == 0, in vm_object_page_noreuse()
2170 ("vm_object_page_noreuse: illegal object %p", object)); in vm_object_page_noreuse()
2171 if (object->resident_page_count == 0) in vm_object_page_noreuse()
2173 p = vm_page_find_least(object, start); in vm_object_page_noreuse()
2196 vm_object_populate(vm_object_t object, vm_pindex_t start, vm_pindex_t end) in vm_object_populate() argument
2202 VM_OBJECT_ASSERT_WLOCKED(object); in vm_object_populate()
2204 rv = vm_page_grab_valid(&m, object, pindex, VM_ALLOC_NORMAL); in vm_object_populate()
2214 m = vm_page_lookup(object, start); in vm_object_populate()
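vm_object_populate() above grabs and validates every page in [start, end); the visible error path re-looks-up the start of the range, suggesting that partial progress is walked back when a grab fails. A heavily simplified "fill, or unwind on failure" sketch; grab() and populate() are invented helpers standing in for vm_page_grab_valid() and the unwind loop.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
grab(int **slots, int idx, int fail_at)
{
        if (idx == fail_at)
                return (false);         /* simulate a grab failure */
        slots[idx] = malloc(sizeof(int));
        return (slots[idx] != NULL);
}

static bool
populate(int **slots, int start, int end, int fail_at)
{
        int i;

        for (i = start; i < end; i++) {
                if (!grab(slots, i, fail_at)) {
                        while (--i >= start) { /* unwind partial progress */
                                free(slots[i]);
                                slots[i] = NULL;
                        }
                        return (false);
                }
        }
        return (true);
}

int
main(void)
{
        int *slots[4] = { NULL };

        /* Fail at index 2: indices 0 and 1 are grabbed, then unwound. */
        printf("populate %s\n",
            populate(slots, 0, 4, 2) ? "ok" : "failed");
        return (0);
}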
2334 vm_object_set_writeable_dirty(vm_object_t object) in vm_object_set_writeable_dirty() argument
2338 if (object->type != OBJT_VNODE && in vm_object_set_writeable_dirty()
2339 (object->flags & OBJ_TMPFS_NODE) == 0) in vm_object_set_writeable_dirty()
2341 atomic_add_int(&object->generation, 1); in vm_object_set_writeable_dirty()
2353 vm_object_unwire(vm_object_t object, vm_ooffset_t offset, vm_size_t length, in vm_object_unwire() argument
2366 if ((object->flags & OBJ_FICTITIOUS) != 0) in vm_object_unwire()
2372 VM_OBJECT_RLOCK(object); in vm_object_unwire()
2373 m = vm_page_find_least(object, pindex); in vm_object_unwire()
2381 tobject = object; in vm_object_unwire()
2404 for (tobject = object; locked_depth >= 1; in vm_object_unwire()
2407 if (tm->object != tobject) in vm_object_unwire()
2420 for (tobject = object; locked_depth >= 1; locked_depth--) { in vm_object_unwire()
2433 vm_object_vnode(vm_object_t object) in vm_object_vnode() argument
2437 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_vnode()
2438 if (object->type == OBJT_VNODE) { in vm_object_vnode()
2439 vp = object->handle; in vm_object_vnode()
2441 } else if (object->type == OBJT_SWAP && in vm_object_vnode()
2442 (object->flags & OBJ_TMPFS) != 0) { in vm_object_vnode()
2443 vp = object->un_pager.swp.swp_tmpfs; in vm_object_vnode()
2488 vm_object_kvme_type(vm_object_t object, struct vnode **vpp) in vm_object_kvme_type() argument
2491 VM_OBJECT_ASSERT_LOCKED(object); in vm_object_kvme_type()
2493 *vpp = vm_object_vnode(object); in vm_object_kvme_type()
2494 switch (object->type) { in vm_object_kvme_type()
2500 if ((object->flags & OBJ_TMPFS_NODE) != 0) in vm_object_kvme_type()
2639 _vm_object_in_map(vm_map_t map, vm_object_t object, vm_map_entry_t entry) in _vm_object_in_map() argument
2650 if (_vm_object_in_map(map, object, tmpe)) { in _vm_object_in_map()
2655 tmpm = entry->object.sub_map; in _vm_object_in_map()
2657 if (_vm_object_in_map(tmpm, object, tmpe)) { in _vm_object_in_map()
2661 } else if ((obj = entry->object.vm_object) != NULL) { in _vm_object_in_map()
2663 if (obj == object) { in _vm_object_in_map()
2671 vm_object_in_map(vm_object_t object) in vm_object_in_map() argument
2679 if (_vm_object_in_map(&p->p_vmspace->vm_map, object, 0)) { in vm_object_in_map()
2685 if (_vm_object_in_map(kernel_map, object, 0)) in vm_object_in_map()
2692 vm_object_t object; in DB_SHOW_COMMAND() local
2698 TAILQ_FOREACH(object, &vm_object_list, object_list) { in DB_SHOW_COMMAND()
2699 if ((object->flags & OBJ_ANON) != 0) { in DB_SHOW_COMMAND()
2700 if (object->ref_count == 0) { in DB_SHOW_COMMAND()
2702 (long)object->size); in DB_SHOW_COMMAND()
2704 if (!vm_object_in_map(object)) { in DB_SHOW_COMMAND()
2708 object->ref_count, (u_long)object->size, in DB_SHOW_COMMAND()
2709 (u_long)object->size, in DB_SHOW_COMMAND()
2710 (void *)object->backing_object); in DB_SHOW_COMMAND()
2721 DB_SHOW_COMMAND(object, vm_object_print_static) in DB_SHOW_COMMAND() argument
2724 vm_object_t object = (vm_object_t)addr; in DB_SHOW_COMMAND() local
2734 if (object == NULL) in DB_SHOW_COMMAND()
2739 object, (int)object->type, (uintmax_t)object->size, in DB_SHOW_COMMAND()
2740 object->resident_page_count, object->ref_count, object->flags, in DB_SHOW_COMMAND()
2741 object->cred ? object->cred->cr_ruid : -1, (uintmax_t)object->charge); in DB_SHOW_COMMAND()
2743 object->shadow_count, in DB_SHOW_COMMAND()
2744 object->backing_object ? object->backing_object->ref_count : 0, in DB_SHOW_COMMAND()
2745 object->backing_object, (uintmax_t)object->backing_object_offset); in DB_SHOW_COMMAND()
2752 TAILQ_FOREACH(p, &object->memq, listq) { in DB_SHOW_COMMAND()
2790 vm_object_t object; in DB_SHOW_COMMAND() local
2797 TAILQ_FOREACH(object, &vm_object_list, object_list) { in DB_SHOW_COMMAND()
2798 db_printf("new object: %p\n", (void *)object); in DB_SHOW_COMMAND()
2809 TAILQ_FOREACH(m, &object->memq, listq) { in DB_SHOW_COMMAND()