Lines Matching refs:entry (sys/vm/vm_map.c)

135 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
136 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
137 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
148 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
539 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add) in vm_map_entry_set_vnode_text() argument
545 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0) in vm_map_entry_set_vnode_text()
547 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_set_vnode_text()
549 object = entry->object.vm_object; in vm_map_entry_set_vnode_text()
550 KASSERT(object != NULL, ("No object for text, entry %p", entry)); in vm_map_entry_set_vnode_text()
557 entry, entry->object.vm_object)); in vm_map_entry_set_vnode_text()
576 "entry %p, object %p, add %d", entry, object, add)); in vm_map_entry_set_vnode_text()
594 "entry %p, object %p, add %d", entry, object, add)); in vm_map_entry_set_vnode_text()
619 vm_map_entry_t entry, next; in vm_map_process_deferred() local
623 entry = td->td_map_def_user; in vm_map_process_deferred()
625 while (entry != NULL) { in vm_map_process_deferred()
626 next = entry->defer_next; in vm_map_process_deferred()
627 MPASS((entry->eflags & (MAP_ENTRY_WRITECNT | in vm_map_process_deferred()
630 if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) { in vm_map_process_deferred()
635 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_process_deferred()
637 object = entry->object.vm_object; in vm_map_process_deferred()
639 vm_pager_release_writecount(object, entry->start, in vm_map_process_deferred()
640 entry->end); in vm_map_process_deferred()
642 vm_map_entry_set_vnode_text(entry, false); in vm_map_process_deferred()
643 vm_map_entry_deallocate(entry, FALSE); in vm_map_process_deferred()
644 entry = next; in vm_map_process_deferred()
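
Taken together, the matches above give nearly the whole body of vm_map_process_deferred(): entries queued on the per-thread td_map_def_user list are torn down outside the map lock. A reconstruction of the loop, hedged where lines that do not reference `entry` are filled in from context:

    void
    vm_map_process_deferred(void)
    {
            struct thread *td;
            vm_map_entry_t entry, next;
            vm_object_t object;

            td = curthread;
            entry = td->td_map_def_user;
            td->td_map_def_user = NULL;     /* detach the list first */
            while (entry != NULL) {
                    next = entry->defer_next;
                    /* An entry carries a writecount or an exec
                     * reference, never both. */
                    MPASS((entry->eflags & (MAP_ENTRY_WRITECNT |
                        MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_WRITECNT |
                        MAP_ENTRY_VN_EXEC));
                    if ((entry->eflags & MAP_ENTRY_WRITECNT) != 0) {
                            KASSERT((entry->eflags &
                                MAP_ENTRY_IS_SUB_MAP) == 0,
                                ("Submap with writecount"));
                            object = entry->object.vm_object;
                            vm_pager_release_writecount(object,
                                entry->start, entry->end);
                    }
                    vm_map_entry_set_vnode_text(entry, false);
                    vm_map_entry_deallocate(entry, FALSE);
                    entry = next;
            }
    }
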
958 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_dispose() argument
960 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry); in vm_map_entry_dispose()
1011 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior) in vm_map_entry_set_behavior() argument
1013 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) | in vm_map_entry_set_behavior()
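
Line 1013 shows only the left half of the expression; the rest presumably masks the new value the same way, giving a plain read-modify-write of the behavior bits:

    static inline void
    vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
    {
            /* Replace only the behavior bits, leaving other eflags alone. */
            entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
                (behavior & MAP_ENTRY_BEHAV_MASK);
    }
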
1048 vm_map_entry_pred(vm_map_entry_t entry) in vm_map_entry_pred() argument
1052 prior = entry->left; in vm_map_entry_pred()
1053 if (prior->right->start < entry->start) { in vm_map_entry_pred()
1056 while (prior->right != entry); in vm_map_entry_pred()
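
The do/while at lines 1053-1056 is the whole trick in vm_map_entry_pred(): in this tree, a right link that would otherwise be NULL threads back to the in-order successor, so the predecessor is found by walking the left subtree rightward until the thread points back at the entry itself. A sketch of the full function, with the unmatched lines filled in from context:

    static inline vm_map_entry_t
    vm_map_entry_pred(vm_map_entry_t entry)
    {
            vm_map_entry_t prior;

            prior = entry->left;
            if (prior->right->start < entry->start) {
                    /* Real left subtree: descend to its rightmost node,
                     * whose right link threads back to entry. */
                    do
                            prior = prior->right;
                    while (prior->right != entry);
            }
            return (prior);
    }
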
1418 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_link() argument
1425 map->nentries, entry); in vm_map_entry_link()
1429 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_link()
1435 max_free_left = vm_map_splay_merge_pred(header, entry, llist); in vm_map_entry_link()
1436 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); in vm_map_entry_link()
1437 } else if (entry->start == root->start) { in vm_map_entry_link()
1444 KASSERT(entry->end < root->end, in vm_map_entry_link()
1447 root->offset += entry->end - root->start; in vm_map_entry_link()
1448 root->start = entry->end; in vm_map_entry_link()
1449 max_free_left = vm_map_splay_merge_pred(header, entry, llist); in vm_map_entry_link()
1451 vm_map_splay_merge_pred(entry, root, entry), in vm_map_entry_link()
1460 KASSERT(entry->end == root->end, in vm_map_entry_link()
1463 entry->offset += entry->start - root->start; in vm_map_entry_link()
1464 root->end = entry->start; in vm_map_entry_link()
1467 vm_map_splay_merge_succ(entry, root, entry)); in vm_map_entry_link()
1468 max_free_right = vm_map_splay_merge_succ(header, entry, rlist); in vm_map_entry_link()
1470 entry->max_free = vm_size_max(max_free_left, max_free_right); in vm_map_entry_link()
1471 map->root = entry; in vm_map_entry_link()
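
The matches for vm_map_entry_link() outline three cases: the new entry fills a gap, clips the front of an existing entry, or clips its end. An annotated skeleton assembled from the lines above, assuming the vm_map_splay_merge_left()/vm_map_splay_merge_right() helpers that accompany the file's splay code (a sketch, not the verbatim function):

    root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
    if (root == NULL) {
            /* Case 1: no overlap; the new entry becomes the root. */
            max_free_left = vm_map_splay_merge_pred(header, entry, llist);
            max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
    } else if (entry->start == root->start) {
            /* Case 2: front clip.  Root shrinks to [entry->end, root->end)
             * and hangs off the new root as its right child. */
            root->offset += entry->end - root->start;
            root->start = entry->end;
            max_free_left = vm_map_splay_merge_pred(header, entry, llist);
            max_free_right = root->max_free = vm_size_max(
                vm_map_splay_merge_pred(entry, root, entry),
                vm_map_splay_merge_right(header, root, rlist));
    } else {
            /* Case 3: end clip, the mirror image.  Root keeps
             * [root->start, entry->start) as the left child. */
            entry->offset += entry->start - root->start;
            root->end = entry->start;
            max_free_left = root->max_free = vm_size_max(
                vm_map_splay_merge_left(header, root, llist),
                vm_map_splay_merge_succ(entry, root, entry));
            max_free_right = vm_map_splay_merge_succ(header, entry, rlist);
    }
    entry->max_free = vm_size_max(max_free_left, max_free_right);
    map->root = entry;
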
1481 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry, in vm_map_entry_unlink() argument
1489 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_unlink()
1519 map->nentries, entry); in vm_map_entry_unlink()
1531 vm_map_entry_resize(vm_map_t map, vm_map_entry_t entry, vm_size_t grow_amount) in vm_map_entry_resize() argument
1537 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist); in vm_map_entry_resize()
1540 entry->end += grow_amount; in vm_map_entry_resize()
1547 __func__, map, map->nentries, entry); in vm_map_entry_resize()
1564 vm_map_entry_t *entry) /* OUT */ in vm_map_lookup_entry() argument
1576 *entry = header; in vm_map_lookup_entry()
1580 *entry = cur; in vm_map_lookup_entry()
1603 *entry = header; in vm_map_lookup_entry()
1606 *entry = cur; in vm_map_lookup_entry()
1626 *entry = cur; in vm_map_lookup_entry()
1630 *entry = lbound; in vm_map_lookup_entry()
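
The four assignments to *entry above reflect the function's contract: on success *entry is the entry containing the address; on failure it is the predecessor, which may be the map header. A hypothetical caller (`map` and `addr` are placeholders):

    vm_map_entry_t entry;

    vm_map_lock_read(map);
    if (vm_map_lookup_entry(map, addr, &entry)) {
            /* addr lies within [entry->start, entry->end). */
    } else {
            /* addr is in a hole; entry is the preceding entry, or
             * the map header if addr precedes every mapping. */
    }
    vm_map_unlock_read(map);
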
2308 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry) in vm_map_mergeable_neighbors() argument
2312 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0, in vm_map_mergeable_neighbors()
2314 prev, entry)); in vm_map_mergeable_neighbors()
2315 return (prev->end == entry->start && in vm_map_mergeable_neighbors()
2316 prev->object.vm_object == entry->object.vm_object && in vm_map_mergeable_neighbors()
2318 prev->offset + (prev->end - prev->start) == entry->offset) && in vm_map_mergeable_neighbors()
2319 prev->eflags == entry->eflags && in vm_map_mergeable_neighbors()
2320 prev->protection == entry->protection && in vm_map_mergeable_neighbors()
2321 prev->max_protection == entry->max_protection && in vm_map_mergeable_neighbors()
2322 prev->inheritance == entry->inheritance && in vm_map_mergeable_neighbors()
2323 prev->wired_count == entry->wired_count && in vm_map_mergeable_neighbors()
2324 prev->cred == entry->cred); in vm_map_mergeable_neighbors()
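
Source line 2317 does not reference `entry` and so is missing above; it presumably guards the offset check for entries with no backing object. The full predicate, filled in on that assumption:

    return (prev->end == entry->start &&
        prev->object.vm_object == entry->object.vm_object &&
        (prev->object.vm_object == NULL ||
        prev->offset + (prev->end - prev->start) == entry->offset) &&
        prev->eflags == entry->eflags &&
        prev->protection == entry->protection &&
        prev->max_protection == entry->max_protection &&
        prev->inheritance == entry->inheritance &&
        prev->wired_count == entry->wired_count &&
        prev->cred == entry->cred);
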
2328 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry) in vm_map_merged_neighbor_dispose() argument
2341 if (entry->object.vm_object != NULL) in vm_map_merged_neighbor_dispose()
2342 vm_object_deallocate(entry->object.vm_object); in vm_map_merged_neighbor_dispose()
2343 if (entry->cred != NULL) in vm_map_merged_neighbor_dispose()
2344 crfree(entry->cred); in vm_map_merged_neighbor_dispose()
2345 vm_map_entry_dispose(map, entry); in vm_map_merged_neighbor_dispose()
2359 vm_map_entry_t entry) in vm_map_try_merge_entries() argument
2363 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 && in vm_map_try_merge_entries()
2364 vm_map_mergeable_neighbors(prev_entry, entry)) { in vm_map_try_merge_entries()
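
When the test succeeds, the previous entry is absorbed into its successor. A sketch of the body, assuming the unlink-and-dispose pair shown elsewhere in this listing:

    if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 &&
        vm_map_mergeable_neighbors(prev_entry, entry)) {
            vm_map_entry_unlink(map, prev_entry, UNLINK_MERGE_NEXT);
            vm_map_merged_neighbor_dispose(map, prev_entry);
    }
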
2376 vm_map_entry_back(vm_map_entry_t entry) in vm_map_entry_back() argument
2380 KASSERT(entry->object.vm_object == NULL, in vm_map_entry_back()
2381 ("map entry %p has backing object", entry)); in vm_map_entry_back()
2382 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_back()
2383 ("map entry %p is a submap", entry)); in vm_map_entry_back()
2384 object = vm_object_allocate_anon(atop(entry->end - entry->start), NULL, in vm_map_entry_back()
2385 entry->cred, entry->end - entry->start); in vm_map_entry_back()
2386 entry->object.vm_object = object; in vm_map_entry_back()
2387 entry->offset = 0; in vm_map_entry_back()
2388 entry->cred = NULL; in vm_map_entry_back()
2398 vm_map_entry_charge_object(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_charge_object() argument
2402 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0, in vm_map_entry_charge_object()
2403 ("map entry %p is a submap", entry)); in vm_map_entry_charge_object()
2404 if (entry->object.vm_object == NULL && !map->system_map && in vm_map_entry_charge_object()
2405 (entry->eflags & MAP_ENTRY_GUARD) == 0) in vm_map_entry_charge_object()
2406 vm_map_entry_back(entry); in vm_map_entry_charge_object()
2407 else if (entry->object.vm_object != NULL && in vm_map_entry_charge_object()
2408 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) && in vm_map_entry_charge_object()
2409 entry->cred != NULL) { in vm_map_entry_charge_object()
2410 VM_OBJECT_WLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2411 KASSERT(entry->object.vm_object->cred == NULL, in vm_map_entry_charge_object()
2412 ("OVERCOMMIT: %s: both cred e %p", __func__, entry)); in vm_map_entry_charge_object()
2413 entry->object.vm_object->cred = entry->cred; in vm_map_entry_charge_object()
2414 entry->object.vm_object->charge = entry->end - entry->start; in vm_map_entry_charge_object()
2415 VM_OBJECT_WUNLOCK(entry->object.vm_object); in vm_map_entry_charge_object()
2416 entry->cred = NULL; in vm_map_entry_charge_object()
2426 vm_map_entry_clone(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_clone() argument
2436 vm_map_entry_charge_object(map, entry); in vm_map_entry_clone()
2440 *new_entry = *entry; in vm_map_entry_clone()
2442 crhold(entry->cred); in vm_map_entry_clone()
2443 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) { in vm_map_entry_clone()
2464 vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t startaddr) in vm_map_clip_start() argument
2471 "%s: map %p entry %p start 0x%jx", __func__, map, entry, in vm_map_clip_start()
2474 if (startaddr <= entry->start) in vm_map_clip_start()
2478 KASSERT(entry->end > startaddr && entry->start < startaddr, in vm_map_clip_start()
2479 ("%s: invalid clip of entry %p", __func__, entry)); in vm_map_clip_start()
2481 bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> in vm_map_clip_start()
2488 new_entry = vm_map_entry_clone(map, entry); in vm_map_clip_start()
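
The clip itself is short once the clone exists: the clone keeps the front portion and is linked before the original, and the front-clip case of vm_map_entry_link() (sketched above) shrinks the original to begin at startaddr. A sketch of the tail of vm_map_clip_start(), assuming the structure implied by the matches:

    bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >>
        MAP_ENTRY_SPLIT_BOUNDARY_SHIFT;
    if (bdry_idx != 0 &&
        (startaddr & (pagesizes[bdry_idx] - 1)) != 0)
            return (KERN_INVALID_ARGUMENT);

    new_entry = vm_map_entry_clone(map, entry);

    /*
     * Split off the front portion: the clone covers
     * [entry->start, startaddr) and is inserted before the
     * original entry.
     */
    new_entry->end = startaddr;
    vm_map_entry_link(map, new_entry);
    return (KERN_SUCCESS);
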
2510 vm_map_entry_t entry; in vm_map_lookup_clip_start() local
2519 entry = *prev_entry; in vm_map_lookup_clip_start()
2520 rv = vm_map_clip_start(map, entry, start); in vm_map_lookup_clip_start()
2523 *prev_entry = vm_map_entry_pred(entry); in vm_map_lookup_clip_start()
2525 entry = vm_map_entry_succ(*prev_entry); in vm_map_lookup_clip_start()
2526 *res_entry = entry; in vm_map_lookup_clip_start()
2538 vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t endaddr) in vm_map_clip_end() argument
2545 "%s: map %p entry %p end 0x%jx", __func__, map, entry, in vm_map_clip_end()
2548 if (endaddr >= entry->end) in vm_map_clip_end()
2552 KASSERT(entry->start < endaddr && entry->end > endaddr, in vm_map_clip_end()
2553 ("%s: invalid clip of entry %p", __func__, entry)); in vm_map_clip_end()
2555 bdry_idx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) >> in vm_map_clip_end()
2562 new_entry = vm_map_entry_clone(map, entry); in vm_map_clip_end()
2599 vm_map_entry_t entry; in vm_map_submap() local
2610 if (vm_map_lookup_entry(map, start, &entry) && entry->end >= end && in vm_map_submap()
2611 (entry->eflags & MAP_ENTRY_COW) == 0 && in vm_map_submap()
2612 entry->object.vm_object == NULL) { in vm_map_submap()
2613 result = vm_map_clip_start(map, entry, start); in vm_map_submap()
2616 result = vm_map_clip_end(map, entry, end); in vm_map_submap()
2619 entry->object.sub_map = submap; in vm_map_submap()
2620 entry->eflags |= MAP_ENTRY_IS_SUB_MAP; in vm_map_submap()
2743 vm_map_entry_t entry, first_entry, in_tran, prev_entry; in vm_map_protect() local
2785 for (entry = first_entry; entry->start < end; in vm_map_protect()
2786 entry = vm_map_entry_succ(entry)) { in vm_map_protect()
2787 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) in vm_map_protect()
2789 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_protect()
2794 new_prot = entry->protection; in vm_map_protect()
2796 new_maxprot = entry->max_protection; in vm_map_protect()
2797 if ((new_prot & entry->max_protection) != new_prot || in vm_map_protect()
2798 (new_maxprot & entry->max_protection) != new_maxprot) { in vm_map_protect()
2802 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0) in vm_map_protect()
2803 in_tran = entry; in vm_map_protect()
2831 for (entry = first_entry; entry->start < end; in vm_map_protect()
2832 entry = vm_map_entry_succ(entry)) { in vm_map_protect()
2833 rv = vm_map_clip_end(map, entry, end); in vm_map_protect()
2840 ((new_prot & ~entry->protection) & VM_PROT_WRITE) == 0 || in vm_map_protect()
2841 ENTRY_CHARGED(entry) || in vm_map_protect()
2842 (entry->eflags & MAP_ENTRY_GUARD) != 0) in vm_map_protect()
2846 obj = entry->object.vm_object; in vm_map_protect()
2849 (entry->eflags & MAP_ENTRY_NEEDS_COPY) != 0) { in vm_map_protect()
2850 if (!swap_reserve(entry->end - entry->start)) { in vm_map_protect()
2852 end = entry->end; in vm_map_protect()
2856 entry->cred = cred; in vm_map_protect()
2875 obj, entry)); in vm_map_protect()
2879 end = entry->end; in vm_map_protect()
2894 for (prev_entry = vm_map_entry_pred(first_entry), entry = first_entry; in vm_map_protect()
2895 entry->start < end; in vm_map_protect()
2896 vm_map_try_merge_entries(map, prev_entry, entry), in vm_map_protect()
2897 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_protect()
2899 (entry->eflags & MAP_ENTRY_GUARD) != 0) in vm_map_protect()
2902 old_prot = entry->protection; in vm_map_protect()
2905 entry->max_protection = new_maxprot; in vm_map_protect()
2906 entry->protection = new_maxprot & old_prot; in vm_map_protect()
2909 entry->protection = new_prot; in vm_map_protect()
2917 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0 && in vm_map_protect()
2918 (entry->protection & VM_PROT_WRITE) != 0 && in vm_map_protect()
2920 vm_fault_copy_entry(map, map, entry, entry, NULL); in vm_map_protect()
2926 if ((old_prot & ~entry->protection) != 0) { in vm_map_protect()
2927 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \ in vm_map_protect() argument
2929 pmap_protect(map->pmap, entry->start, in vm_map_protect()
2930 entry->end, in vm_map_protect()
2931 entry->protection & MASK(entry)); in vm_map_protect()
2935 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_protect()
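
Line 2927 shows only half of the MASK definition. The intent, completed from context: for copy-on-write entries, write permission is withheld at the pmap level even when entry->protection grants it, so the first write faults and triggers the copy:

    #define MASK(entry)     (((entry)->eflags & MAP_ENTRY_COW) ? \
                                ~VM_PROT_WRITE : VM_PROT_ALL)
            pmap_protect(map->pmap, entry->start, entry->end,
                entry->protection & MASK(entry));
    #undef MASK
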
2955 vm_map_entry_t entry, prev_entry; in vm_map_madvise() local
3002 rv = vm_map_lookup_clip_start(map, start, &entry, &prev_entry); in vm_map_madvise()
3008 for (; entry->start < end; prev_entry = entry, in vm_map_madvise()
3009 entry = vm_map_entry_succ(entry)) { in vm_map_madvise()
3010 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vm_map_madvise()
3013 rv = vm_map_clip_end(map, entry, end); in vm_map_madvise()
3021 vm_map_entry_set_behavior(entry, in vm_map_madvise()
3025 vm_map_entry_set_behavior(entry, in vm_map_madvise()
3029 vm_map_entry_set_behavior(entry, in vm_map_madvise()
3033 entry->eflags |= MAP_ENTRY_NOSYNC; in vm_map_madvise()
3036 entry->eflags &= ~MAP_ENTRY_NOSYNC; in vm_map_madvise()
3039 entry->eflags |= MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3042 entry->eflags &= ~MAP_ENTRY_NOCOREDUMP; in vm_map_madvise()
3047 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_madvise()
3049 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_madvise()
3061 if (!vm_map_lookup_entry(map, start, &entry)) in vm_map_madvise()
3062 entry = vm_map_entry_succ(entry); in vm_map_madvise()
3063 for (; entry->start < end; in vm_map_madvise()
3064 entry = vm_map_entry_succ(entry)) { in vm_map_madvise()
3067 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) in vm_map_madvise()
3078 entry->object.vm_object != NULL && in vm_map_madvise()
3079 entry->object.vm_object->backing_object != NULL) in vm_map_madvise()
3082 pstart = OFF_TO_IDX(entry->offset); in vm_map_madvise()
3083 pend = pstart + atop(entry->end - entry->start); in vm_map_madvise()
3084 useStart = entry->start; in vm_map_madvise()
3085 useEnd = entry->end; in vm_map_madvise()
3087 if (entry->start < start) { in vm_map_madvise()
3088 pstart += atop(start - entry->start); in vm_map_madvise()
3091 if (entry->end > end) { in vm_map_madvise()
3092 pend -= atop(entry->end - end); in vm_map_madvise()
3113 vm_object_madvise(entry->object.vm_object, pstart, in vm_map_madvise()
3122 entry->wired_count == 0) { in vm_map_madvise()
3125 entry->protection, in vm_map_madvise()
3126 entry->object.vm_object, in vm_map_madvise()
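
Lines 3082-3092 compute the object-page range for advice that applies to the backing object rather than the map entry: the range starts as the whole entry and is clamped to [start, end). A sketch with the two unmatched assignments (the useStart/useEnd updates) filled in:

    pstart = OFF_TO_IDX(entry->offset);
    pend = pstart + atop(entry->end - entry->start);
    useStart = entry->start;
    useEnd = entry->end;

    if (entry->start < start) {
            /* Advice begins partway into this entry. */
            pstart += atop(start - entry->start);
            useStart = start;
    }
    if (entry->end > end) {
            /* Advice ends before this entry does. */
            pend -= atop(entry->end - end);
            useEnd = end;
    }

    vm_object_madvise(entry->object.vm_object, pstart, pend, behav);
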
3150 vm_map_entry_t entry, lentry, prev_entry, start_entry; in vm_map_inherit() local
3175 for (entry = start_entry; entry->start < end; in vm_map_inherit()
3176 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_inherit()
3177 if ((entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) in vm_map_inherit()
3184 for (entry = start_entry; entry->start < end; prev_entry = entry, in vm_map_inherit()
3185 entry = vm_map_entry_succ(entry)) { in vm_map_inherit()
3186 KASSERT(entry->end <= end, ("non-clipped entry %p end %jx %jx", in vm_map_inherit()
3187 entry, (uintmax_t)entry->end, (uintmax_t)end)); in vm_map_inherit()
3188 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 || in vm_map_inherit()
3190 entry->inheritance = new_inheritance; in vm_map_inherit()
3191 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_inherit()
3193 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_inherit()
3211 vm_map_entry_t entry; in vm_map_entry_in_transition() local
3238 if (!vm_map_lookup_entry(map, start, &entry)) { in vm_map_entry_in_transition()
3243 entry = vm_map_entry_succ(entry); in vm_map_entry_in_transition()
3245 return (entry); in vm_map_entry_in_transition()
3257 vm_map_entry_t entry, first_entry, next_entry, prev_entry; in vm_map_unwire() local
3276 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_unwire()
3277 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_unwire()
3282 &end, holes_ok, entry); in vm_map_unwire()
3284 if (entry == first_entry) { in vm_map_unwire()
3291 first_entry = (entry == first_entry) ? in vm_map_unwire()
3295 rv = vm_map_clip_start(map, entry, start); in vm_map_unwire()
3298 rv = vm_map_clip_end(map, entry, end); in vm_map_unwire()
3306 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_unwire()
3307 entry->wiring_thread == NULL, in vm_map_unwire()
3308 ("owned map entry %p", entry)); in vm_map_unwire()
3309 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3310 entry->wiring_thread = curthread; in vm_map_unwire()
3311 next_entry = vm_map_entry_succ(entry); in vm_map_unwire()
3317 entry->end < end && next_entry->start > entry->end) { in vm_map_unwire()
3318 end = entry->end; in vm_map_unwire()
3326 vm_map_entry_system_wired_count(entry) == 0) { in vm_map_unwire()
3327 end = entry->end; in vm_map_unwire()
3337 entry = vm_map_entry_succ(first_entry); in vm_map_unwire()
3340 entry = first_entry; in vm_map_unwire()
3342 for (; entry->start < end; in vm_map_unwire()
3343 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_unwire()
3353 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_unwire()
3354 entry->wiring_thread != curthread) { in vm_map_unwire()
3361 (entry->eflags & MAP_ENTRY_USER_WIRED))) { in vm_map_unwire()
3362 if (entry->wired_count == 1) in vm_map_unwire()
3363 vm_map_entry_unwire(map, entry); in vm_map_unwire()
3365 entry->wired_count--; in vm_map_unwire()
3367 entry->eflags &= ~MAP_ENTRY_USER_WIRED; in vm_map_unwire()
3369 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_unwire()
3370 ("vm_map_unwire: in-transition flag missing %p", entry)); in vm_map_unwire()
3371 KASSERT(entry->wiring_thread == curthread, in vm_map_unwire()
3372 ("vm_map_unwire: alien wire %p", entry)); in vm_map_unwire()
3373 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION; in vm_map_unwire()
3374 entry->wiring_thread = NULL; in vm_map_unwire()
3375 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_unwire()
3376 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_unwire()
3379 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_unwire()
3381 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_unwire()
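
The matches above trace the IN_TRANSITION protocol that vm_map_unwire() (and vm_map_wire_locked() below) follow: a first pass claims each entry for the current thread, the map lock may then be dropped, and a second pass touches only the entries this thread marked, waking any waiters at the end. The key steps, excerpted and lightly completed (vm_map_wakeup() is the wakeup helper assumed from context):

    /* First pass: claim the entry before possibly sleeping. */
    entry->eflags |= MAP_ENTRY_IN_TRANSITION;
    entry->wiring_thread = curthread;

    /* Second pass: skip entries another thread is working on. */
    if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
        entry->wiring_thread != curthread)
            continue;

    /* ... unwire or roll back, then release the claim ... */
    entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
    entry->wiring_thread = NULL;
    if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
            entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
            vm_map_wakeup(map);
    }
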
3418 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry, in vm_map_wire_entry_failure() argument
3423 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 && in vm_map_wire_entry_failure()
3424 entry->wired_count == 1, in vm_map_wire_entry_failure()
3425 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry)); in vm_map_wire_entry_failure()
3426 KASSERT(failed_addr < entry->end, in vm_map_wire_entry_failure()
3427 ("vm_map_wire_entry_failure: entry %p was fully wired", entry)); in vm_map_wire_entry_failure()
3433 if (failed_addr > entry->start) { in vm_map_wire_entry_failure()
3434 pmap_unwire(map->pmap, entry->start, failed_addr); in vm_map_wire_entry_failure()
3435 vm_object_unwire(entry->object.vm_object, entry->offset, in vm_map_wire_entry_failure()
3436 failed_addr - entry->start, PQ_ACTIVE); in vm_map_wire_entry_failure()
3443 entry->wired_count = -1; in vm_map_wire_entry_failure()
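
Only a few statements of the recovery path match, but the shape is clear from lines 3433-3443: undo any partial wiring at the front of the entry, then flag the entry with an out-of-range count. A reconstruction of the tail of vm_map_wire_entry_failure():

    /*
     * If any pages at the start of this entry were successfully
     * wired, undo that work before marking the entry failed.
     */
    if (failed_addr > entry->start) {
            pmap_unwire(map->pmap, entry->start, failed_addr);
            vm_object_unwire(entry->object.vm_object, entry->offset,
                failed_addr - entry->start, PQ_ACTIVE);
    }

    /*
     * An out-of-range wired_count marks the entry as failed so the
     * final cleanup pass can recognize and reset it.
     */
    entry->wired_count = -1;
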
3466 vm_map_entry_t entry, first_entry, next_entry, prev_entry; in vm_map_wire_locked() local
3490 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_wire_locked()
3491 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) { in vm_map_wire_locked()
3496 &end, holes_ok, entry); in vm_map_wire_locked()
3498 if (entry == first_entry) in vm_map_wire_locked()
3503 first_entry = (entry == first_entry) ? in vm_map_wire_locked()
3507 rv = vm_map_clip_start(map, entry, start); in vm_map_wire_locked()
3510 rv = vm_map_clip_end(map, entry, end); in vm_map_wire_locked()
3518 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 && in vm_map_wire_locked()
3519 entry->wiring_thread == NULL, in vm_map_wire_locked()
3520 ("owned map entry %p", entry)); in vm_map_wire_locked()
3521 entry->eflags |= MAP_ENTRY_IN_TRANSITION; in vm_map_wire_locked()
3522 entry->wiring_thread = curthread; in vm_map_wire_locked()
3523 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 in vm_map_wire_locked()
3524 || (entry->protection & prot) != prot) { in vm_map_wire_locked()
3525 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED; in vm_map_wire_locked()
3527 end = entry->end; in vm_map_wire_locked()
3531 } else if (entry->wired_count == 0) { in vm_map_wire_locked()
3532 entry->wired_count++; in vm_map_wire_locked()
3534 npages = atop(entry->end - entry->start); in vm_map_wire_locked()
3536 vm_map_wire_entry_failure(map, entry, in vm_map_wire_locked()
3537 entry->start); in vm_map_wire_locked()
3538 end = entry->end; in vm_map_wire_locked()
3547 saved_start = entry->start; in vm_map_wire_locked()
3548 saved_end = entry->end; in vm_map_wire_locked()
3550 bidx = (entry->eflags & MAP_ENTRY_SPLIT_BOUNDARY_MASK) in vm_map_wire_locked()
3580 first_entry = (entry == first_entry) ? in vm_map_wire_locked()
3582 for (entry = next_entry; entry->end < saved_end; in vm_map_wire_locked()
3583 entry = vm_map_entry_succ(entry)) { in vm_map_wire_locked()
3591 faddr < entry->end) in vm_map_wire_locked()
3593 entry, faddr); in vm_map_wire_locked()
3597 vm_map_wire_entry_failure(map, entry, faddr); in vm_map_wire_locked()
3600 end = entry->end; in vm_map_wire_locked()
3604 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3605 entry->wired_count++; in vm_map_wire_locked()
3611 next_entry = vm_map_entry_succ(entry); in vm_map_wire_locked()
3613 entry->end < end && next_entry->start > entry->end) { in vm_map_wire_locked()
3614 end = entry->end; in vm_map_wire_locked()
3626 entry = vm_map_entry_succ(first_entry); in vm_map_wire_locked()
3629 entry = first_entry; in vm_map_wire_locked()
3631 for (; entry->start < end; in vm_map_wire_locked()
3632 prev_entry = entry, entry = vm_map_entry_succ(entry)) { in vm_map_wire_locked()
3646 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 || in vm_map_wire_locked()
3647 entry->wiring_thread != curthread) { in vm_map_wire_locked()
3653 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0) { in vm_map_wire_locked()
3657 entry->eflags |= MAP_ENTRY_USER_WIRED; in vm_map_wire_locked()
3658 } else if (entry->wired_count == -1) { in vm_map_wire_locked()
3663 entry->wired_count = 0; in vm_map_wire_locked()
3665 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) { in vm_map_wire_locked()
3670 if (entry->wired_count == 1) { in vm_map_wire_locked()
3671 vm_map_entry_unwire(map, entry); in vm_map_wire_locked()
3674 atop(entry->end - entry->start)); in vm_map_wire_locked()
3676 entry->wired_count--; in vm_map_wire_locked()
3678 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0, in vm_map_wire_locked()
3679 ("vm_map_wire: in-transition flag missing %p", entry)); in vm_map_wire_locked()
3680 KASSERT(entry->wiring_thread == curthread, in vm_map_wire_locked()
3681 ("vm_map_wire: alien wire %p", entry)); in vm_map_wire_locked()
3682 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION | in vm_map_wire_locked()
3684 entry->wiring_thread = NULL; in vm_map_wire_locked()
3685 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) { in vm_map_wire_locked()
3686 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP; in vm_map_wire_locked()
3689 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_wire_locked()
3691 vm_map_try_merge_entries(map, prev_entry, entry); in vm_map_wire_locked()
3721 vm_map_entry_t entry, first_entry, next_entry; in vm_map_sync() local
3743 for (entry = first_entry; entry->start < end; entry = next_entry) { in vm_map_sync()
3745 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) { in vm_map_sync()
3749 bdry_idx = (entry->eflags & in vm_map_sync()
3759 next_entry = vm_map_entry_succ(entry); in vm_map_sync()
3760 if (end > entry->end && in vm_map_sync()
3761 entry->end != next_entry->start) { in vm_map_sync()
3775 for (entry = first_entry; entry->start < end;) { in vm_map_sync()
3776 offset = entry->offset + (start - entry->start); in vm_map_sync()
3777 size = (end <= entry->end ? end : entry->end) - start; in vm_map_sync()
3778 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0) { in vm_map_sync()
3783 smap = entry->object.sub_map; in vm_map_sync()
3793 object = entry->object.vm_object; in vm_map_sync()
3804 !vm_map_lookup_entry(map, start, &entry)) in vm_map_sync()
3805 entry = vm_map_entry_succ(entry); in vm_map_sync()
3821 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_unwire() argument
3826 KASSERT(entry->wired_count > 0, in vm_map_entry_unwire()
3827 ("vm_map_entry_unwire: entry %p isn't wired", entry)); in vm_map_entry_unwire()
3829 size = entry->end - entry->start; in vm_map_entry_unwire()
3830 if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0) in vm_map_entry_unwire()
3832 pmap_unwire(map->pmap, entry->start, entry->end); in vm_map_entry_unwire()
3833 vm_object_unwire(entry->object.vm_object, entry->offset, size, in vm_map_entry_unwire()
3835 entry->wired_count = 0; in vm_map_entry_unwire()
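
vm_map_entry_unwire() is almost fully visible. The one unmatched statement, between source lines 3830 and 3832, presumably drops the per-user wired-page accounting; a completed sketch on that assumption:

    static void
    vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
    {
            vm_size_t size;

            VM_MAP_ASSERT_LOCKED(map);
            KASSERT(entry->wired_count > 0,
                ("vm_map_entry_unwire: entry %p isn't wired", entry));

            size = entry->end - entry->start;
            if ((entry->eflags & MAP_ENTRY_USER_WIRED) != 0)
                    vm_map_wire_user_count_sub(atop(size)); /* assumed */
            pmap_unwire(map->pmap, entry->start, entry->end);
            vm_object_unwire(entry->object.vm_object, entry->offset,
                size, PQ_ACTIVE);
            entry->wired_count = 0;
    }
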
3839 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map) in vm_map_entry_deallocate() argument
3842 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) in vm_map_entry_deallocate()
3843 vm_object_deallocate(entry->object.vm_object); in vm_map_entry_deallocate()
3844 uma_zfree(system_map ? kmapentzone : mapentzone, entry); in vm_map_entry_deallocate()
3853 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry) in vm_map_entry_delete() argument
3859 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE); in vm_map_entry_delete()
3860 object = entry->object.vm_object; in vm_map_entry_delete()
3862 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) { in vm_map_entry_delete()
3863 MPASS(entry->cred == NULL); in vm_map_entry_delete()
3864 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0); in vm_map_entry_delete()
3866 vm_map_entry_deallocate(entry, map->system_map); in vm_map_entry_delete()
3870 size = entry->end - entry->start; in vm_map_entry_delete()
3873 if (entry->cred != NULL) { in vm_map_entry_delete()
3874 swap_release_by_cred(size, entry->cred); in vm_map_entry_delete()
3875 crfree(entry->cred); in vm_map_entry_delete()
3878 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || object == NULL) { in vm_map_entry_delete()
3879 entry->object.vm_object = NULL; in vm_map_entry_delete()
3882 KASSERT(entry->cred == NULL || object->cred == NULL || in vm_map_entry_delete()
3883 (entry->eflags & MAP_ENTRY_NEEDS_COPY), in vm_map_entry_delete()
3884 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry)); in vm_map_entry_delete()
3885 offidxstart = OFF_TO_IDX(entry->offset); in vm_map_entry_delete()
3918 vm_map_entry_deallocate(entry, TRUE); in vm_map_entry_delete()
3920 entry->defer_next = curthread->td_map_def_user; in vm_map_entry_delete()
3921 curthread->td_map_def_user = entry; in vm_map_entry_delete()
3934 vm_map_entry_t entry, next_entry, scratch_entry; in vm_map_delete() local
3946 rv = vm_map_lookup_clip_start(map, start, &entry, &scratch_entry); in vm_map_delete()
3949 for (; entry->start < end; entry = next_entry) { in vm_map_delete()
3955 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 || in vm_map_delete()
3957 vm_map_entry_system_wired_count(entry) != 0)) { in vm_map_delete()
3961 saved_start = entry->start; in vm_map_delete()
3962 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP; in vm_map_delete()
3978 next_entry = entry; in vm_map_delete()
3983 rv = vm_map_clip_end(map, entry, end); in vm_map_delete()
3986 next_entry = vm_map_entry_succ(entry); in vm_map_delete()
3992 if (entry->wired_count != 0) in vm_map_delete()
3993 vm_map_entry_unwire(map, entry); in vm_map_delete()
4000 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 || in vm_map_delete()
4001 entry->object.vm_object != NULL) in vm_map_delete()
4002 pmap_remove(map->pmap, entry->start, entry->end); in vm_map_delete()
4004 if (entry->end == map->anon_loc) in vm_map_delete()
4005 map->anon_loc = entry->start; in vm_map_delete()
4013 vm_map_entry_delete(map, entry); in vm_map_delete()
4054 vm_map_entry_t entry; in vm_map_check_protection() local
4059 entry = tmp_entry; in vm_map_check_protection()
4065 if (start < entry->start) in vm_map_check_protection()
4070 if ((entry->protection & protection) != protection) in vm_map_check_protection()
4073 start = entry->end; in vm_map_check_protection()
4074 entry = vm_map_entry_succ(entry); in vm_map_check_protection()
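
The loop in vm_map_check_protection() walks entries in address order, failing on the first hole or on any entry lacking the requested bits. A sketch of the walk assembled from the matches:

    while (start < end) {
            /* A gap before the next entry means the range has a hole. */
            if (start < entry->start)
                    return (FALSE);
            /* Every entry must grant at least the requested bits. */
            if ((entry->protection & protection) != protection)
                    return (FALSE);
            start = entry->end;
            entry = vm_map_entry_succ(entry);
    }
    return (TRUE);
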
4242 vm_map_entry_t entry) in vmspace_map_entry_forked() argument
4247 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) in vmspace_map_entry_forked()
4249 entrysize = entry->end - entry->start; in vmspace_map_entry_forked()
4251 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) { in vmspace_map_entry_forked()
4253 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr && in vmspace_map_entry_forked()
4254 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) { in vmspace_map_entry_forked()
4255 newend = MIN(entry->end, in vmspace_map_entry_forked()
4257 vm2->vm_dsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4258 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr && in vmspace_map_entry_forked()
4259 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) { in vmspace_map_entry_forked()
4260 newend = MIN(entry->end, in vmspace_map_entry_forked()
4262 vm2->vm_tsize += btoc(newend - entry->start); in vmspace_map_entry_forked()
4950 vm_map_entry_t entry; in vm_map_lookup() local
4971 entry = *out_entry; in vm_map_lookup()
4976 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_lookup()
4979 *var_map = map = entry->object.sub_map; in vm_map_lookup()
4987 prot = entry->protection; in vm_map_lookup()
4991 (entry->eflags & MAP_ENTRY_GUARD) != 0 && in vm_map_lookup()
4992 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN | in vm_map_lookup()
4994 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS) in vm_map_lookup()
5002 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags & in vm_map_lookup()
5005 ("entry %p flags %x", entry, entry->eflags)); in vm_map_lookup()
5007 (entry->max_protection & VM_PROT_WRITE) == 0 && in vm_map_lookup()
5008 (entry->eflags & MAP_ENTRY_COW) == 0) { in vm_map_lookup()
5017 *wired = (entry->wired_count != 0); in vm_map_lookup()
5019 fault_type = entry->protection; in vm_map_lookup()
5020 size = entry->end - entry->start; in vm_map_lookup()
5025 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup()
5044 if (entry->cred == NULL) { in vm_map_lookup()
5056 entry->cred = cred; in vm_map_lookup()
5058 eobject = entry->object.vm_object; in vm_map_lookup()
5059 vm_object_shadow(&entry->object.vm_object, in vm_map_lookup()
5060 &entry->offset, size, entry->cred, false); in vm_map_lookup()
5061 if (eobject == entry->object.vm_object) { in vm_map_lookup()
5065 swap_release_by_cred(size, entry->cred); in vm_map_lookup()
5066 crfree(entry->cred); in vm_map_lookup()
5068 entry->cred = NULL; in vm_map_lookup()
5069 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY; in vm_map_lookup()
5084 if (entry->object.vm_object == NULL && !map->system_map) { in vm_map_lookup()
5087 entry->object.vm_object = vm_object_allocate_anon(atop(size), in vm_map_lookup()
5088 NULL, entry->cred, entry->cred != NULL ? size : 0); in vm_map_lookup()
5089 entry->offset = 0; in vm_map_lookup()
5090 entry->cred = NULL; in vm_map_lookup()
5098 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup()
5099 *object = entry->object.vm_object; in vm_map_lookup()
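
Lines 5025-5069 are the copy-on-write resolution inside vm_map_lookup(): on a write fault against a NEEDS_COPY entry, the entry's object is shadowed and the swap charge settled; on a read fault, write permission is simply demoted. A compressed sketch, hedged: it omits the exclusive-lock upgrade and the cred-reservation path that the full function performs first:

    if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
            if ((fault_type & VM_PROT_WRITE) != 0) {
                    /* Write fault: shadow the object now, while the
                     * map lock is held. */
                    eobject = entry->object.vm_object;
                    vm_object_shadow(&entry->object.vm_object,
                        &entry->offset, size, entry->cred, false);
                    if (eobject == entry->object.vm_object) {
                            /* Not shadowed after all: release the
                             * charge instead of transferring it. */
                            swap_release_by_cred(size, entry->cred);
                            crfree(entry->cred);
                    }
                    entry->cred = NULL;
                    entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
            } else {
                    /* Read fault on a COW entry: hand back demoted
                     * permissions; the copy happens on first write. */
                    prot &= ~VM_PROT_WRITE;
            }
    }
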
5121 vm_map_entry_t entry; in vm_map_lookup_locked() local
5132 entry = *out_entry; in vm_map_lookup_locked()
5137 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) in vm_map_lookup_locked()
5143 prot = entry->protection; in vm_map_lookup_locked()
5152 *wired = (entry->wired_count != 0); in vm_map_lookup_locked()
5154 fault_type = entry->protection; in vm_map_lookup_locked()
5156 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) { in vm_map_lookup_locked()
5172 if (entry->object.vm_object == NULL && !map->system_map) in vm_map_lookup_locked()
5179 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset); in vm_map_lookup_locked()
5180 *object = entry->object.vm_object; in vm_map_lookup_locked()
5193 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry) in vm_map_lookup_done() argument
5233 vm_map_entry_t entry, prev; in _vm_map_assert_consistent() local
5244 VM_MAP_ENTRY_FOREACH(entry, map) { in _vm_map_assert_consistent()
5245 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5247 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5248 KASSERT(entry->start < entry->end, in _vm_map_assert_consistent()
5250 (uintmax_t)entry->start, (uintmax_t)entry->end)); in _vm_map_assert_consistent()
5251 KASSERT(entry->left == header || in _vm_map_assert_consistent()
5252 entry->left->start < entry->start, in _vm_map_assert_consistent()
5254 (uintmax_t)entry->left->start, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5255 KASSERT(entry->right == header || in _vm_map_assert_consistent()
5256 entry->start < entry->right->start, in _vm_map_assert_consistent()
5258 (uintmax_t)entry->start, (uintmax_t)entry->right->start)); in _vm_map_assert_consistent()
5262 if (entry->start < cur->start) { in _vm_map_assert_consistent()
5267 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5268 } else if (cur->end <= entry->start) { in _vm_map_assert_consistent()
5273 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5275 KASSERT(cur == entry, in _vm_map_assert_consistent()
5277 map, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5281 max_left = vm_map_entry_max_free_left(entry, lbound); in _vm_map_assert_consistent()
5282 max_right = vm_map_entry_max_free_right(entry, ubound); in _vm_map_assert_consistent()
5283 KASSERT(entry->max_free == vm_size_max(max_left, max_right), in _vm_map_assert_consistent()
5285 (uintmax_t)entry->max_free, in _vm_map_assert_consistent()
5287 prev = entry; in _vm_map_assert_consistent()
5289 KASSERT(prev->end <= entry->start, in _vm_map_assert_consistent()
5291 (uintmax_t)prev->end, (uintmax_t)entry->start)); in _vm_map_assert_consistent()
5304 vm_map_entry_t entry, prev; in vm_map_print() local
5312 VM_MAP_ENTRY_FOREACH(entry, map) { in vm_map_print()
5314 (void *)entry, (void *)entry->start, (void *)entry->end, in vm_map_print()
5315 entry->eflags); in vm_map_print()
5321 entry->protection, in vm_map_print()
5322 entry->max_protection, in vm_map_print()
5324 entry->inheritance]); in vm_map_print()
5325 if (entry->wired_count != 0) in vm_map_print()
5328 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) { in vm_map_print()
5330 (void *)entry->object.sub_map, in vm_map_print()
5331 (uintmax_t)entry->offset); in vm_map_print()
5334 entry->object.sub_map) { in vm_map_print()
5336 vm_map_print((vm_map_t)entry->object.sub_map); in vm_map_print()
5340 if (entry->cred != NULL) in vm_map_print()
5341 db_printf(", ruid %d", entry->cred->cr_ruid); in vm_map_print()
5343 (void *)entry->object.vm_object, in vm_map_print()
5344 (uintmax_t)entry->offset); in vm_map_print()
5345 if (entry->object.vm_object && entry->object.vm_object->cred) in vm_map_print()
5347 entry->object.vm_object->cred->cr_ruid, in vm_map_print()
5348 (uintmax_t)entry->object.vm_object->charge); in vm_map_print()
5349 if (entry->eflags & MAP_ENTRY_COW) in vm_map_print()
5351 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done"); in vm_map_print()
5356 entry->object.vm_object) { in vm_map_print()
5359 entry->object.vm_object, in vm_map_print()
5364 prev = entry; in vm_map_print()