1 /*-
2 * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3 *
4 * Copyright (c) 1991, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * The Mach Operating System project at Carnegie-Mellon University.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from: @(#)vm_map.c 8.3 (Berkeley) 1/12/94
35 *
36 *
37 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38 * All rights reserved.
39 *
40 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41 *
42 * Permission to use, copy, modify and distribute this software and
43 * its documentation is hereby granted, provided that both the copyright
44 * notice and this permission notice appear in all copies of the
45 * software, derivative works or modified versions, and any portions
46 * thereof, and that both notices appear in supporting documentation.
47 *
48 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51 *
52 * Carnegie Mellon requests users of this software to return to
53 *
54 * Software Distribution Coordinator or [email protected]
55 * School of Computer Science
56 * Carnegie Mellon University
57 * Pittsburgh PA 15213-3890
58 *
59 * any improvements or extensions that they make and grant Carnegie the
60 * rights to redistribute these changes.
61 */
62
63 /*
64 * Virtual memory mapping module.
65 */
66
67 #include <sys/cdefs.h>
68 __FBSDID("$FreeBSD$");
69
70 #include <sys/param.h>
71 #include <sys/systm.h>
72 #include <sys/kernel.h>
73 #include <sys/ktr.h>
74 #include <sys/lock.h>
75 #include <sys/mutex.h>
76 #include <sys/proc.h>
77 #include <sys/vmmeter.h>
78 #include <sys/mman.h>
79 #include <sys/vnode.h>
80 #include <sys/racct.h>
81 #include <sys/resourcevar.h>
82 #include <sys/rwlock.h>
83 #include <sys/file.h>
84 #include <sys/sysctl.h>
85 #include <sys/sysent.h>
86 #include <sys/shm.h>
87
88 #include <vm/vm.h>
89 #include <vm/vm_param.h>
90 #include <vm/pmap.h>
91 #include <vm/vm_map.h>
92 #include <vm/vm_page.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_pager.h>
95 #include <vm/vm_kern.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vnode_pager.h>
98 #include <vm/swap_pager.h>
99 #include <vm/uma.h>
100
101 /*
102 * Virtual memory maps provide for the mapping, protection,
103 * and sharing of virtual memory objects. In addition,
104 * this module provides for an efficient virtual copy of
105 * memory from one map to another.
106 *
107 * Synchronization is required prior to most operations.
108 *
109 * Maps consist of an ordered doubly-linked list of simple
110 * entries; a self-adjusting binary search tree of these
111 * entries is used to speed up lookups.
112 *
113 * Since portions of maps are specified by start/end addresses,
114 * which may not align with existing map entries, all
115 * routines merely "clip" entries to these start/end values.
116 * [That is, an entry is split into two, bordering at a
117 * start or end value.] Note that these clippings may not
118 * always be necessary (as the two resulting entries are then
119 * not changed); however, the clipping is done for convenience.
120 *
121 * As mentioned above, virtual copy operations are performed
122 * by copying VM object references from one map to
123 * another, and then marking both regions as copy-on-write.
124 */
125
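/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): most consumers of this module follow a lock/operate/unlock
 * pattern around the routines defined below.  The helper name and the
 * flag values are hypothetical; vm_map_lock(), vm_map_insert() and
 * vm_map_unlock() are the interfaces declared in <vm/vm_map.h>.
 *
 *	static int
 *	example_map_anon(vm_map_t map, vm_offset_t start, vm_size_t len)
 *	{
 *		int rv;
 *
 *		vm_map_lock(map);
 *		rv = vm_map_insert(map, NULL, 0, start, start + len,
 *		    VM_PROT_RW, VM_PROT_ALL, 0);
 *		vm_map_unlock(map);
 *		return (rv);
 *	}
 */
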
126 static struct mtx map_sleep_mtx;
127 static uma_zone_t mapentzone;
128 static uma_zone_t kmapentzone;
129 static uma_zone_t mapzone;
130 static uma_zone_t vmspace_zone;
131 static int vmspace_zinit(void *mem, int size, int flags);
132 static int vm_map_zinit(void *mem, int size, int flags);
133 static void _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min,
134 vm_offset_t max);
135 static void vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map);
136 static void vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry);
137 static void vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry);
138 static int vm_map_growstack(vm_map_t map, vm_offset_t addr,
139 vm_map_entry_t gap_entry);
140 static void vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
141 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags);
142 #ifdef INVARIANTS
143 static void vm_map_zdtor(void *mem, int size, void *arg);
144 static void vmspace_zdtor(void *mem, int size, void *arg);
145 #endif
146 static int vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos,
147 vm_size_t max_ssize, vm_size_t growsize, vm_prot_t prot, vm_prot_t max,
148 int cow);
149 static void vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
150 vm_offset_t failed_addr);
151
152 #define ENTRY_CHARGED(e) ((e)->cred != NULL || \
153 ((e)->object.vm_object != NULL && (e)->object.vm_object->cred != NULL && \
154 !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
155
156 /*
157 * PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
158 * stable.
159 */
160 #define PROC_VMSPACE_LOCK(p) do { } while (0)
161 #define PROC_VMSPACE_UNLOCK(p) do { } while (0)
162
163 /*
164 * VM_MAP_RANGE_CHECK: [ internal use only ]
165 *
166 * Asserts that the starting and ending region
167 * addresses fall within the valid range of the map.
168 */
169 #define VM_MAP_RANGE_CHECK(map, start, end) \
170 { \
171 if (start < vm_map_min(map)) \
172 start = vm_map_min(map); \
173 if (end > vm_map_max(map)) \
174 end = vm_map_max(map); \
175 if (start > end) \
176 start = end; \
177 }
178
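/*
 * Illustrative note (editorial addition): for a map whose valid range is
 * [0x1000, 0x7ffff000), a caller passing start = 0 and end = 0xffffffff
 * through VM_MAP_RANGE_CHECK() ends up with start = 0x1000 and
 * end = 0x7ffff000; if clamping ever leaves start above end, start is
 * pulled down to end, yielding an empty range.
 */
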
179 /*
180 * vm_map_startup:
181 *
182 * Initialize the vm_map module. Must be called before
183 * any other vm_map routines.
184 *
185 * Map and entry structures are allocated from the general
186 * purpose memory pool with some exceptions:
187 *
188 * - The kernel map and kmem submap are allocated statically.
189 * - Kernel map entries are allocated out of a static pool.
190 *
191 * These restrictions are necessary since malloc() uses the
192 * maps and requires map entries.
193 */
194
195 void
196 vm_map_startup(void)
197 {
198 mtx_init(&map_sleep_mtx, "vm map sleep mutex", NULL, MTX_DEF);
199 mapzone = uma_zcreate("MAP", sizeof(struct vm_map), NULL,
200 #ifdef INVARIANTS
201 vm_map_zdtor,
202 #else
203 NULL,
204 #endif
205 vm_map_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
206 uma_prealloc(mapzone, MAX_KMAP);
207 kmapentzone = uma_zcreate("KMAP ENTRY", sizeof(struct vm_map_entry),
208 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
209 UMA_ZONE_MTXCLASS | UMA_ZONE_VM);
210 mapentzone = uma_zcreate("MAP ENTRY", sizeof(struct vm_map_entry),
211 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
212 vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
213 #ifdef INVARIANTS
214 vmspace_zdtor,
215 #else
216 NULL,
217 #endif
218 vmspace_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
219 }
220
221 static int
222 vmspace_zinit(void *mem, int size, int flags)
223 {
224 struct vmspace *vm;
225
226 vm = (struct vmspace *)mem;
227
228 vm->vm_map.pmap = NULL;
229 (void)vm_map_zinit(&vm->vm_map, sizeof(vm->vm_map), flags);
230 PMAP_LOCK_INIT(vmspace_pmap(vm));
231 return (0);
232 }
233
234 static int
235 vm_map_zinit(void *mem, int size, int flags)
236 {
237 vm_map_t map;
238
239 map = (vm_map_t)mem;
240 memset(map, 0, sizeof(*map));
241 mtx_init(&map->system_mtx, "vm map (system)", NULL, MTX_DEF | MTX_DUPOK);
242 sx_init(&map->lock, "vm map (user)");
243 return (0);
244 }
245
246 #ifdef INVARIANTS
247 static void
248 vmspace_zdtor(void *mem, int size, void *arg)
249 {
250 struct vmspace *vm;
251
252 vm = (struct vmspace *)mem;
253
254 vm_map_zdtor(&vm->vm_map, sizeof(vm->vm_map), arg);
255 }
256 static void
257 vm_map_zdtor(void *mem, int size, void *arg)
258 {
259 vm_map_t map;
260
261 map = (vm_map_t)mem;
262 KASSERT(map->nentries == 0,
263 ("map %p nentries == %d on free.",
264 map, map->nentries));
265 KASSERT(map->size == 0,
266 ("map %p size == %lu on free.",
267 map, (unsigned long)map->size));
268 }
269 #endif /* INVARIANTS */
270
271 /*
272 * Allocate a vmspace structure, including a vm_map and pmap,
273 * and initialize those structures. The refcnt is set to 1.
274 *
275 * If 'pinit' is NULL then the embedded pmap is initialized via pmap_pinit().
276 */
277 struct vmspace *
278 vmspace_alloc(vm_offset_t min, vm_offset_t max, pmap_pinit_t pinit)
279 {
280 struct vmspace *vm;
281
282 vm = uma_zalloc(vmspace_zone, M_WAITOK);
283 KASSERT(vm->vm_map.pmap == NULL, ("vm_map.pmap must be NULL"));
284 if (!pinit(vmspace_pmap(vm))) {
285 uma_zfree(vmspace_zone, vm);
286 return (NULL);
287 }
288 CTR1(KTR_VM, "vmspace_alloc: %p", vm);
289 _vm_map_init(&vm->vm_map, vmspace_pmap(vm), min, max);
290 vm->vm_refcnt = 1;
291 vm->vm_shm = NULL;
292 vm->vm_swrss = 0;
293 vm->vm_tsize = 0;
294 vm->vm_dsize = 0;
295 vm->vm_ssize = 0;
296 vm->vm_taddr = 0;
297 vm->vm_daddr = 0;
298 vm->vm_maxsaddr = 0;
299 return (vm);
300 }
301
302 #ifdef RACCT
303 static void
304 vmspace_container_reset(struct proc *p)
305 {
306
307 PROC_LOCK(p);
308 racct_set(p, RACCT_DATA, 0);
309 racct_set(p, RACCT_STACK, 0);
310 racct_set(p, RACCT_RSS, 0);
311 racct_set(p, RACCT_MEMLOCK, 0);
312 racct_set(p, RACCT_VMEM, 0);
313 PROC_UNLOCK(p);
314 }
315 #endif
316
317 static inline void
318 vmspace_dofree(struct vmspace *vm)
319 {
320
321 CTR1(KTR_VM, "vmspace_free: %p", vm);
322
323 /*
324 * Make sure any SysV shm is freed, it might not have been in
325 * exit1().
326 */
327 shmexit(vm);
328
329 /*
330 * Lock the map, to wait out all other references to it.
331 * Delete all of the mappings and pages they hold, then call
332 * the pmap module to reclaim anything left.
333 */
334 (void)vm_map_remove(&vm->vm_map, vm_map_min(&vm->vm_map),
335 vm_map_max(&vm->vm_map));
336
337 pmap_release(vmspace_pmap(vm));
338 vm->vm_map.pmap = NULL;
339 uma_zfree(vmspace_zone, vm);
340 }
341
342 void
343 vmspace_free(struct vmspace *vm)
344 {
345
346 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
347 "vmspace_free() called");
348
349 if (vm->vm_refcnt == 0)
350 panic("vmspace_free: attempt to free already freed vmspace");
351
352 if (atomic_fetchadd_int(&vm->vm_refcnt, -1) == 1)
353 vmspace_dofree(vm);
354 }
355
356 void
357 vmspace_exitfree(struct proc *p)
358 {
359 struct vmspace *vm;
360
361 PROC_VMSPACE_LOCK(p);
362 vm = p->p_vmspace;
363 p->p_vmspace = NULL;
364 PROC_VMSPACE_UNLOCK(p);
365 KASSERT(vm == &vmspace0, ("vmspace_exitfree: wrong vmspace"));
366 vmspace_free(vm);
367 }
368
369 void
370 vmspace_exit(struct thread *td)
371 {
372 int refcnt;
373 struct vmspace *vm;
374 struct proc *p;
375
376 /*
377 * Release user portion of address space.
378 * This releases references to vnodes,
379 * which could cause I/O if the file has been unlinked.
380 * Need to do this early enough that we can still sleep.
381 *
382 * The last exiting process to reach this point releases as
383 * much of the environment as it can. vmspace_dofree() is the
384 * slower fallback in case another process had a temporary
385 * reference to the vmspace.
386 */
387
388 p = td->td_proc;
389 vm = p->p_vmspace;
390 atomic_add_int(&vmspace0.vm_refcnt, 1);
391 do {
392 refcnt = vm->vm_refcnt;
393 if (refcnt > 1 && p->p_vmspace != &vmspace0) {
394 /* Switch now since other proc might free vmspace */
395 PROC_VMSPACE_LOCK(p);
396 p->p_vmspace = &vmspace0;
397 PROC_VMSPACE_UNLOCK(p);
398 pmap_activate(td);
399 }
400 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt - 1));
401 if (refcnt == 1) {
402 if (p->p_vmspace != vm) {
403 /* vmspace not yet freed, switch back */
404 PROC_VMSPACE_LOCK(p);
405 p->p_vmspace = vm;
406 PROC_VMSPACE_UNLOCK(p);
407 pmap_activate(td);
408 }
409 pmap_remove_pages(vmspace_pmap(vm));
410 /* Switch now since this proc will free vmspace */
411 PROC_VMSPACE_LOCK(p);
412 p->p_vmspace = &vmspace0;
413 PROC_VMSPACE_UNLOCK(p);
414 pmap_activate(td);
415 vmspace_dofree(vm);
416 }
417 #ifdef RACCT
418 if (racct_enable)
419 vmspace_container_reset(p);
420 #endif
421 }
422
423 /* Acquire reference to vmspace owned by another process. */
424
425 struct vmspace *
426 vmspace_acquire_ref(struct proc *p)
427 {
428 struct vmspace *vm;
429 int refcnt;
430
431 PROC_VMSPACE_LOCK(p);
432 vm = p->p_vmspace;
433 if (vm == NULL) {
434 PROC_VMSPACE_UNLOCK(p);
435 return (NULL);
436 }
437 do {
438 refcnt = vm->vm_refcnt;
439 if (refcnt <= 0) { /* Avoid 0->1 transition */
440 PROC_VMSPACE_UNLOCK(p);
441 return (NULL);
442 }
443 } while (!atomic_cmpset_int(&vm->vm_refcnt, refcnt, refcnt + 1));
444 if (vm != p->p_vmspace) {
445 PROC_VMSPACE_UNLOCK(p);
446 vmspace_free(vm);
447 return (NULL);
448 }
449 PROC_VMSPACE_UNLOCK(p);
450 return (vm);
451 }
452
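/*
 * Illustrative sketch (editorial addition): the expected pattern for the
 * function above is to take the reference, inspect or operate on the
 * target's map, and drop the reference with vmspace_free().  The helper
 * name is hypothetical.
 *
 *	static long
 *	example_other_proc_vsize(struct proc *p)
 *	{
 *		struct vmspace *vm;
 *		long sz;
 *
 *		if ((vm = vmspace_acquire_ref(p)) == NULL)
 *			return (-1);
 *		sz = (long)vm->vm_map.size;
 *		vmspace_free(vm);
 *		return (sz);
 *	}
 */
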
453 /*
454 * Switch between vmspaces in an AIO kernel process.
455 *
456 * The new vmspace is either the vmspace of a user process obtained
457 * from an active AIO request or the initial vmspace of the AIO kernel
458 * process (when it is idling). Because user processes will block to
459 * drain any active AIO requests before proceeding in exit() or
460 * execve(), the reference count for vmspaces from AIO requests can
461 * never be 0. Similarly, AIO kernel processes hold an extra
462 * reference on their initial vmspace for the life of the process. As
463 * a result, the 'newvm' vmspace always has a non-zero reference
464 * count. This permits an additional reference on 'newvm' to be
465 * acquired via a simple atomic increment rather than the loop in
466 * vmspace_acquire_ref() above.
467 */
468 void
469 vmspace_switch_aio(struct vmspace *newvm)
470 {
471 struct vmspace *oldvm;
472
473 /* XXX: Need some way to assert that this is an aio daemon. */
474
475 KASSERT(newvm->vm_refcnt > 0,
476 ("vmspace_switch_aio: newvm unreferenced"));
477
478 oldvm = curproc->p_vmspace;
479 if (oldvm == newvm)
480 return;
481
482 /*
483 * Point to the new address space and refer to it.
484 */
485 curproc->p_vmspace = newvm;
486 atomic_add_int(&newvm->vm_refcnt, 1);
487
488 /* Activate the new mapping. */
489 pmap_activate(curthread);
490
491 vmspace_free(oldvm);
492 }
493
494 void
495 _vm_map_lock(vm_map_t map, const char *file, int line)
496 {
497
498 if (map->system_map)
499 mtx_lock_flags_(&map->system_mtx, 0, file, line);
500 else
501 sx_xlock_(&map->lock, file, line);
502 map->timestamp++;
503 }
504
505 void
506 vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add)
507 {
508 vm_object_t object, object1;
509 struct vnode *vp;
510
511 if ((entry->eflags & MAP_ENTRY_VN_EXEC) == 0)
512 return;
513 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
514 ("Submap with execs"));
515 object = entry->object.vm_object;
516 KASSERT(object != NULL, ("No object for text, entry %p", entry));
517 VM_OBJECT_RLOCK(object);
518 while ((object1 = object->backing_object) != NULL) {
519 VM_OBJECT_RLOCK(object1);
520 VM_OBJECT_RUNLOCK(object);
521 object = object1;
522 }
523
524 vp = NULL;
525 if (object->type == OBJT_DEAD) {
526 /*
527 * For OBJT_DEAD objects, v_writecount was handled in
528 * vnode_pager_dealloc().
529 */
530 } else if (object->type == OBJT_VNODE) {
531 vp = object->handle;
532 } else if (object->type == OBJT_SWAP) {
533 KASSERT((object->flags & OBJ_TMPFS_NODE) != 0,
534 ("vm_map_entry_set_vnode_text: swap and !TMPFS "
535 "entry %p, object %p, add %d", entry, object, add));
536 /*
537 * Tmpfs VREG node, which was reclaimed, has
538 * OBJ_TMPFS_NODE flag set, but not OBJ_TMPFS. In
539 * this case there is no v_writecount to adjust.
540 */
541 if ((object->flags & OBJ_TMPFS) != 0)
542 vp = object->un_pager.swp.swp_tmpfs;
543 } else {
544 KASSERT(0,
545 ("vm_map_entry_set_vnode_text: wrong object type, "
546 "entry %p, object %p, add %d", entry, object, add));
547 }
548 if (vp != NULL) {
549 if (add) {
550 VOP_SET_TEXT_CHECKED(vp);
551 VM_OBJECT_RUNLOCK(object);
552 } else {
553 vhold(vp);
554 VM_OBJECT_RUNLOCK(object);
555 vn_lock(vp, LK_SHARED | LK_RETRY);
556 VOP_UNSET_TEXT_CHECKED(vp);
557 VOP_UNLOCK(vp, 0);
558 vdrop(vp);
559 }
560 } else {
561 VM_OBJECT_RUNLOCK(object);
562 }
563 }
564
565 static void
566 vm_map_process_deferred(void)
567 {
568 struct thread *td;
569 vm_map_entry_t entry, next;
570 vm_object_t object;
571
572 td = curthread;
573 entry = td->td_map_def_user;
574 td->td_map_def_user = NULL;
575 while (entry != NULL) {
576 next = entry->next;
577 MPASS((entry->eflags & (MAP_ENTRY_VN_WRITECNT |
578 MAP_ENTRY_VN_EXEC)) != (MAP_ENTRY_VN_WRITECNT |
579 MAP_ENTRY_VN_EXEC));
580 if ((entry->eflags & MAP_ENTRY_VN_WRITECNT) != 0) {
581 /*
582 * Decrement the object's writemappings and
583 * possibly the vnode's v_writecount.
584 */
585 KASSERT((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0,
586 ("Submap with writecount"));
587 object = entry->object.vm_object;
588 KASSERT(object != NULL, ("No object for writecount"));
589 vnode_pager_release_writecount(object, entry->start,
590 entry->end);
591 }
592 vm_map_entry_set_vnode_text(entry, false);
593 vm_map_entry_deallocate(entry, FALSE);
594 entry = next;
595 }
596 }
597
598 void
599 _vm_map_unlock(vm_map_t map, const char *file, int line)
600 {
601
602 if (map->system_map)
603 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
604 else {
605 sx_xunlock_(&map->lock, file, line);
606 vm_map_process_deferred();
607 }
608 }
609
610 void
611 _vm_map_lock_read(vm_map_t map, const char *file, int line)
612 {
613
614 if (map->system_map)
615 mtx_lock_flags_(&map->system_mtx, 0, file, line);
616 else
617 sx_slock_(&map->lock, file, line);
618 }
619
620 void
621 _vm_map_unlock_read(vm_map_t map, const char *file, int line)
622 {
623
624 if (map->system_map)
625 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
626 else {
627 sx_sunlock_(&map->lock, file, line);
628 vm_map_process_deferred();
629 }
630 }
631
632 int
633 _vm_map_trylock(vm_map_t map, const char *file, int line)
634 {
635 int error;
636
637 error = map->system_map ?
638 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
639 !sx_try_xlock_(&map->lock, file, line);
640 if (error == 0)
641 map->timestamp++;
642 return (error == 0);
643 }
644
645 int
646 _vm_map_trylock_read(vm_map_t map, const char *file, int line)
647 {
648 int error;
649
650 error = map->system_map ?
651 !mtx_trylock_flags_(&map->system_mtx, 0, file, line) :
652 !sx_try_slock_(&map->lock, file, line);
653 return (error == 0);
654 }
655
656 /*
657 * _vm_map_lock_upgrade: [ internal use only ]
658 *
659 * Tries to upgrade a read (shared) lock on the specified map to a write
660 * (exclusive) lock. Returns the value "0" if the upgrade succeeds and a
661 * non-zero value if the upgrade fails. If the upgrade fails, the map is
662 * returned without a read or write lock held.
663 *
664 * Requires that the map be read locked.
665 */
666 int
667 _vm_map_lock_upgrade(vm_map_t map, const char *file, int line)
668 {
669 unsigned int last_timestamp;
670
671 if (map->system_map) {
672 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
673 } else {
674 if (!sx_try_upgrade_(&map->lock, file, line)) {
675 last_timestamp = map->timestamp;
676 sx_sunlock_(&map->lock, file, line);
677 vm_map_process_deferred();
678 /*
679 * If the map's timestamp does not change while the
680 * map is unlocked, then the upgrade succeeds.
681 */
682 sx_xlock_(&map->lock, file, line);
683 if (last_timestamp != map->timestamp) {
684 sx_xunlock_(&map->lock, file, line);
685 return (1);
686 }
687 }
688 }
689 map->timestamp++;
690 return (0);
691 }
692
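/*
 * Illustrative sketch (editorial addition): a failed upgrade drops the
 * lock entirely, so callers must be prepared to revalidate or restart.
 * A typical, hypothetical caller:
 *
 *	vm_map_lock_read(map);
 *	... read-only inspection ...
 *	if (vm_map_lock_upgrade(map) != 0) {
 *		vm_map_lock(map);
 *		... the lock was lost; recheck anything learned above ...
 *	}
 *	... modify the map ...
 *	vm_map_unlock(map);
 */
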
693 void
694 _vm_map_lock_downgrade(vm_map_t map, const char *file, int line)
695 {
696
697 if (map->system_map) {
698 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
699 } else
700 sx_downgrade_(&map->lock, file, line);
701 }
702
703 /*
704 * vm_map_locked:
705 *
706 * Returns a non-zero value if the caller holds a write (exclusive) lock
707 * on the specified map and the value "0" otherwise.
708 */
709 int
710 vm_map_locked(vm_map_t map)
711 {
712
713 if (map->system_map)
714 return (mtx_owned(&map->system_mtx));
715 else
716 return (sx_xlocked(&map->lock));
717 }
718
719 #ifdef INVARIANTS
720 static void
721 _vm_map_assert_locked(vm_map_t map, const char *file, int line)
722 {
723
724 if (map->system_map)
725 mtx_assert_(&map->system_mtx, MA_OWNED, file, line);
726 else
727 sx_assert_(&map->lock, SA_XLOCKED, file, line);
728 }
729
730 #define VM_MAP_ASSERT_LOCKED(map) \
731 _vm_map_assert_locked(map, LOCK_FILE, LOCK_LINE)
732
733 #ifdef DIAGNOSTIC
734 static int enable_vmmap_check = 1;
735 #else
736 static int enable_vmmap_check = 0;
737 #endif
738 SYSCTL_INT(_debug, OID_AUTO, vmmap_check, CTLFLAG_RWTUN,
739 &enable_vmmap_check, 0, "Enable vm map consistency checking");
740
741 static void
742 _vm_map_assert_consistent(vm_map_t map)
743 {
744 vm_map_entry_t entry;
745 vm_map_entry_t child;
746 vm_size_t max_left, max_right;
747
748 if (!enable_vmmap_check)
749 return;
750
751 for (entry = map->header.next; entry != &map->header;
752 entry = entry->next) {
753 KASSERT(entry->prev->end <= entry->start,
754 ("map %p prev->end = %jx, start = %jx", map,
755 (uintmax_t)entry->prev->end, (uintmax_t)entry->start));
756 KASSERT(entry->start < entry->end,
757 ("map %p start = %jx, end = %jx", map,
758 (uintmax_t)entry->start, (uintmax_t)entry->end));
759 KASSERT(entry->end <= entry->next->start,
760 ("map %p end = %jx, next->start = %jx", map,
761 (uintmax_t)entry->end, (uintmax_t)entry->next->start));
762 KASSERT(entry->left == NULL ||
763 entry->left->start < entry->start,
764 ("map %p left->start = %jx, start = %jx", map,
765 (uintmax_t)entry->left->start, (uintmax_t)entry->start));
766 KASSERT(entry->right == NULL ||
767 entry->start < entry->right->start,
768 ("map %p start = %jx, right->start = %jx", map,
769 (uintmax_t)entry->start, (uintmax_t)entry->right->start));
770 child = entry->left;
771 max_left = (child != NULL) ? child->max_free :
772 entry->start - entry->prev->end;
773 child = entry->right;
774 max_right = (child != NULL) ? child->max_free :
775 entry->next->start - entry->end;
776 KASSERT(entry->max_free == MAX(max_left, max_right),
777 ("map %p max = %jx, max_left = %jx, max_right = %jx", map,
778 (uintmax_t)entry->max_free,
779 (uintmax_t)max_left, (uintmax_t)max_right));
780 }
781 }
782
783 #define VM_MAP_ASSERT_CONSISTENT(map) \
784 _vm_map_assert_consistent(map)
785 #else
786 #define VM_MAP_ASSERT_LOCKED(map)
787 #define VM_MAP_ASSERT_CONSISTENT(map)
788 #endif /* INVARIANTS */
789
790 /*
791 * _vm_map_unlock_and_wait:
792 *
793 * Atomically releases the lock on the specified map and puts the calling
794 * thread to sleep. The calling thread will remain asleep until either
795 * vm_map_wakeup() is performed on the map or the specified timeout is
796 * exceeded.
797 *
798 * WARNING! This function does not perform deferred deallocations of
799 * objects and map entries. Therefore, the calling thread is expected to
800 * reacquire the map lock after reawakening and later perform an ordinary
801 * unlock operation, such as vm_map_unlock(), before completing its
802 * operation on the map.
803 */
804 int
805 _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line)
806 {
807
808 mtx_lock(&map_sleep_mtx);
809 if (map->system_map)
810 mtx_unlock_flags_(&map->system_mtx, 0, file, line);
811 else
812 sx_xunlock_(&map->lock, file, line);
813 return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps",
814 timo));
815 }
816
817 /*
818 * vm_map_wakeup:
819 *
820 * Awaken any threads that have slept on the map using
821 * vm_map_unlock_and_wait().
822 */
823 void
824 vm_map_wakeup(vm_map_t map)
825 {
826
827 /*
828 * Acquire and release map_sleep_mtx to prevent a wakeup()
829 * from being performed (and lost) between the map unlock
830 * and the msleep() in _vm_map_unlock_and_wait().
831 */
832 mtx_lock(&map_sleep_mtx);
833 mtx_unlock(&map_sleep_mtx);
834 wakeup(&map->root);
835 }
836
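/*
 * Illustrative sketch (editorial addition): the sleep/wakeup pair above
 * is typically used to wait for a condition that another thread clears
 * while holding the map lock.  A hypothetical waiter:
 *
 *	vm_map_lock(map);
 *	while (condition_blocks(map)) {
 *		(void)vm_map_unlock_and_wait(map, 0);
 *		vm_map_lock(map);
 *	}
 *	... proceed with the map locked ...
 *	vm_map_unlock(map);
 *
 * The thread that clears the condition calls vm_map_wakeup(map) after
 * updating the map under the lock; condition_blocks() is a placeholder
 * for whatever state the waiter is blocked on.
 */
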
837 void
838 vm_map_busy(vm_map_t map)
839 {
840
841 VM_MAP_ASSERT_LOCKED(map);
842 map->busy++;
843 }
844
845 void
846 vm_map_unbusy(vm_map_t map)
847 {
848
849 VM_MAP_ASSERT_LOCKED(map);
850 KASSERT(map->busy, ("vm_map_unbusy: not busy"));
851 if (--map->busy == 0 && (map->flags & MAP_BUSY_WAKEUP)) {
852 vm_map_modflags(map, 0, MAP_BUSY_WAKEUP);
853 wakeup(&map->busy);
854 }
855 }
856
857 void
858 vm_map_wait_busy(vm_map_t map)
859 {
860
861 VM_MAP_ASSERT_LOCKED(map);
862 while (map->busy) {
863 vm_map_modflags(map, MAP_BUSY_WAKEUP, 0);
864 if (map->system_map)
865 msleep(&map->busy, &map->system_mtx, 0, "mbusy", 0);
866 else
867 sx_sleep(&map->busy, &map->lock, 0, "mbusy", 0);
868 }
869 map->timestamp++;
870 }
871
872 long
873 vmspace_resident_count(struct vmspace *vmspace)
874 {
875 return pmap_resident_count(vmspace_pmap(vmspace));
876 }
877
878 /*
879 * vm_map_create:
880 *
881 * Creates and returns a new empty VM map with
882 * the given physical map structure, and having
883 * the given lower and upper address bounds.
884 */
885 vm_map_t
886 vm_map_create(pmap_t pmap, vm_offset_t min, vm_offset_t max)
887 {
888 vm_map_t result;
889
890 result = uma_zalloc(mapzone, M_WAITOK);
891 CTR1(KTR_VM, "vm_map_create: %p", result);
892 _vm_map_init(result, pmap, min, max);
893 return (result);
894 }
895
896 /*
897 * Initialize an existing vm_map structure
898 * such as that in the vmspace structure.
899 */
900 static void
901 _vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
902 {
903
904 map->header.next = map->header.prev = &map->header;
905 map->header.eflags = MAP_ENTRY_HEADER;
906 map->needs_wakeup = FALSE;
907 map->system_map = 0;
908 map->pmap = pmap;
909 map->header.end = min;
910 map->header.start = max;
911 map->flags = 0;
912 map->root = NULL;
913 map->timestamp = 0;
914 map->busy = 0;
915 map->anon_loc = 0;
916 }
917
918 void
919 vm_map_init(vm_map_t map, pmap_t pmap, vm_offset_t min, vm_offset_t max)
920 {
921
922 _vm_map_init(map, pmap, min, max);
923 mtx_init(&map->system_mtx, "system map", NULL, MTX_DEF | MTX_DUPOK);
924 sx_init(&map->lock, "user map");
925 }
926
927 /*
928 * vm_map_entry_dispose: [ internal use only ]
929 *
930 * Inverse of vm_map_entry_create.
931 */
932 static void
933 vm_map_entry_dispose(vm_map_t map, vm_map_entry_t entry)
934 {
935 uma_zfree(map->system_map ? kmapentzone : mapentzone, entry);
936 }
937
938 /*
939 * vm_map_entry_create: [ internal use only ]
940 *
941 * Allocates a VM map entry for insertion.
942 * No entry fields are filled in.
943 */
944 static vm_map_entry_t
945 vm_map_entry_create(vm_map_t map)
946 {
947 vm_map_entry_t new_entry;
948
949 if (map->system_map)
950 new_entry = uma_zalloc(kmapentzone, M_NOWAIT);
951 else
952 new_entry = uma_zalloc(mapentzone, M_WAITOK);
953 if (new_entry == NULL)
954 panic("vm_map_entry_create: kernel resources exhausted");
955 return (new_entry);
956 }
957
958 /*
959 * vm_map_entry_set_behavior:
960 *
961 * Set the expected access behavior, either normal, random, or
962 * sequential.
963 */
964 static inline void
965 vm_map_entry_set_behavior(vm_map_entry_t entry, u_char behavior)
966 {
967 entry->eflags = (entry->eflags & ~MAP_ENTRY_BEHAV_MASK) |
968 (behavior & MAP_ENTRY_BEHAV_MASK);
969 }
970
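/*
 * Illustrative note (editorial addition): the usual caller is the madvise
 * path, which maps advice such as MADV_RANDOM to
 * vm_map_entry_set_behavior(entry, MAP_ENTRY_BEHAV_RANDOM).
 */
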
971 /*
972 * vm_map_entry_max_free_{left,right}:
973 *
974 * Compute the size of the largest free gap between two entries,
975 * one the root of a tree and the other the ancestor of that root
976 * that is the least or greatest ancestor found on the search path.
977 */
978 static inline vm_size_t
979 vm_map_entry_max_free_left(vm_map_entry_t root, vm_map_entry_t left_ancestor)
980 {
981
982 return (root->left != NULL ?
983 root->left->max_free : root->start - left_ancestor->end);
984 }
985
986 static inline vm_size_t
987 vm_map_entry_max_free_right(vm_map_entry_t root, vm_map_entry_t right_ancestor)
988 {
989
990 return (root->right != NULL ?
991 root->right->max_free : right_ancestor->start - root->end);
992 }
993
994 #define SPLAY_LEFT_STEP(root, y, rlist, test) do { \
995 vm_size_t max_free; \
996 \
997 /* \
998 * Infer root->right->max_free == root->max_free when \
999 * y->max_free < root->max_free || root->max_free == 0. \
1000 * Otherwise, look right to find it. \
1001 */ \
1002 y = root->left; \
1003 max_free = root->max_free; \
1004 KASSERT(max_free >= vm_map_entry_max_free_right(root, rlist), \
1005 ("%s: max_free invariant fails", __func__)); \
1006 if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \
1007 max_free = vm_map_entry_max_free_right(root, rlist); \
1008 if (y != NULL && (test)) { \
1009 /* Rotate right and make y root. */ \
1010 root->left = y->right; \
1011 y->right = root; \
1012 if (max_free < y->max_free) \
1013 root->max_free = max_free = MAX(max_free, \
1014 vm_map_entry_max_free_left(root, y)); \
1015 root = y; \
1016 y = root->left; \
1017 } \
1018 /* Copy right->max_free. Put root on rlist. */ \
1019 root->max_free = max_free; \
1020 KASSERT(max_free == vm_map_entry_max_free_right(root, rlist), \
1021 ("%s: max_free not copied from right", __func__)); \
1022 root->left = rlist; \
1023 rlist = root; \
1024 root = y; \
1025 } while (0)
1026
1027 #define SPLAY_RIGHT_STEP(root, y, llist, test) do { \
1028 vm_size_t max_free; \
1029 \
1030 /* \
1031 * Infer root->left->max_free == root->max_free when \
1032 * y->max_free < root->max_free || root->max_free == 0. \
1033 * Otherwise, look left to find it. \
1034 */ \
1035 y = root->right; \
1036 max_free = root->max_free; \
1037 KASSERT(max_free >= vm_map_entry_max_free_left(root, llist), \
1038 ("%s: max_free invariant fails", __func__)); \
1039 if (y == NULL ? max_free > 0 : max_free - 1 < y->max_free) \
1040 max_free = vm_map_entry_max_free_left(root, llist); \
1041 if (y != NULL && (test)) { \
1042 /* Rotate left and make y root. */ \
1043 root->right = y->left; \
1044 y->left = root; \
1045 if (max_free < y->max_free) \
1046 root->max_free = max_free = MAX(max_free, \
1047 vm_map_entry_max_free_right(root, y)); \
1048 root = y; \
1049 y = root->right; \
1050 } \
1051 /* Copy left->max_free. Put root on llist. */ \
1052 root->max_free = max_free; \
1053 KASSERT(max_free == vm_map_entry_max_free_left(root, llist), \
1054 ("%s: max_free not copied from left", __func__)); \
1055 root->right = llist; \
1056 llist = root; \
1057 root = y; \
1058 } while (0)
1059
1060 /*
1061 * Walk down the tree until we find addr or a NULL pointer where addr would go,
1062 * breaking off left and right subtrees of nodes less than, or greater than
1063 * addr. Treat pointers to nodes with max_free < length as NULL pointers.
1064 * llist and rlist are the two sides in reverse order (bottom-up), with llist
1065 * linked by the right pointer and rlist linked by the left pointer in the
1066 * vm_map_entry, and both lists terminated by &map->header. This function, and
1067 * the subsequent call to vm_map_splay_merge, rely on the start and end address
1068 * values in &map->header.
1069 */
1070 static vm_map_entry_t
1071 vm_map_splay_split(vm_map_t map, vm_offset_t addr, vm_size_t length,
1072 vm_map_entry_t *out_llist, vm_map_entry_t *out_rlist)
1073 {
1074 vm_map_entry_t llist, rlist, root, y;
1075
1076 llist = rlist = &map->header;
1077 root = map->root;
1078 while (root != NULL && root->max_free >= length) {
1079 KASSERT(llist->end <= root->start && root->end <= rlist->start,
1080 ("%s: root not within tree bounds", __func__));
1081 if (addr < root->start) {
1082 SPLAY_LEFT_STEP(root, y, rlist,
1083 y->max_free >= length && addr < y->start);
1084 } else if (addr >= root->end) {
1085 SPLAY_RIGHT_STEP(root, y, llist,
1086 y->max_free >= length && addr >= y->end);
1087 } else
1088 break;
1089 }
1090 *out_llist = llist;
1091 *out_rlist = rlist;
1092 return (root);
1093 }
1094
1095 static void
1096 vm_map_splay_findnext(vm_map_entry_t root, vm_map_entry_t *iolist)
1097 {
1098 vm_map_entry_t rlist, y;
1099
1100 root = root->right;
1101 rlist = *iolist;
1102 while (root != NULL)
1103 SPLAY_LEFT_STEP(root, y, rlist, true);
1104 *iolist = rlist;
1105 }
1106
1107 static void
1108 vm_map_splay_findprev(vm_map_entry_t root, vm_map_entry_t *iolist)
1109 {
1110 vm_map_entry_t llist, y;
1111
1112 root = root->left;
1113 llist = *iolist;
1114 while (root != NULL)
1115 SPLAY_RIGHT_STEP(root, y, llist, true);
1116 *iolist = llist;
1117 }
1118
1119 static inline void
1120 vm_map_entry_swap(vm_map_entry_t *a, vm_map_entry_t *b)
1121 {
1122 vm_map_entry_t tmp;
1123
1124 tmp = *b;
1125 *b = *a;
1126 *a = tmp;
1127 }
1128
1129 /*
1130 * Walk back up the two spines, flip the pointers and set max_free. The
1131 * subtrees of the root go at the bottom of llist and rlist.
1132 */
1133 static void
1134 vm_map_splay_merge(vm_map_t map, vm_map_entry_t root,
1135 vm_map_entry_t llist, vm_map_entry_t rlist)
1136 {
1137 vm_map_entry_t prev;
1138 vm_size_t max_free_left, max_free_right;
1139
1140 max_free_left = vm_map_entry_max_free_left(root, llist);
1141 if (llist != &map->header) {
1142 prev = root->left;
1143 do {
1144 /*
1145 * The max_free values of the children of llist are in
1146 * llist->max_free and max_free_left. Update with the
1147 * max value.
1148 */
1149 llist->max_free = max_free_left =
1150 MAX(llist->max_free, max_free_left);
1151 vm_map_entry_swap(&llist->right, &prev);
1152 vm_map_entry_swap(&prev, &llist);
1153 } while (llist != &map->header);
1154 root->left = prev;
1155 }
1156 max_free_right = vm_map_entry_max_free_right(root, rlist);
1157 if (rlist != &map->header) {
1158 prev = root->right;
1159 do {
1160 /*
1161 * The max_free values of the children of rlist are in
1162 * rlist->max_free and max_free_right. Update with the
1163 * max value.
1164 */
1165 rlist->max_free = max_free_right =
1166 MAX(rlist->max_free, max_free_right);
1167 vm_map_entry_swap(&rlist->left, &prev);
1168 vm_map_entry_swap(&prev, &rlist);
1169 } while (rlist != &map->header);
1170 root->right = prev;
1171 }
1172 root->max_free = MAX(max_free_left, max_free_right);
1173 map->root = root;
1174 }
1175
1176 /*
1177 * vm_map_splay:
1178 *
1179 * The Sleator and Tarjan top-down splay algorithm with the
1180 * following variation. Max_free must be computed bottom-up, so
1181 * on the downward pass, maintain the left and right spines in
1182 * reverse order. Then, make a second pass up each side to fix
1183 * the pointers and compute max_free. The time bound is O(log n)
1184 * amortized.
1185 *
1186 * The new root is the vm_map_entry containing "addr", or else an
1187 * adjacent entry (lower if possible) if addr is not in the tree.
1188 *
1189 * The map must be locked, and leaves it so.
1190 *
1191 * Returns: the new root.
1192 */
1193 static vm_map_entry_t
1194 vm_map_splay(vm_map_t map, vm_offset_t addr)
1195 {
1196 vm_map_entry_t llist, rlist, root;
1197
1198 root = vm_map_splay_split(map, addr, 0, &llist, &rlist);
1199 if (root != NULL) {
1200 /* do nothing */
1201 } else if (llist != &map->header) {
1202 /*
1203 * Recover the greatest node in the left
1204 * subtree and make it the root.
1205 */
1206 root = llist;
1207 llist = root->right;
1208 root->right = NULL;
1209 } else if (rlist != &map->header) {
1210 /*
1211 * Recover the least node in the right
1212 * subtree and make it the root.
1213 */
1214 root = rlist;
1215 rlist = root->left;
1216 root->left = NULL;
1217 } else {
1218 /* There is no root. */
1219 return (NULL);
1220 }
1221 vm_map_splay_merge(map, root, llist, rlist);
1222 VM_MAP_ASSERT_CONSISTENT(map);
1223 return (root);
1224 }
1225
1226 /*
1227 * vm_map_entry_{un,}link:
1228 *
1229 * Insert/remove entries from maps.
1230 */
1231 static void
1232 vm_map_entry_link(vm_map_t map, vm_map_entry_t entry)
1233 {
1234 vm_map_entry_t llist, rlist, root;
1235
1236 CTR3(KTR_VM,
1237 "vm_map_entry_link: map %p, nentries %d, entry %p", map,
1238 map->nentries, entry);
1239 VM_MAP_ASSERT_LOCKED(map);
1240 map->nentries++;
1241 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1242 KASSERT(root == NULL,
1243 ("vm_map_entry_link: link object already mapped"));
1244 entry->prev = llist;
1245 entry->next = rlist;
1246 llist->next = rlist->prev = entry;
1247 entry->left = entry->right = NULL;
1248 vm_map_splay_merge(map, entry, llist, rlist);
1249 VM_MAP_ASSERT_CONSISTENT(map);
1250 }
1251
1252 enum unlink_merge_type {
1253 UNLINK_MERGE_PREV,
1254 UNLINK_MERGE_NONE,
1255 UNLINK_MERGE_NEXT
1256 };
1257
1258 static void
1259 vm_map_entry_unlink(vm_map_t map, vm_map_entry_t entry,
1260 enum unlink_merge_type op)
1261 {
1262 vm_map_entry_t llist, rlist, root, y;
1263
1264 VM_MAP_ASSERT_LOCKED(map);
1265 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1266 KASSERT(root != NULL,
1267 ("vm_map_entry_unlink: unlink object not mapped"));
1268
1269 switch (op) {
1270 case UNLINK_MERGE_PREV:
1271 vm_map_splay_findprev(root, &llist);
1272 llist->end = root->end;
1273 y = root->right;
1274 root = llist;
1275 llist = root->right;
1276 root->right = y;
1277 break;
1278 case UNLINK_MERGE_NEXT:
1279 vm_map_splay_findnext(root, &rlist);
1280 rlist->start = root->start;
1281 rlist->offset = root->offset;
1282 y = root->left;
1283 root = rlist;
1284 rlist = root->left;
1285 root->left = y;
1286 break;
1287 case UNLINK_MERGE_NONE:
1288 vm_map_splay_findprev(root, &llist);
1289 vm_map_splay_findnext(root, &rlist);
1290 if (llist != &map->header) {
1291 root = llist;
1292 llist = root->right;
1293 root->right = NULL;
1294 } else if (rlist != &map->header) {
1295 root = rlist;
1296 rlist = root->left;
1297 root->left = NULL;
1298 } else
1299 root = NULL;
1300 break;
1301 }
1302 y = entry->next;
1303 y->prev = entry->prev;
1304 y->prev->next = y;
1305 if (root != NULL)
1306 vm_map_splay_merge(map, root, llist, rlist);
1307 else
1308 map->root = NULL;
1309 VM_MAP_ASSERT_CONSISTENT(map);
1310 map->nentries--;
1311 CTR3(KTR_VM, "vm_map_entry_unlink: map %p, nentries %d, entry %p", map,
1312 map->nentries, entry);
1313 }
1314
1315 /*
1316 * vm_map_entry_resize_free:
1317 *
1318 * Recompute the amount of free space following a modified vm_map_entry
1319 * and propagate those values up the tree. Call this function after
1320 * resizing a map entry in-place by changing the end value, without a
1321 * call to vm_map_entry_link() or _unlink().
1322 *
1323 * The map must be locked, and leaves it so.
1324 */
1325 static void
1326 vm_map_entry_resize_free(vm_map_t map, vm_map_entry_t entry)
1327 {
1328 vm_map_entry_t llist, rlist, root;
1329
1330 VM_MAP_ASSERT_LOCKED(map);
1331 root = vm_map_splay_split(map, entry->start, 0, &llist, &rlist);
1332 KASSERT(root != NULL,
1333 ("vm_map_entry_resize_free: resize_free object not mapped"));
1334 vm_map_splay_findnext(root, &rlist);
1335 root->right = NULL;
1336 vm_map_splay_merge(map, root, llist, rlist);
1337 VM_MAP_ASSERT_CONSISTENT(map);
1338 CTR3(KTR_VM, "vm_map_entry_resize_free: map %p, nentries %d, entry %p", map,
1339 map->nentries, entry);
1340 }
1341
1342 /*
1343 * vm_map_lookup_entry: [ internal use only ]
1344 *
1345 * Finds the map entry containing (or
1346 * immediately preceding) the specified address
1347 * in the given map; the entry is returned
1348 * in the "entry" parameter. The boolean
1349 * result indicates whether the address is
1350 * actually contained in the map.
1351 */
1352 boolean_t
1353 vm_map_lookup_entry(
1354 vm_map_t map,
1355 vm_offset_t address,
1356 vm_map_entry_t *entry) /* OUT */
1357 {
1358 vm_map_entry_t cur, lbound;
1359 boolean_t locked;
1360
1361 /*
1362 * If the map is empty, then the map entry immediately preceding
1363 * "address" is the map's header.
1364 */
1365 cur = map->root;
1366 if (cur == NULL) {
1367 *entry = &map->header;
1368 return (FALSE);
1369 }
1370 if (address >= cur->start && cur->end > address) {
1371 *entry = cur;
1372 return (TRUE);
1373 }
1374 if ((locked = vm_map_locked(map)) ||
1375 sx_try_upgrade(&map->lock)) {
1376 /*
1377 * Splay requires a write lock on the map. However, it only
1378 * restructures the binary search tree; it does not otherwise
1379 * change the map. Thus, the map's timestamp need not change
1380 * on a temporary upgrade.
1381 */
1382 cur = vm_map_splay(map, address);
1383 if (!locked)
1384 sx_downgrade(&map->lock);
1385
1386 /*
1387 * If "address" is contained within a map entry, the new root
1388 * is that map entry. Otherwise, the new root is a map entry
1389 * immediately before or after "address".
1390 */
1391 if (address < cur->start) {
1392 *entry = &map->header;
1393 return (FALSE);
1394 }
1395 *entry = cur;
1396 return (address < cur->end);
1397 }
1398 /*
1399 * Since the map is only locked for read access, perform a
1400 * standard binary search tree lookup for "address".
1401 */
1402 lbound = &map->header;
1403 do {
1404 if (address < cur->start) {
1405 cur = cur->left;
1406 } else if (cur->end <= address) {
1407 lbound = cur;
1408 cur = cur->right;
1409 } else {
1410 *entry = cur;
1411 return (TRUE);
1412 }
1413 } while (cur != NULL);
1414 *entry = lbound;
1415 return (FALSE);
1416 }
1417
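/*
 * Illustrative sketch (editorial addition): a read-locked caller of
 * vm_map_lookup_entry().  The helper name is hypothetical.
 *
 *	static boolean_t
 *	example_addr_is_mapped(vm_map_t map, vm_offset_t addr)
 *	{
 *		vm_map_entry_t entry;
 *		boolean_t found;
 *
 *		vm_map_lock_read(map);
 *		found = vm_map_lookup_entry(map, addr, &entry);
 *		vm_map_unlock_read(map);
 *		return (found);
 *	}
 */
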
1418 /*
1419 * vm_map_insert:
1420 *
1421 * Inserts the given whole VM object into the target
1422 * map at the specified address range. The object's
1423 * size should match that of the address range.
1424 *
1425 * Requires that the map be locked, and leaves it so.
1426 *
1427 * If object is non-NULL, ref count must be bumped by caller
1428 * prior to making call to account for the new entry.
1429 */
1430 int
1431 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1432 vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
1433 {
1434 vm_map_entry_t new_entry, prev_entry, temp_entry;
1435 struct ucred *cred;
1436 vm_eflags_t protoeflags;
1437 vm_inherit_t inheritance;
1438
1439 VM_MAP_ASSERT_LOCKED(map);
1440 KASSERT(object != kernel_object ||
1441 (cow & MAP_COPY_ON_WRITE) == 0,
1442 ("vm_map_insert: kernel object and COW"));
1443 KASSERT(object == NULL || (cow & MAP_NOFAULT) == 0,
1444 ("vm_map_insert: paradoxical MAP_NOFAULT request"));
1445 KASSERT((prot & ~max) == 0,
1446 ("prot %#x is not subset of max_prot %#x", prot, max));
1447
1448 /*
1449 * Check that the start and end points are not bogus.
1450 */
1451 if (start < vm_map_min(map) || end > vm_map_max(map) ||
1452 start >= end)
1453 return (KERN_INVALID_ADDRESS);
1454
1455 /*
1456 * Find the entry prior to the proposed starting address; if it's part
1457 * of an existing entry, this range is bogus.
1458 */
1459 if (vm_map_lookup_entry(map, start, &temp_entry))
1460 return (KERN_NO_SPACE);
1461
1462 prev_entry = temp_entry;
1463
1464 /*
1465 * Assert that the next entry doesn't overlap the end point.
1466 */
1467 if (prev_entry->next->start < end)
1468 return (KERN_NO_SPACE);
1469
1470 if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
1471 max != VM_PROT_NONE))
1472 return (KERN_INVALID_ARGUMENT);
1473
1474 protoeflags = 0;
1475 if (cow & MAP_COPY_ON_WRITE)
1476 protoeflags |= MAP_ENTRY_COW | MAP_ENTRY_NEEDS_COPY;
1477 if (cow & MAP_NOFAULT)
1478 protoeflags |= MAP_ENTRY_NOFAULT;
1479 if (cow & MAP_DISABLE_SYNCER)
1480 protoeflags |= MAP_ENTRY_NOSYNC;
1481 if (cow & MAP_DISABLE_COREDUMP)
1482 protoeflags |= MAP_ENTRY_NOCOREDUMP;
1483 if (cow & MAP_STACK_GROWS_DOWN)
1484 protoeflags |= MAP_ENTRY_GROWS_DOWN;
1485 if (cow & MAP_STACK_GROWS_UP)
1486 protoeflags |= MAP_ENTRY_GROWS_UP;
1487 if (cow & MAP_VN_WRITECOUNT)
1488 protoeflags |= MAP_ENTRY_VN_WRITECNT;
1489 if (cow & MAP_VN_EXEC)
1490 protoeflags |= MAP_ENTRY_VN_EXEC;
1491 if ((cow & MAP_CREATE_GUARD) != 0)
1492 protoeflags |= MAP_ENTRY_GUARD;
1493 if ((cow & MAP_CREATE_STACK_GAP_DN) != 0)
1494 protoeflags |= MAP_ENTRY_STACK_GAP_DN;
1495 if ((cow & MAP_CREATE_STACK_GAP_UP) != 0)
1496 protoeflags |= MAP_ENTRY_STACK_GAP_UP;
1497 if (cow & MAP_INHERIT_SHARE)
1498 inheritance = VM_INHERIT_SHARE;
1499 else
1500 inheritance = VM_INHERIT_DEFAULT;
1501
1502 cred = NULL;
1503 if ((cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT | MAP_CREATE_GUARD)) != 0)
1504 goto charged;
1505 if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
1506 ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
1507 if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
1508 return (KERN_RESOURCE_SHORTAGE);
1509 KASSERT(object == NULL ||
1510 (protoeflags & MAP_ENTRY_NEEDS_COPY) != 0 ||
1511 object->cred == NULL,
1512 ("overcommit: vm_map_insert o %p", object));
1513 cred = curthread->td_ucred;
1514 }
1515
1516 charged:
1517 /* Expand the kernel pmap, if necessary. */
1518 if (map == kernel_map && end > kernel_vm_end)
1519 pmap_growkernel(end);
1520 if (object != NULL) {
1521 /*
1522 * OBJ_ONEMAPPING must be cleared unless this mapping
1523 * is trivially proven to be the only mapping for any
1524 * of the object's pages. (Object granularity
1525 * reference counting is insufficient to recognize
1526 * aliases with precision.)
1527 */
1528 VM_OBJECT_WLOCK(object);
1529 if (object->ref_count > 1 || object->shadow_count != 0)
1530 vm_object_clear_flag(object, OBJ_ONEMAPPING);
1531 VM_OBJECT_WUNLOCK(object);
1532 } else if ((prev_entry->eflags & ~MAP_ENTRY_USER_WIRED) ==
1533 protoeflags &&
1534 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP |
1535 MAP_VN_EXEC)) == 0 &&
1536 prev_entry->end == start && (prev_entry->cred == cred ||
1537 (prev_entry->object.vm_object != NULL &&
1538 prev_entry->object.vm_object->cred == cred)) &&
1539 vm_object_coalesce(prev_entry->object.vm_object,
1540 prev_entry->offset,
1541 (vm_size_t)(prev_entry->end - prev_entry->start),
1542 (vm_size_t)(end - prev_entry->end), cred != NULL &&
1543 (protoeflags & MAP_ENTRY_NEEDS_COPY) == 0)) {
1544 /*
1545 * We were able to extend the object. Determine if we
1546 * can extend the previous map entry to include the
1547 * new range as well.
1548 */
1549 if (prev_entry->inheritance == inheritance &&
1550 prev_entry->protection == prot &&
1551 prev_entry->max_protection == max &&
1552 prev_entry->wired_count == 0) {
1553 KASSERT((prev_entry->eflags & MAP_ENTRY_USER_WIRED) ==
1554 0, ("prev_entry %p has incoherent wiring",
1555 prev_entry));
1556 if ((prev_entry->eflags & MAP_ENTRY_GUARD) == 0)
1557 map->size += end - prev_entry->end;
1558 prev_entry->end = end;
1559 vm_map_entry_resize_free(map, prev_entry);
1560 vm_map_simplify_entry(map, prev_entry);
1561 return (KERN_SUCCESS);
1562 }
1563
1564 /*
1565 * If we can extend the object but cannot extend the
1566 * map entry, we have to create a new map entry. We
1567 * must bump the ref count on the extended object to
1568 * account for it. object may be NULL.
1569 */
1570 object = prev_entry->object.vm_object;
1571 offset = prev_entry->offset +
1572 (prev_entry->end - prev_entry->start);
1573 vm_object_reference(object);
1574 if (cred != NULL && object != NULL && object->cred != NULL &&
1575 !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
1576 /* Object already accounts for this uid. */
1577 cred = NULL;
1578 }
1579 }
1580 if (cred != NULL)
1581 crhold(cred);
1582
1583 /*
1584 * Create a new entry
1585 */
1586 new_entry = vm_map_entry_create(map);
1587 new_entry->start = start;
1588 new_entry->end = end;
1589 new_entry->cred = NULL;
1590
1591 new_entry->eflags = protoeflags;
1592 new_entry->object.vm_object = object;
1593 new_entry->offset = offset;
1594
1595 new_entry->inheritance = inheritance;
1596 new_entry->protection = prot;
1597 new_entry->max_protection = max;
1598 new_entry->wired_count = 0;
1599 new_entry->wiring_thread = NULL;
1600 new_entry->read_ahead = VM_FAULT_READ_AHEAD_INIT;
1601 new_entry->next_read = start;
1602
1603 KASSERT(cred == NULL || !ENTRY_CHARGED(new_entry),
1604 ("overcommit: vm_map_insert leaks vm_map %p", new_entry));
1605 new_entry->cred = cred;
1606
1607 /*
1608 * Insert the new entry into the list
1609 */
1610 vm_map_entry_link(map, new_entry);
1611 if ((new_entry->eflags & MAP_ENTRY_GUARD) == 0)
1612 map->size += new_entry->end - new_entry->start;
1613
1614 /*
1615 * Try to coalesce the new entry with both the previous and next
1616 * entries in the list. Previously, we only attempted to coalesce
1617 * with the previous entry when object is NULL. Here, we handle the
1618 * other cases, which are less common.
1619 */
1620 vm_map_simplify_entry(map, new_entry);
1621
1622 if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
1623 vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
1624 end - start, cow & MAP_PREFAULT_PARTIAL);
1625 }
1626
1627 return (KERN_SUCCESS);
1628 }
1629
1630 /*
1631 * vm_map_findspace:
1632 *
1633 * Find the first fit (lowest VM address) for "length" free bytes
1634 * beginning at address >= start in the given map.
1635 *
1636 * In a vm_map_entry, "max_free" is the maximum amount of
1637 * contiguous free space between an entry in its subtree and a
1638 * neighbor of that entry. This allows finding a free region in
1639 * one path down the tree, so O(log n) amortized with splay
1640 * trees.
1641 *
1642 * The map must be locked, and leaves it so.
1643 *
1644 * Returns: starting address if sufficient space,
1645 * vm_map_max(map)-length+1 if insufficient space.
1646 */
1647 vm_offset_t
1648 vm_map_findspace(vm_map_t map, vm_offset_t start, vm_size_t length)
1649 {
1650 vm_map_entry_t llist, rlist, root, y;
1651 vm_size_t left_length;
1652 vm_offset_t gap_end;
1653
1654 /*
1655 * Request must fit within min/max VM address and must avoid
1656 * address wrap.
1657 */
1658 start = MAX(start, vm_map_min(map));
1659 if (start >= vm_map_max(map) || length > vm_map_max(map) - start)
1660 return (vm_map_max(map) - length + 1);
1661
1662 /* Empty tree means wide open address space. */
1663 if (map->root == NULL)
1664 return (start);
1665
1666 /*
1667 * After splay_split, if start is within an entry, push it to the start
1668 * of the following gap. If rlist is at the end of the gap containing
1669 * start, save the end of that gap in gap_end to see if the gap is big
1670 * enough; otherwise set gap_end to start to skip gap-checking and move
1671 * directly to a search of the right subtree.
1672 */
1673 root = vm_map_splay_split(map, start, length, &llist, &rlist);
1674 gap_end = rlist->start;
1675 if (root != NULL) {
1676 start = root->end;
1677 if (root->right != NULL)
1678 gap_end = start;
1679 } else if (rlist != &map->header) {
1680 root = rlist;
1681 rlist = root->left;
1682 root->left = NULL;
1683 } else {
1684 root = llist;
1685 llist = root->right;
1686 root->right = NULL;
1687 }
1688 vm_map_splay_merge(map, root, llist, rlist);
1689 VM_MAP_ASSERT_CONSISTENT(map);
1690 if (length <= gap_end - start)
1691 return (start);
1692
1693 /* With max_free, can immediately tell if no solution. */
1694 if (root->right == NULL || length > root->right->max_free)
1695 return (vm_map_max(map) - length + 1);
1696
1697 /*
1698 * Splay for the least large-enough gap in the right subtree.
1699 */
1700 llist = rlist = &map->header;
1701 for (left_length = 0;;
1702 left_length = vm_map_entry_max_free_left(root, llist)) {
1703 if (length <= left_length)
1704 SPLAY_LEFT_STEP(root, y, rlist,
1705 length <= vm_map_entry_max_free_left(y, llist));
1706 else
1707 SPLAY_RIGHT_STEP(root, y, llist,
1708 length > vm_map_entry_max_free_left(y, root));
1709 if (root == NULL)
1710 break;
1711 }
1712 root = llist;
1713 llist = root->right;
1714 root->right = NULL;
1715 if (rlist != &map->header) {
1716 y = rlist;
1717 rlist = y->left;
1718 y->left = NULL;
1719 vm_map_splay_merge(map, y, &map->header, rlist);
1720 y->max_free = MAX(
1721 vm_map_entry_max_free_left(y, root),
1722 vm_map_entry_max_free_right(y, &map->header));
1723 root->right = y;
1724 }
1725 vm_map_splay_merge(map, root, llist, &map->header);
1726 VM_MAP_ASSERT_CONSISTENT(map);
1727 return (root->end);
1728 }
1729
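/*
 * Illustrative sketch (editorial addition): combining vm_map_findspace()
 * with vm_map_insert() under the map lock.  Note the sentinel return
 * value, vm_map_max(map) - length + 1, which indicates that no gap was
 * found.  The helper name is hypothetical.
 *
 *	static int
 *	example_alloc_space(vm_map_t map, vm_size_t length, vm_offset_t *addrp)
 *	{
 *		vm_offset_t start;
 *		int rv;
 *
 *		vm_map_lock(map);
 *		start = vm_map_findspace(map, vm_map_min(map), length);
 *		if (start + length > vm_map_max(map)) {
 *			vm_map_unlock(map);
 *			return (KERN_NO_SPACE);
 *		}
 *		rv = vm_map_insert(map, NULL, 0, start, start + length,
 *		    VM_PROT_RW, VM_PROT_ALL, 0);
 *		vm_map_unlock(map);
 *		if (rv == KERN_SUCCESS)
 *			*addrp = start;
 *		return (rv);
 *	}
 */
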
1730 int
1731 vm_map_fixed(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1732 vm_offset_t start, vm_size_t length, vm_prot_t prot,
1733 vm_prot_t max, int cow)
1734 {
1735 vm_offset_t end;
1736 int result;
1737
1738 end = start + length;
1739 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1740 object == NULL,
1741 ("vm_map_fixed: non-NULL backing object for stack"));
1742 vm_map_lock(map);
1743 VM_MAP_RANGE_CHECK(map, start, end);
1744 if ((cow & MAP_CHECK_EXCL) == 0)
1745 vm_map_delete(map, start, end);
1746 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1747 result = vm_map_stack_locked(map, start, length, sgrowsiz,
1748 prot, max, cow);
1749 } else {
1750 result = vm_map_insert(map, object, offset, start, end,
1751 prot, max, cow);
1752 }
1753 vm_map_unlock(map);
1754 return (result);
1755 }
1756
1757 static const int aslr_pages_rnd_64[2] = {0x1000, 0x10};
1758 static const int aslr_pages_rnd_32[2] = {0x100, 0x4};
1759
1760 static int cluster_anon = 1;
1761 SYSCTL_INT(_vm, OID_AUTO, cluster_anon, CTLFLAG_RW,
1762 &cluster_anon, 0,
1763 "Cluster anonymous mappings: 0 = no, 1 = yes if no hint, 2 = always");
1764
1765 static bool
1766 clustering_anon_allowed(vm_offset_t addr)
1767 {
1768
1769 switch (cluster_anon) {
1770 case 0:
1771 return (false);
1772 case 1:
1773 return (addr == 0);
1774 case 2:
1775 default:
1776 return (true);
1777 }
1778 }
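/*
 * The clustering policy above is tunable at run time through the sysctl
 * declared before this function, e.g.:
 *
 *	sysctl vm.cluster_anon=2	(always cluster anonymous mappings)
 */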
1779
1780 static long aslr_restarts;
1781 SYSCTL_LONG(_vm, OID_AUTO, aslr_restarts, CTLFLAG_RD,
1782 &aslr_restarts, 0,
1783 "Number of aslr failures");
1784
1785 #define MAP_32BIT_MAX_ADDR ((vm_offset_t)1 << 31)
1786
1787 /*
1788 * Searches for the specified amount of free space in the given map with the
1789 * specified alignment. Performs an address-ordered, first-fit search from
1790 * the given address "*addr", with an optional upper bound "max_addr". If the
1791 * parameter "alignment" is zero, then the alignment is computed from the
1792 * given (object, offset) pair so as to enable the greatest possible use of
1793 * superpage mappings. Returns KERN_SUCCESS and the address of the free space
1794 * in "*addr" if successful. Otherwise, returns KERN_NO_SPACE.
1795 *
1796 * The map must be locked. Initially, there must be at least "length" bytes
1797 * of free space at the given address.
1798 */
1799 static int
1800 vm_map_alignspace(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1801 vm_offset_t *addr, vm_size_t length, vm_offset_t max_addr,
1802 vm_offset_t alignment)
1803 {
1804 vm_offset_t aligned_addr, free_addr;
1805
1806 VM_MAP_ASSERT_LOCKED(map);
1807 free_addr = *addr;
1808 KASSERT(free_addr == vm_map_findspace(map, free_addr, length),
1809 ("caller failed to provide space %#jx at address %p",
1810 (uintmax_t)length, (void *)free_addr));
1811 for (;;) {
1812 /*
1813 * At the start of every iteration, the free space at address
1814 * "*addr" is at least "length" bytes.
1815 */
1816 if (alignment == 0)
1817 pmap_align_superpage(object, offset, addr, length);
1818 else if ((*addr & (alignment - 1)) != 0) {
1819 *addr &= ~(alignment - 1);
1820 *addr += alignment;
1821 }
1822 aligned_addr = *addr;
1823 if (aligned_addr == free_addr) {
1824 /*
1825 * Alignment did not change "*addr", so "*addr" must
1826 * still provide sufficient free space.
1827 */
1828 return (KERN_SUCCESS);
1829 }
1830
1831 /*
1832 * Test for address wrap on "*addr". A wrapped "*addr" could
1833 * be a valid address, in which case vm_map_findspace() cannot
1834 * be relied upon to fail.
1835 */
1836 if (aligned_addr < free_addr)
1837 return (KERN_NO_SPACE);
1838 *addr = vm_map_findspace(map, aligned_addr, length);
1839 if (*addr + length > vm_map_max(map) ||
1840 (max_addr != 0 && *addr + length > max_addr))
1841 return (KERN_NO_SPACE);
1842 free_addr = *addr;
1843 if (free_addr == aligned_addr) {
1844 /*
1845 * If a successful call to vm_map_findspace() did not
1846 * change "*addr", then "*addr" must still be aligned
1847 * and provide sufficient free space.
1848 */
1849 return (KERN_SUCCESS);
1850 }
1851 }
1852 }
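/*
 * The explicit-alignment branch above is the usual power-of-two round-up.
 * For example, with alignment = 0x10000 and *addr = 0x12345:
 *
 *	*addr &= ~(alignment - 1);	-> 0x10000
 *	*addr += alignment;		-> 0x20000
 *
 * i.e. the next 0x10000-aligned boundary above the original address.
 */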
1853
1854 /*
1855 * vm_map_find finds an unallocated region in the target address
1856 * map with the given length. The search is defined to be
1857 * first-fit from the specified address; the region found is
1858 * returned in the same parameter.
1859 *
1860 * If object is non-NULL, its reference count must be bumped
1861 * by the caller prior to this call, to account for the new entry.
1862 */
1863 int
1864 vm_map_find(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
1865 vm_offset_t *addr, /* IN/OUT */
1866 vm_size_t length, vm_offset_t max_addr, int find_space,
1867 vm_prot_t prot, vm_prot_t max, int cow)
1868 {
1869 vm_offset_t alignment, curr_min_addr, min_addr;
1870 int gap, pidx, rv, try;
1871 bool cluster, en_aslr, update_anon;
1872
1873 KASSERT((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0 ||
1874 object == NULL,
1875 ("vm_map_find: non-NULL backing object for stack"));
1876 MPASS((cow & MAP_REMAP) == 0 || (find_space == VMFS_NO_SPACE &&
1877 (cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) == 0));
1878 if (find_space == VMFS_OPTIMAL_SPACE && (object == NULL ||
1879 (object->flags & OBJ_COLORED) == 0))
1880 find_space = VMFS_ANY_SPACE;
1881 if (find_space >> 8 != 0) {
1882 KASSERT((find_space & 0xff) == 0, ("bad VMFS flags"));
1883 alignment = (vm_offset_t)1 << (find_space >> 8);
1884 } else
1885 alignment = 0;
1886 en_aslr = (map->flags & MAP_ASLR) != 0;
1887 update_anon = cluster = clustering_anon_allowed(*addr) &&
1888 (map->flags & MAP_IS_SUB_MAP) == 0 && max_addr == 0 &&
1889 find_space != VMFS_NO_SPACE && object == NULL &&
1890 (cow & (MAP_INHERIT_SHARE | MAP_STACK_GROWS_UP |
1891 MAP_STACK_GROWS_DOWN)) == 0 && prot != PROT_NONE;
1892 curr_min_addr = min_addr = *addr;
1893 if (en_aslr && min_addr == 0 && !cluster &&
1894 find_space != VMFS_NO_SPACE &&
1895 (map->flags & MAP_ASLR_IGNSTART) != 0)
1896 curr_min_addr = min_addr = vm_map_min(map);
1897 try = 0;
1898 vm_map_lock(map);
1899 if (cluster) {
1900 curr_min_addr = map->anon_loc;
1901 if (curr_min_addr == 0)
1902 cluster = false;
1903 }
1904 if (find_space != VMFS_NO_SPACE) {
1905 KASSERT(find_space == VMFS_ANY_SPACE ||
1906 find_space == VMFS_OPTIMAL_SPACE ||
1907 find_space == VMFS_SUPER_SPACE ||
1908 alignment != 0, ("unexpected VMFS flag"));
1909 again:
1910 /*
1911 * When creating an anonymous mapping, try clustering
1912 * with an existing anonymous mapping first.
1913 *
1914 * We make up to two attempts to find address space
1915 * for a given find_space value. The first attempt may
1916 * apply randomization or may cluster with an existing
1917 * anonymous mapping. If this first attempt fails,
1918 * perform a first-fit search of the available address
1919 * space.
1920 *
1921 		 * If all tries failed, and find_space is
1922 		 * VMFS_OPTIMAL_SPACE, fall back to VMFS_ANY_SPACE,
1923 		 * re-enabling clustering and randomization.
1924 */
1925 try++;
1926 MPASS(try <= 2);
1927
1928 if (try == 2) {
1929 /*
1930 * Second try: we failed either to find a
1931 * suitable region for randomizing the
1932 * allocation, or to cluster with an existing
1933 * mapping. Retry with free run.
1934 */
1935 curr_min_addr = (map->flags & MAP_ASLR_IGNSTART) != 0 ?
1936 vm_map_min(map) : min_addr;
1937 atomic_add_long(&aslr_restarts, 1);
1938 }
1939
1940 if (try == 1 && en_aslr && !cluster) {
1941 /*
1942 * Find space for allocation, including
1943 * gap needed for later randomization.
1944 */
1945 pidx = MAXPAGESIZES > 1 && pagesizes[1] != 0 &&
1946 (find_space == VMFS_SUPER_SPACE || find_space ==
1947 VMFS_OPTIMAL_SPACE) ? 1 : 0;
1948 gap = vm_map_max(map) > MAP_32BIT_MAX_ADDR &&
1949 (max_addr == 0 || max_addr > MAP_32BIT_MAX_ADDR) ?
1950 aslr_pages_rnd_64[pidx] : aslr_pages_rnd_32[pidx];
1951 *addr = vm_map_findspace(map, curr_min_addr,
1952 length + gap * pagesizes[pidx]);
1953 if (*addr + length + gap * pagesizes[pidx] >
1954 vm_map_max(map))
1955 goto again;
1956 /* And randomize the start address. */
1957 *addr += (arc4random() % gap) * pagesizes[pidx];
1958 if (max_addr != 0 && *addr + length > max_addr)
1959 goto again;
1960 } else {
1961 *addr = vm_map_findspace(map, curr_min_addr, length);
1962 if (*addr + length > vm_map_max(map) ||
1963 (max_addr != 0 && *addr + length > max_addr)) {
1964 if (cluster) {
1965 cluster = false;
1966 MPASS(try == 1);
1967 goto again;
1968 }
1969 rv = KERN_NO_SPACE;
1970 goto done;
1971 }
1972 }
1973
1974 if (find_space != VMFS_ANY_SPACE &&
1975 (rv = vm_map_alignspace(map, object, offset, addr, length,
1976 max_addr, alignment)) != KERN_SUCCESS) {
1977 if (find_space == VMFS_OPTIMAL_SPACE) {
1978 find_space = VMFS_ANY_SPACE;
1979 curr_min_addr = min_addr;
1980 cluster = update_anon;
1981 try = 0;
1982 goto again;
1983 }
1984 goto done;
1985 }
1986 } else if ((cow & MAP_REMAP) != 0) {
1987 if (*addr < vm_map_min(map) ||
1988 *addr + length > vm_map_max(map) ||
1989 *addr + length <= length) {
1990 rv = KERN_INVALID_ADDRESS;
1991 goto done;
1992 }
1993 vm_map_delete(map, *addr, *addr + length);
1994 }
1995 if ((cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP)) != 0) {
1996 rv = vm_map_stack_locked(map, *addr, length, sgrowsiz, prot,
1997 max, cow);
1998 } else {
1999 rv = vm_map_insert(map, object, offset, *addr, *addr + length,
2000 prot, max, cow);
2001 }
2002 if (rv == KERN_SUCCESS && update_anon)
2003 map->anon_loc = *addr + length;
2004 done:
2005 vm_map_unlock(map);
2006 return (rv);
2007 }
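/*
 * Usage sketch (illustrative): requesting an anonymous mapping at any
 * suitable address; "*addr" is only a hint on input and holds the chosen
 * address on success:
 *
 *	vm_offset_t addr = 0;
 *
 *	rv = vm_map_find(map, NULL, 0, &addr, length, 0, VMFS_ANY_SPACE,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_ALL, 0);
 */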
2008
2009 /*
2010 * vm_map_find_min() is a variant of vm_map_find() that takes an
2011 * additional parameter (min_addr) and treats the given address
2012 * (*addr) differently. Specifically, it treats *addr as a hint
2013 * and not as the minimum address where the mapping is created.
2014 *
2015 * This function works in two phases. First, it tries to
2016 * allocate above the hint. If that fails and the hint is
2017 * greater than min_addr, it performs a second pass, replacing
2018 * the hint with min_addr as the minimum address for the
2019 * allocation.
2020 */
2021 int
2022 vm_map_find_min(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
2023 vm_offset_t *addr, vm_size_t length, vm_offset_t min_addr,
2024 vm_offset_t max_addr, int find_space, vm_prot_t prot, vm_prot_t max,
2025 int cow)
2026 {
2027 vm_offset_t hint;
2028 int rv;
2029
2030 hint = *addr;
2031 for (;;) {
2032 rv = vm_map_find(map, object, offset, addr, length, max_addr,
2033 find_space, prot, max, cow);
2034 if (rv == KERN_SUCCESS || min_addr >= hint)
2035 return (rv);
2036 *addr = hint = min_addr;
2037 }
2038 }
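/*
 * Usage sketch (illustrative, with placeholder values): given a hint of
 * 0x20000000 and a floor of 0x10000000, the first pass searches at and above
 * the hint; only if that fails does the second pass retry from the floor:
 *
 *	addr = 0x20000000;
 *	rv = vm_map_find_min(map, object, offset, &addr, length,
 *	    0x10000000, 0, VMFS_ANY_SPACE, prot, max, cow);
 */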
2039
2040 /*
2041 * A map entry with any of the following flags set must not be merged with
2042 * another entry.
2043 */
2044 #define MAP_ENTRY_NOMERGE_MASK (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP | \
2045 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_IS_SUB_MAP | MAP_ENTRY_VN_EXEC)
2046
2047 static bool
2048 vm_map_mergeable_neighbors(vm_map_entry_t prev, vm_map_entry_t entry)
2049 {
2050
2051 KASSERT((prev->eflags & MAP_ENTRY_NOMERGE_MASK) == 0 ||
2052 (entry->eflags & MAP_ENTRY_NOMERGE_MASK) == 0,
2053 ("vm_map_mergeable_neighbors: neither %p nor %p are mergeable",
2054 prev, entry));
2055 return (prev->end == entry->start &&
2056 prev->object.vm_object == entry->object.vm_object &&
2057 (prev->object.vm_object == NULL ||
2058 prev->offset + (prev->end - prev->start) == entry->offset) &&
2059 prev->eflags == entry->eflags &&
2060 prev->protection == entry->protection &&
2061 prev->max_protection == entry->max_protection &&
2062 prev->inheritance == entry->inheritance &&
2063 prev->wired_count == entry->wired_count &&
2064 prev->cred == entry->cred);
2065 }
2066
2067 static void
2068 vm_map_merged_neighbor_dispose(vm_map_t map, vm_map_entry_t entry)
2069 {
2070
2071 /*
2072 * If the backing object is a vnode object, vm_object_deallocate()
2073 * calls vrele(). However, vrele() does not lock the vnode because
2074 * the vnode has additional references. Thus, the map lock can be
2075 * kept without causing a lock-order reversal with the vnode lock.
2076 *
2077 * Since we count the number of virtual page mappings in
2078 * object->un_pager.vnp.writemappings, the writemappings value
2079 * should not be adjusted when the entry is disposed of.
2080 */
2081 if (entry->object.vm_object != NULL)
2082 vm_object_deallocate(entry->object.vm_object);
2083 if (entry->cred != NULL)
2084 crfree(entry->cred);
2085 vm_map_entry_dispose(map, entry);
2086 }
2087
2088 /*
2089 * vm_map_simplify_entry:
2090 *
2091 * Simplify the given map entry by merging with either neighbor. This
2092 * routine also has the ability to merge with both neighbors.
2093 *
2094 * The map must be locked.
2095 *
2096 * This routine guarantees that the passed entry remains valid (though
2097 * possibly extended). When merging, this routine may delete one or
2098 * both neighbors.
2099 */
2100 void
2101 vm_map_simplify_entry(vm_map_t map, vm_map_entry_t entry)
2102 {
2103 vm_map_entry_t next, prev;
2104
2105 if ((entry->eflags & MAP_ENTRY_NOMERGE_MASK) != 0)
2106 return;
2107 prev = entry->prev;
2108 if (vm_map_mergeable_neighbors(prev, entry)) {
2109 vm_map_entry_unlink(map, prev, UNLINK_MERGE_NEXT);
2110 vm_map_merged_neighbor_dispose(map, prev);
2111 }
2112 next = entry->next;
2113 if (vm_map_mergeable_neighbors(entry, next)) {
2114 vm_map_entry_unlink(map, next, UNLINK_MERGE_PREV);
2115 vm_map_merged_neighbor_dispose(map, next);
2116 }
2117 }
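/*
 * For example, two adjacent entries
 *
 *	[0x1000, 0x3000) object A, offset 0x0
 *	[0x3000, 0x5000) object A, offset 0x2000
 *
 * with identical protection, inheritance, wiring, and credentials satisfy
 * vm_map_mergeable_neighbors() (adjacent ranges, same object, contiguous
 * offsets) and are collapsed into a single entry [0x1000, 0x5000) by the
 * unlink/dispose steps above.
 */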
2118
2119 /*
2120 * vm_map_clip_start: [ internal use only ]
2121 *
2122 * Asserts that the given entry begins at or after
2123 * the specified address; if necessary,
2124 * it splits the entry into two.
2125 */
2126 #define vm_map_clip_start(map, entry, startaddr) \
2127 { \
2128 if (startaddr > entry->start) \
2129 _vm_map_clip_start(map, entry, startaddr); \
2130 }
2131
2132 /*
2133 * This routine is called only when it is known that
2134 * the entry must be split.
2135 */
2136 static void
2137 _vm_map_clip_start(vm_map_t map, vm_map_entry_t entry, vm_offset_t start)
2138 {
2139 vm_map_entry_t new_entry;
2140
2141 VM_MAP_ASSERT_LOCKED(map);
2142 KASSERT(entry->end > start && entry->start < start,
2143 ("_vm_map_clip_start: invalid clip of entry %p", entry));
2144
2145 /*
2146 * Split off the front portion -- note that we must insert the new
2147 * entry BEFORE this one, so that this entry has the specified
2148 * starting address.
2149 */
2150 vm_map_simplify_entry(map, entry);
2151
2152 /*
2153 * If there is no object backing this entry, we might as well create
2154 * one now. If we defer it, an object can get created after the map
2155 * is clipped, and individual objects will be created for the split-up
2156 * map. This is a bit of a hack, but is also about the best place to
2157 * put this improvement.
2158 */
2159 if (entry->object.vm_object == NULL && !map->system_map &&
2160 (entry->eflags & MAP_ENTRY_GUARD) == 0) {
2161 vm_object_t object;
2162 object = vm_object_allocate(OBJT_DEFAULT,
2163 atop(entry->end - entry->start));
2164 entry->object.vm_object = object;
2165 entry->offset = 0;
2166 if (entry->cred != NULL) {
2167 object->cred = entry->cred;
2168 object->charge = entry->end - entry->start;
2169 entry->cred = NULL;
2170 }
2171 } else if (entry->object.vm_object != NULL &&
2172 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2173 entry->cred != NULL) {
2174 VM_OBJECT_WLOCK(entry->object.vm_object);
2175 KASSERT(entry->object.vm_object->cred == NULL,
2176 ("OVERCOMMIT: vm_entry_clip_start: both cred e %p", entry));
2177 entry->object.vm_object->cred = entry->cred;
2178 entry->object.vm_object->charge = entry->end - entry->start;
2179 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2180 entry->cred = NULL;
2181 }
2182
2183 new_entry = vm_map_entry_create(map);
2184 *new_entry = *entry;
2185
2186 new_entry->end = start;
2187 entry->offset += (start - entry->start);
2188 entry->start = start;
2189 if (new_entry->cred != NULL)
2190 crhold(entry->cred);
2191
2192 vm_map_entry_link(map, new_entry);
2193
2194 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2195 vm_object_reference(new_entry->object.vm_object);
2196 vm_map_entry_set_vnode_text(new_entry, true);
2197 /*
2198 		 * The object->un_pager.vnp.writemappings count for the
2199 		 * object backing a MAP_ENTRY_VN_WRITECNT-type entry is
2200 		 * intentionally kept as is here. The virtual pages are
2201 		 * re-distributed among the clipped entries, so the sum
2202 		 * stays the same.
2203 */
2204 }
2205 }
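/*
 * For example, clipping an entry [0x1000, 0x5000) with offset 0x0 at
 * start = 0x3000 leaves this entry as [0x3000, 0x5000) with offset 0x2000
 * and links a new entry [0x1000, 0x3000) with offset 0x0 in front of it.
 */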
2206
2207 /*
2208 * vm_map_clip_end: [ internal use only ]
2209 *
2210 * Asserts that the given entry ends at or before
2211 * the specified address; if necessary,
2212 * it splits the entry into two.
2213 */
2214 #define vm_map_clip_end(map, entry, endaddr) \
2215 { \
2216 if ((endaddr) < (entry->end)) \
2217 _vm_map_clip_end((map), (entry), (endaddr)); \
2218 }
2219
2220 /*
2221 * This routine is called only when it is known that
2222 * the entry must be split.
2223 */
2224 static void
2225 _vm_map_clip_end(vm_map_t map, vm_map_entry_t entry, vm_offset_t end)
2226 {
2227 vm_map_entry_t new_entry;
2228
2229 VM_MAP_ASSERT_LOCKED(map);
2230 KASSERT(entry->start < end && entry->end > end,
2231 ("_vm_map_clip_end: invalid clip of entry %p", entry));
2232
2233 /*
2234 * If there is no object backing this entry, we might as well create
2235 * one now. If we defer it, an object can get created after the map
2236 * is clipped, and individual objects will be created for the split-up
2237 * map. This is a bit of a hack, but is also about the best place to
2238 * put this improvement.
2239 */
2240 if (entry->object.vm_object == NULL && !map->system_map &&
2241 (entry->eflags & MAP_ENTRY_GUARD) == 0) {
2242 vm_object_t object;
2243 object = vm_object_allocate(OBJT_DEFAULT,
2244 atop(entry->end - entry->start));
2245 entry->object.vm_object = object;
2246 entry->offset = 0;
2247 if (entry->cred != NULL) {
2248 object->cred = entry->cred;
2249 object->charge = entry->end - entry->start;
2250 entry->cred = NULL;
2251 }
2252 } else if (entry->object.vm_object != NULL &&
2253 ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
2254 entry->cred != NULL) {
2255 VM_OBJECT_WLOCK(entry->object.vm_object);
2256 KASSERT(entry->object.vm_object->cred == NULL,
2257 ("OVERCOMMIT: vm_entry_clip_end: both cred e %p", entry));
2258 entry->object.vm_object->cred = entry->cred;
2259 entry->object.vm_object->charge = entry->end - entry->start;
2260 VM_OBJECT_WUNLOCK(entry->object.vm_object);
2261 entry->cred = NULL;
2262 }
2263
2264 /*
2265 * Create a new entry and insert it AFTER the specified entry
2266 */
2267 new_entry = vm_map_entry_create(map);
2268 *new_entry = *entry;
2269
2270 new_entry->start = entry->end = end;
2271 new_entry->offset += (end - entry->start);
2272 if (new_entry->cred != NULL)
2273 crhold(entry->cred);
2274
2275 vm_map_entry_link(map, new_entry);
2276
2277 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0) {
2278 vm_object_reference(new_entry->object.vm_object);
2279 vm_map_entry_set_vnode_text(new_entry, true);
2280 }
2281 }
2282
2283 /*
2284 * vm_map_submap: [ kernel use only ]
2285 *
2286 * Mark the given range as handled by a subordinate map.
2287 *
2288 * This range must have been created with vm_map_find,
2289 * and no other operations may have been performed on this
2290 * range prior to calling vm_map_submap.
2291 *
2292 * Only a limited number of operations can be performed
2293 * within this range after calling vm_map_submap:
2294 * vm_fault
2295 * [Don't try vm_map_copy!]
2296 *
2297 * To remove a submapping, one must first remove the
2298 * range from the superior map, and then destroy the
2299 * submap (if desired). [Better yet, don't try it.]
2300 */
2301 int
2302 vm_map_submap(
2303 vm_map_t map,
2304 vm_offset_t start,
2305 vm_offset_t end,
2306 vm_map_t submap)
2307 {
2308 vm_map_entry_t entry;
2309 int result;
2310
2311 result = KERN_INVALID_ARGUMENT;
2312
2313 vm_map_lock(submap);
2314 submap->flags |= MAP_IS_SUB_MAP;
2315 vm_map_unlock(submap);
2316
2317 vm_map_lock(map);
2318
2319 VM_MAP_RANGE_CHECK(map, start, end);
2320
2321 if (vm_map_lookup_entry(map, start, &entry)) {
2322 vm_map_clip_start(map, entry, start);
2323 } else
2324 entry = entry->next;
2325
2326 vm_map_clip_end(map, entry, end);
2327
2328 if ((entry->start == start) && (entry->end == end) &&
2329 ((entry->eflags & MAP_ENTRY_COW) == 0) &&
2330 (entry->object.vm_object == NULL)) {
2331 entry->object.sub_map = submap;
2332 entry->eflags |= MAP_ENTRY_IS_SUB_MAP;
2333 result = KERN_SUCCESS;
2334 }
2335 vm_map_unlock(map);
2336
2337 if (result != KERN_SUCCESS) {
2338 vm_map_lock(submap);
2339 submap->flags &= ~MAP_IS_SUB_MAP;
2340 vm_map_unlock(submap);
2341 }
2342 return (result);
2343 }
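/*
 * Usage sketch (illustrative, with placeholder variables): a submap range is
 * reserved in the parent map first and then installed:
 *
 *	rv = vm_map_find(parent_map, NULL, 0, &minaddr, size, 0,
 *	    VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
 *	if (rv == KERN_SUCCESS)
 *		rv = vm_map_submap(parent_map, minaddr, minaddr + size,
 *		    submap);
 */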
2344
2345 /*
2346 * The maximum number of pages to map if MAP_PREFAULT_PARTIAL is specified
2347 */
2348 #define MAX_INIT_PT 96
2349
2350 /*
2351 * vm_map_pmap_enter:
2352 *
2353 * Preload the specified map's pmap with mappings to the specified
2354 * object's memory-resident pages. No further physical pages are
2355 * allocated, and no further virtual pages are retrieved from secondary
2356 * storage. If the specified flags include MAP_PREFAULT_PARTIAL, then a
2357 * limited number of page mappings are created at the low-end of the
2358 * specified address range. (For this purpose, a superpage mapping
2359 * counts as one page mapping.) Otherwise, all resident pages within
2360 * the specified address range are mapped.
2361 */
2362 static void
2363 vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot,
2364 vm_object_t object, vm_pindex_t pindex, vm_size_t size, int flags)
2365 {
2366 vm_offset_t start;
2367 vm_page_t p, p_start;
2368 vm_pindex_t mask, psize, threshold, tmpidx;
2369
2370 if ((prot & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0 || object == NULL)
2371 return;
2372 VM_OBJECT_RLOCK(object);
2373 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2374 VM_OBJECT_RUNLOCK(object);
2375 VM_OBJECT_WLOCK(object);
2376 if (object->type == OBJT_DEVICE || object->type == OBJT_SG) {
2377 pmap_object_init_pt(map->pmap, addr, object, pindex,
2378 size);
2379 VM_OBJECT_WUNLOCK(object);
2380 return;
2381 }
2382 VM_OBJECT_LOCK_DOWNGRADE(object);
2383 }
2384
2385 psize = atop(size);
2386 if (psize + pindex > object->size) {
2387 if (object->size < pindex) {
2388 VM_OBJECT_RUNLOCK(object);
2389 return;
2390 }
2391 psize = object->size - pindex;
2392 }
2393
2394 start = 0;
2395 p_start = NULL;
2396 threshold = MAX_INIT_PT;
2397
2398 p = vm_page_find_least(object, pindex);
2399 /*
2400 * Assert: the variable p is either (1) the page with the
2401 * least pindex greater than or equal to the parameter pindex
2402 * or (2) NULL.
2403 */
2404 for (;
2405 p != NULL && (tmpidx = p->pindex - pindex) < psize;
2406 p = TAILQ_NEXT(p, listq)) {
2407 /*
2408 		 * Don't let prefaulting for madvise exhaust the last
2409 		 * free pages by allocating pv entries.
2410 */
2411 if (((flags & MAP_PREFAULT_MADVISE) != 0 &&
2412 vm_page_count_severe()) ||
2413 ((flags & MAP_PREFAULT_PARTIAL) != 0 &&
2414 tmpidx >= threshold)) {
2415 psize = tmpidx;
2416 break;
2417 }
2418 if (p->valid == VM_PAGE_BITS_ALL) {
2419 if (p_start == NULL) {
2420 start = addr + ptoa(tmpidx);
2421 p_start = p;
2422 }
2423 /* Jump ahead if a superpage mapping is possible. */
2424 if (p->psind > 0 && ((addr + ptoa(tmpidx)) &
2425 (pagesizes[p->psind] - 1)) == 0) {
2426 mask = atop(pagesizes[p->psind]) - 1;
2427 if (tmpidx + mask < psize &&
2428 vm_page_ps_test(p, PS_ALL_VALID, NULL)) {
2429 p += mask;
2430 threshold += mask;
2431 }
2432 }
2433 } else if (p_start != NULL) {
2434 pmap_enter_object(map->pmap, start, addr +
2435 ptoa(tmpidx), p_start, prot);
2436 p_start = NULL;
2437 }
2438 }
2439 if (p_start != NULL)
2440 pmap_enter_object(map->pmap, start, addr + ptoa(psize),
2441 p_start, prot);
2442 VM_OBJECT_RUNLOCK(object);
2443 }
2444
2445 /*
2446 * vm_map_protect:
2447 *
2448 * Sets the protection of the specified address
2449 * region in the target map. If "set_max" is
2450 * specified, the maximum protection is to be set;
2451 * otherwise, only the current protection is affected.
2452 */
2453 int
2454 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
2455 vm_prot_t new_prot, boolean_t set_max)
2456 {
2457 vm_map_entry_t current, entry, in_tran;
2458 vm_object_t obj;
2459 struct ucred *cred;
2460 vm_prot_t old_prot;
2461
2462 if (start == end)
2463 return (KERN_SUCCESS);
2464
2465 again:
2466 in_tran = NULL;
2467 vm_map_lock(map);
2468
2469 /*
2470 * Ensure that we are not concurrently wiring pages. vm_map_wire() may
2471 * need to fault pages into the map and will drop the map lock while
2472 * doing so, and the VM object may end up in an inconsistent state if we
2473 * update the protection on the map entry in between faults.
2474 */
2475 vm_map_wait_busy(map);
2476
2477 VM_MAP_RANGE_CHECK(map, start, end);
2478
2479 if (vm_map_lookup_entry(map, start, &entry)) {
2480 vm_map_clip_start(map, entry, start);
2481 } else {
2482 entry = entry->next;
2483 }
2484
2485 /*
2486 * Make a first pass to check for protection violations.
2487 */
2488 for (current = entry; current->start < end; current = current->next) {
2489 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2490 continue;
2491 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
2492 vm_map_unlock(map);
2493 return (KERN_INVALID_ARGUMENT);
2494 }
2495 if ((new_prot & current->max_protection) != new_prot) {
2496 vm_map_unlock(map);
2497 return (KERN_PROTECTION_FAILURE);
2498 }
2499 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0)
2500 in_tran = entry;
2501 }
2502
2503 /*
2504 	 * Postpone the operation until all in-transition map entries
2505 	 * have stabilized. An in-transition entry might already have
2506 	 * its pages wired and its wired_count incremented, but not
2507 	 * yet have the MAP_ENTRY_USER_WIRED flag set, and be visible
2508 	 * to other threads because the map lock is dropped. In this
2509 	 * case we would miss our call to vm_fault_copy_entry().
2510 */
2511 if (in_tran != NULL) {
2512 in_tran->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2513 vm_map_unlock_and_wait(map, 0);
2514 goto again;
2515 }
2516
2517 /*
2518 	 * Do an accounting pass for private read-only mappings that
2519 	 * will now do copy-on-write due to the newly allowed write
2520 	 * access (e.g., a debugger setting a breakpoint in a text segment).
2521 */
2522 for (current = entry; current->start < end; current = current->next) {
2523
2524 vm_map_clip_end(map, current, end);
2525
2526 if (set_max ||
2527 ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
2528 ENTRY_CHARGED(current) ||
2529 (current->eflags & MAP_ENTRY_GUARD) != 0) {
2530 continue;
2531 }
2532
2533 cred = curthread->td_ucred;
2534 obj = current->object.vm_object;
2535
2536 if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
2537 if (!swap_reserve(current->end - current->start)) {
2538 vm_map_unlock(map);
2539 return (KERN_RESOURCE_SHORTAGE);
2540 }
2541 crhold(cred);
2542 current->cred = cred;
2543 continue;
2544 }
2545
2546 VM_OBJECT_WLOCK(obj);
2547 if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
2548 VM_OBJECT_WUNLOCK(obj);
2549 continue;
2550 }
2551
2552 /*
2553 * Charge for the whole object allocation now, since
2554 * we cannot distinguish between non-charged and
2555 		 * charged clipped mappings of the same object later.
2556 */
2557 KASSERT(obj->charge == 0,
2558 ("vm_map_protect: object %p overcharged (entry %p)",
2559 obj, current));
2560 if (!swap_reserve(ptoa(obj->size))) {
2561 VM_OBJECT_WUNLOCK(obj);
2562 vm_map_unlock(map);
2563 return (KERN_RESOURCE_SHORTAGE);
2564 }
2565
2566 crhold(cred);
2567 obj->cred = cred;
2568 obj->charge = ptoa(obj->size);
2569 VM_OBJECT_WUNLOCK(obj);
2570 }
2571
2572 /*
2573 * Go back and fix up protections. [Note that clipping is not
2574 * necessary the second time.]
2575 */
2576 for (current = entry; current->start < end; current = current->next) {
2577 if ((current->eflags & MAP_ENTRY_GUARD) != 0)
2578 continue;
2579
2580 old_prot = current->protection;
2581
2582 if (set_max)
2583 current->protection =
2584 (current->max_protection = new_prot) &
2585 old_prot;
2586 else
2587 current->protection = new_prot;
2588
2589 /*
2590 * For user wired map entries, the normal lazy evaluation of
2591 * write access upgrades through soft page faults is
2592 * undesirable. Instead, immediately copy any pages that are
2593 * copy-on-write and enable write access in the physical map.
2594 */
2595 if ((current->eflags & MAP_ENTRY_USER_WIRED) != 0 &&
2596 (current->protection & VM_PROT_WRITE) != 0 &&
2597 (old_prot & VM_PROT_WRITE) == 0)
2598 vm_fault_copy_entry(map, map, current, current, NULL);
2599
2600 /*
2601 * When restricting access, update the physical map. Worry
2602 * about copy-on-write here.
2603 */
2604 if ((old_prot & ~current->protection) != 0) {
2605 #define MASK(entry) (((entry)->eflags & MAP_ENTRY_COW) ? ~VM_PROT_WRITE : \
2606 VM_PROT_ALL)
2607 pmap_protect(map->pmap, current->start,
2608 current->end,
2609 current->protection & MASK(current));
2610 #undef MASK
2611 }
2612 vm_map_simplify_entry(map, current);
2613 }
2614 vm_map_unlock(map);
2615 return (KERN_SUCCESS);
2616 }
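/*
 * Usage sketch (illustrative): revoking write access on a range while
 * leaving the maximum protection untouched:
 *
 *	rv = vm_map_protect(map, start, end, VM_PROT_READ, FALSE);
 *
 * Passing TRUE for set_max instead lowers max_protection and intersects the
 * current protection with the new maximum.
 */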
2617
2618 /*
2619 * vm_map_madvise:
2620 *
2621 * This routine traverses a process's map on behalf of the madvise
2622 * system call. Advisories are classified as either those affecting
2623 * the vm_map_entry structure or those affecting the underlying
2624 * objects.
2625 */
2626 int
2627 vm_map_madvise(
2628 vm_map_t map,
2629 vm_offset_t start,
2630 vm_offset_t end,
2631 int behav)
2632 {
2633 vm_map_entry_t current, entry;
2634 bool modify_map;
2635
2636 /*
2637 * Some madvise calls directly modify the vm_map_entry, in which case
2638 * we need to use an exclusive lock on the map and we need to perform
2639 * various clipping operations. Otherwise we only need a read-lock
2640 * on the map.
2641 */
2642 switch(behav) {
2643 case MADV_NORMAL:
2644 case MADV_SEQUENTIAL:
2645 case MADV_RANDOM:
2646 case MADV_NOSYNC:
2647 case MADV_AUTOSYNC:
2648 case MADV_NOCORE:
2649 case MADV_CORE:
2650 if (start == end)
2651 return (0);
2652 modify_map = true;
2653 vm_map_lock(map);
2654 break;
2655 case MADV_WILLNEED:
2656 case MADV_DONTNEED:
2657 case MADV_FREE:
2658 if (start == end)
2659 return (0);
2660 modify_map = false;
2661 vm_map_lock_read(map);
2662 break;
2663 default:
2664 return (EINVAL);
2665 }
2666
2667 /*
2668 * Locate starting entry and clip if necessary.
2669 */
2670 VM_MAP_RANGE_CHECK(map, start, end);
2671
2672 if (vm_map_lookup_entry(map, start, &entry)) {
2673 if (modify_map)
2674 vm_map_clip_start(map, entry, start);
2675 } else {
2676 entry = entry->next;
2677 }
2678
2679 if (modify_map) {
2680 /*
2681 * madvise behaviors that are implemented in the vm_map_entry.
2682 *
2683 * We clip the vm_map_entry so that behavioral changes are
2684 * limited to the specified address range.
2685 */
2686 for (current = entry; current->start < end;
2687 current = current->next) {
2688 if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2689 continue;
2690
2691 vm_map_clip_end(map, current, end);
2692
2693 switch (behav) {
2694 case MADV_NORMAL:
2695 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_NORMAL);
2696 break;
2697 case MADV_SEQUENTIAL:
2698 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_SEQUENTIAL);
2699 break;
2700 case MADV_RANDOM:
2701 vm_map_entry_set_behavior(current, MAP_ENTRY_BEHAV_RANDOM);
2702 break;
2703 case MADV_NOSYNC:
2704 current->eflags |= MAP_ENTRY_NOSYNC;
2705 break;
2706 case MADV_AUTOSYNC:
2707 current->eflags &= ~MAP_ENTRY_NOSYNC;
2708 break;
2709 case MADV_NOCORE:
2710 current->eflags |= MAP_ENTRY_NOCOREDUMP;
2711 break;
2712 case MADV_CORE:
2713 current->eflags &= ~MAP_ENTRY_NOCOREDUMP;
2714 break;
2715 default:
2716 break;
2717 }
2718 vm_map_simplify_entry(map, current);
2719 }
2720 vm_map_unlock(map);
2721 } else {
2722 vm_pindex_t pstart, pend;
2723
2724 /*
2725 * madvise behaviors that are implemented in the underlying
2726 * vm_object.
2727 *
2728 * Since we don't clip the vm_map_entry, we have to clip
2729 * the vm_object pindex and count.
2730 */
2731 for (current = entry; current->start < end;
2732 current = current->next) {
2733 vm_offset_t useEnd, useStart;
2734
2735 if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
2736 continue;
2737
2738 /*
2739 * MADV_FREE would otherwise rewind time to
2740 * the creation of the shadow object. Because
2741 * we hold the VM map read-locked, neither the
2742 * entry's object nor the presence of a
2743 * backing object can change.
2744 */
2745 if (behav == MADV_FREE &&
2746 current->object.vm_object != NULL &&
2747 current->object.vm_object->backing_object != NULL)
2748 continue;
2749
2750 pstart = OFF_TO_IDX(current->offset);
2751 pend = pstart + atop(current->end - current->start);
2752 useStart = current->start;
2753 useEnd = current->end;
2754
2755 if (current->start < start) {
2756 pstart += atop(start - current->start);
2757 useStart = start;
2758 }
2759 if (current->end > end) {
2760 pend -= atop(current->end - end);
2761 useEnd = end;
2762 }
2763
2764 if (pstart >= pend)
2765 continue;
2766
2767 /*
2768 * Perform the pmap_advise() before clearing
2769 * PGA_REFERENCED in vm_page_advise(). Otherwise, a
2770 * concurrent pmap operation, such as pmap_remove(),
2771 * could clear a reference in the pmap and set
2772 * PGA_REFERENCED on the page before the pmap_advise()
2773 * had completed. Consequently, the page would appear
2774 * referenced based upon an old reference that
2775 * occurred before this pmap_advise() ran.
2776 */
2777 if (behav == MADV_DONTNEED || behav == MADV_FREE)
2778 pmap_advise(map->pmap, useStart, useEnd,
2779 behav);
2780
2781 vm_object_madvise(current->object.vm_object, pstart,
2782 pend, behav);
2783
2784 /*
2785 * Pre-populate paging structures in the
2786 * WILLNEED case. For wired entries, the
2787 * paging structures are already populated.
2788 */
2789 if (behav == MADV_WILLNEED &&
2790 current->wired_count == 0) {
2791 vm_map_pmap_enter(map,
2792 useStart,
2793 current->protection,
2794 current->object.vm_object,
2795 pstart,
2796 ptoa(pend - pstart),
2797 MAP_PREFAULT_MADVISE
2798 );
2799 }
2800 }
2801 vm_map_unlock_read(map);
2802 }
2803 return (0);
2804 }
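/*
 * Usage sketch (illustrative): object-level advice such as MADV_WILLNEED
 * takes only the read lock and pre-populates the pmap for a range that is
 * about to be touched:
 *
 *	(void)vm_map_madvise(map, start, end, MADV_WILLNEED);
 *
 * whereas entry-level advice such as MADV_NOSYNC takes the exclusive lock
 * and clips entries so that the flag applies only to [start, end).
 */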
2805
2806
2807 /*
2808 * vm_map_inherit:
2809 *
2810 * Sets the inheritance of the specified address
2811 * range in the target map. Inheritance
2812 * affects how the map will be shared with
2813 * child maps at the time of vmspace_fork.
2814 */
2815 int
2816 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
2817 vm_inherit_t new_inheritance)
2818 {
2819 vm_map_entry_t entry;
2820 vm_map_entry_t temp_entry;
2821
2822 switch (new_inheritance) {
2823 case VM_INHERIT_NONE:
2824 case VM_INHERIT_COPY:
2825 case VM_INHERIT_SHARE:
2826 case VM_INHERIT_ZERO:
2827 break;
2828 default:
2829 return (KERN_INVALID_ARGUMENT);
2830 }
2831 if (start == end)
2832 return (KERN_SUCCESS);
2833 vm_map_lock(map);
2834 VM_MAP_RANGE_CHECK(map, start, end);
2835 if (vm_map_lookup_entry(map, start, &temp_entry)) {
2836 entry = temp_entry;
2837 vm_map_clip_start(map, entry, start);
2838 } else
2839 entry = temp_entry->next;
2840 while (entry->start < end) {
2841 vm_map_clip_end(map, entry, end);
2842 if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
2843 new_inheritance != VM_INHERIT_ZERO)
2844 entry->inheritance = new_inheritance;
2845 vm_map_simplify_entry(map, entry);
2846 entry = entry->next;
2847 }
2848 vm_map_unlock(map);
2849 return (KERN_SUCCESS);
2850 }
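/*
 * Usage sketch (illustrative): marking a range so that a child created by
 * fork() shares it with the parent instead of receiving a copy:
 *
 *	rv = vm_map_inherit(map, start, end, VM_INHERIT_SHARE);
 */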
2851
2852 /*
2853 * vm_map_unwire:
2854 *
2855 * Implements both kernel and user unwiring.
2856 */
2857 int
2858 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
2859 int flags)
2860 {
2861 vm_map_entry_t entry, first_entry, tmp_entry;
2862 vm_offset_t saved_start;
2863 unsigned int last_timestamp;
2864 int rv;
2865 boolean_t need_wakeup, result, user_unwire;
2866
2867 if (start == end)
2868 return (KERN_SUCCESS);
2869 user_unwire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
2870 vm_map_lock(map);
2871 VM_MAP_RANGE_CHECK(map, start, end);
2872 if (!vm_map_lookup_entry(map, start, &first_entry)) {
2873 if (flags & VM_MAP_WIRE_HOLESOK)
2874 first_entry = first_entry->next;
2875 else {
2876 vm_map_unlock(map);
2877 return (KERN_INVALID_ADDRESS);
2878 }
2879 }
2880 last_timestamp = map->timestamp;
2881 entry = first_entry;
2882 while (entry->start < end) {
2883 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
2884 /*
2885 * We have not yet clipped the entry.
2886 */
2887 saved_start = (start >= entry->start) ? start :
2888 entry->start;
2889 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
2890 if (vm_map_unlock_and_wait(map, 0)) {
2891 /*
2892 * Allow interruption of user unwiring?
2893 */
2894 }
2895 vm_map_lock(map);
2896 			if (last_timestamp + 1 != map->timestamp) {
2897 /*
2898 * Look again for the entry because the map was
2899 * modified while it was unlocked.
2900 * Specifically, the entry may have been
2901 * clipped, merged, or deleted.
2902 */
2903 if (!vm_map_lookup_entry(map, saved_start,
2904 &tmp_entry)) {
2905 if (flags & VM_MAP_WIRE_HOLESOK)
2906 tmp_entry = tmp_entry->next;
2907 else {
2908 if (saved_start == start) {
2909 /*
2910 * First_entry has been deleted.
2911 */
2912 vm_map_unlock(map);
2913 return (KERN_INVALID_ADDRESS);
2914 }
2915 end = saved_start;
2916 rv = KERN_INVALID_ADDRESS;
2917 goto done;
2918 }
2919 }
2920 if (entry == first_entry)
2921 first_entry = tmp_entry;
2922 else
2923 first_entry = NULL;
2924 entry = tmp_entry;
2925 }
2926 last_timestamp = map->timestamp;
2927 continue;
2928 }
2929 vm_map_clip_start(map, entry, start);
2930 vm_map_clip_end(map, entry, end);
2931 /*
2932 * Mark the entry in case the map lock is released. (See
2933 * above.)
2934 */
2935 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
2936 entry->wiring_thread == NULL,
2937 ("owned map entry %p", entry));
2938 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
2939 entry->wiring_thread = curthread;
2940 /*
2941 * Check the map for holes in the specified region.
2942 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
2943 */
2944 if (((flags & VM_MAP_WIRE_HOLESOK) == 0) &&
2945 (entry->end < end && entry->next->start > entry->end)) {
2946 end = entry->end;
2947 rv = KERN_INVALID_ADDRESS;
2948 goto done;
2949 }
2950 /*
2951 * If system unwiring, require that the entry is system wired.
2952 */
2953 if (!user_unwire &&
2954 vm_map_entry_system_wired_count(entry) == 0) {
2955 end = entry->end;
2956 rv = KERN_INVALID_ARGUMENT;
2957 goto done;
2958 }
2959 entry = entry->next;
2960 }
2961 rv = KERN_SUCCESS;
2962 done:
2963 need_wakeup = FALSE;
2964 if (first_entry == NULL) {
2965 result = vm_map_lookup_entry(map, start, &first_entry);
2966 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
2967 first_entry = first_entry->next;
2968 else
2969 KASSERT(result, ("vm_map_unwire: lookup failed"));
2970 }
2971 for (entry = first_entry; entry->start < end; entry = entry->next) {
2972 /*
2973 * If VM_MAP_WIRE_HOLESOK was specified, an empty
2974 * space in the unwired region could have been mapped
2975 * while the map lock was dropped for draining
2976 * MAP_ENTRY_IN_TRANSITION. Moreover, another thread
2977 * could be simultaneously wiring this new mapping
2978 * entry. Detect these cases and skip any entries
2979 		 * not marked as in transition by us.
2980 */
2981 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
2982 entry->wiring_thread != curthread) {
2983 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
2984 ("vm_map_unwire: !HOLESOK and new/changed entry"));
2985 continue;
2986 }
2987
2988 if (rv == KERN_SUCCESS && (!user_unwire ||
2989 (entry->eflags & MAP_ENTRY_USER_WIRED))) {
2990 if (user_unwire)
2991 entry->eflags &= ~MAP_ENTRY_USER_WIRED;
2992 if (entry->wired_count == 1)
2993 vm_map_entry_unwire(map, entry);
2994 else
2995 entry->wired_count--;
2996 }
2997 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
2998 ("vm_map_unwire: in-transition flag missing %p", entry));
2999 KASSERT(entry->wiring_thread == curthread,
3000 ("vm_map_unwire: alien wire %p", entry));
3001 entry->eflags &= ~MAP_ENTRY_IN_TRANSITION;
3002 entry->wiring_thread = NULL;
3003 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3004 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3005 need_wakeup = TRUE;
3006 }
3007 vm_map_simplify_entry(map, entry);
3008 }
3009 vm_map_unlock(map);
3010 if (need_wakeup)
3011 vm_map_wakeup(map);
3012 return (rv);
3013 }
3014
3015 /*
3016 * vm_map_wire_entry_failure:
3017 *
3018 * Handle a wiring failure on the given entry.
3019 *
3020 * The map should be locked.
3021 */
3022 static void
3023 vm_map_wire_entry_failure(vm_map_t map, vm_map_entry_t entry,
3024 vm_offset_t failed_addr)
3025 {
3026
3027 VM_MAP_ASSERT_LOCKED(map);
3028 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 &&
3029 entry->wired_count == 1,
3030 ("vm_map_wire_entry_failure: entry %p isn't being wired", entry));
3031 KASSERT(failed_addr < entry->end,
3032 ("vm_map_wire_entry_failure: entry %p was fully wired", entry));
3033
3034 /*
3035 * If any pages at the start of this entry were successfully wired,
3036 * then unwire them.
3037 */
3038 if (failed_addr > entry->start) {
3039 pmap_unwire(map->pmap, entry->start, failed_addr);
3040 vm_object_unwire(entry->object.vm_object, entry->offset,
3041 failed_addr - entry->start, PQ_ACTIVE);
3042 }
3043
3044 /*
3045 * Assign an out-of-range value to represent the failure to wire this
3046 * entry.
3047 */
3048 entry->wired_count = -1;
3049 }
3050
3051 /*
3052 * vm_map_wire:
3053 *
3054 * Implements both kernel and user wiring.
3055 */
3056 int
3057 vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
3058 int flags)
3059 {
3060 vm_map_entry_t entry, first_entry, tmp_entry;
3061 vm_offset_t faddr, saved_end, saved_start;
3062 unsigned int last_timestamp;
3063 int rv;
3064 boolean_t need_wakeup, result, user_wire;
3065 vm_prot_t prot;
3066
3067 if (start == end)
3068 return (KERN_SUCCESS);
3069 prot = 0;
3070 if (flags & VM_MAP_WIRE_WRITE)
3071 prot |= VM_PROT_WRITE;
3072 user_wire = (flags & VM_MAP_WIRE_USER) ? TRUE : FALSE;
3073 vm_map_lock(map);
3074 VM_MAP_RANGE_CHECK(map, start, end);
3075 if (!vm_map_lookup_entry(map, start, &first_entry)) {
3076 if (flags & VM_MAP_WIRE_HOLESOK)
3077 first_entry = first_entry->next;
3078 else {
3079 vm_map_unlock(map);
3080 return (KERN_INVALID_ADDRESS);
3081 }
3082 }
3083 last_timestamp = map->timestamp;
3084 entry = first_entry;
3085 while (entry->start < end) {
3086 if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
3087 /*
3088 * We have not yet clipped the entry.
3089 */
3090 saved_start = (start >= entry->start) ? start :
3091 entry->start;
3092 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3093 if (vm_map_unlock_and_wait(map, 0)) {
3094 /*
3095 * Allow interruption of user wiring?
3096 */
3097 }
3098 vm_map_lock(map);
3099 if (last_timestamp + 1 != map->timestamp) {
3100 /*
3101 * Look again for the entry because the map was
3102 * modified while it was unlocked.
3103 * Specifically, the entry may have been
3104 * clipped, merged, or deleted.
3105 */
3106 if (!vm_map_lookup_entry(map, saved_start,
3107 &tmp_entry)) {
3108 if (flags & VM_MAP_WIRE_HOLESOK)
3109 tmp_entry = tmp_entry->next;
3110 else {
3111 if (saved_start == start) {
3112 /*
3113 * first_entry has been deleted.
3114 */
3115 vm_map_unlock(map);
3116 return (KERN_INVALID_ADDRESS);
3117 }
3118 end = saved_start;
3119 rv = KERN_INVALID_ADDRESS;
3120 goto done;
3121 }
3122 }
3123 if (entry == first_entry)
3124 first_entry = tmp_entry;
3125 else
3126 first_entry = NULL;
3127 entry = tmp_entry;
3128 }
3129 last_timestamp = map->timestamp;
3130 continue;
3131 }
3132 vm_map_clip_start(map, entry, start);
3133 vm_map_clip_end(map, entry, end);
3134 /*
3135 * Mark the entry in case the map lock is released. (See
3136 * above.)
3137 */
3138 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 &&
3139 entry->wiring_thread == NULL,
3140 ("owned map entry %p", entry));
3141 entry->eflags |= MAP_ENTRY_IN_TRANSITION;
3142 entry->wiring_thread = curthread;
3143 if ((entry->protection & (VM_PROT_READ | VM_PROT_EXECUTE)) == 0
3144 || (entry->protection & prot) != prot) {
3145 entry->eflags |= MAP_ENTRY_WIRE_SKIPPED;
3146 if ((flags & VM_MAP_WIRE_HOLESOK) == 0) {
3147 end = entry->end;
3148 rv = KERN_INVALID_ADDRESS;
3149 goto done;
3150 }
3151 goto next_entry;
3152 }
3153 if (entry->wired_count == 0) {
3154 entry->wired_count++;
3155 saved_start = entry->start;
3156 saved_end = entry->end;
3157
3158 /*
3159 * Release the map lock, relying on the in-transition
3160 * mark. Mark the map busy for fork.
3161 */
3162 vm_map_busy(map);
3163 vm_map_unlock(map);
3164
3165 faddr = saved_start;
3166 do {
3167 /*
3168 * Simulate a fault to get the page and enter
3169 * it into the physical map.
3170 */
3171 if ((rv = vm_fault(map, faddr, VM_PROT_NONE,
3172 VM_FAULT_WIRE)) != KERN_SUCCESS)
3173 break;
3174 } while ((faddr += PAGE_SIZE) < saved_end);
3175 vm_map_lock(map);
3176 vm_map_unbusy(map);
3177 if (last_timestamp + 1 != map->timestamp) {
3178 /*
3179 * Look again for the entry because the map was
3180 * modified while it was unlocked. The entry
3181 * may have been clipped, but NOT merged or
3182 * deleted.
3183 */
3184 result = vm_map_lookup_entry(map, saved_start,
3185 &tmp_entry);
3186 KASSERT(result, ("vm_map_wire: lookup failed"));
3187 if (entry == first_entry)
3188 first_entry = tmp_entry;
3189 else
3190 first_entry = NULL;
3191 entry = tmp_entry;
3192 while (entry->end < saved_end) {
3193 /*
3194 * In case of failure, handle entries
3195 * that were not fully wired here;
3196 * fully wired entries are handled
3197 * later.
3198 */
3199 if (rv != KERN_SUCCESS &&
3200 faddr < entry->end)
3201 vm_map_wire_entry_failure(map,
3202 entry, faddr);
3203 entry = entry->next;
3204 }
3205 }
3206 last_timestamp = map->timestamp;
3207 if (rv != KERN_SUCCESS) {
3208 vm_map_wire_entry_failure(map, entry, faddr);
3209 end = entry->end;
3210 goto done;
3211 }
3212 } else if (!user_wire ||
3213 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3214 entry->wired_count++;
3215 }
3216 /*
3217 * Check the map for holes in the specified region.
3218 * If VM_MAP_WIRE_HOLESOK was specified, skip this check.
3219 */
3220 next_entry:
3221 if ((flags & VM_MAP_WIRE_HOLESOK) == 0 &&
3222 entry->end < end && entry->next->start > entry->end) {
3223 end = entry->end;
3224 rv = KERN_INVALID_ADDRESS;
3225 goto done;
3226 }
3227 entry = entry->next;
3228 }
3229 rv = KERN_SUCCESS;
3230 done:
3231 need_wakeup = FALSE;
3232 if (first_entry == NULL) {
3233 result = vm_map_lookup_entry(map, start, &first_entry);
3234 if (!result && (flags & VM_MAP_WIRE_HOLESOK))
3235 first_entry = first_entry->next;
3236 else
3237 KASSERT(result, ("vm_map_wire: lookup failed"));
3238 }
3239 for (entry = first_entry; entry->start < end; entry = entry->next) {
3240 /*
3241 * If VM_MAP_WIRE_HOLESOK was specified, an empty
3242 * space in the unwired region could have been mapped
3243 * while the map lock was dropped for faulting in the
3244 * pages or draining MAP_ENTRY_IN_TRANSITION.
3245 * Moreover, another thread could be simultaneously
3246 * wiring this new mapping entry. Detect these cases
3247 		 * and skip any entries not marked as in transition by us.
3248 */
3249 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) == 0 ||
3250 entry->wiring_thread != curthread) {
3251 KASSERT((flags & VM_MAP_WIRE_HOLESOK) != 0,
3252 ("vm_map_wire: !HOLESOK and new/changed entry"));
3253 continue;
3254 }
3255
3256 if ((entry->eflags & MAP_ENTRY_WIRE_SKIPPED) != 0)
3257 goto next_entry_done;
3258
3259 if (rv == KERN_SUCCESS) {
3260 if (user_wire)
3261 entry->eflags |= MAP_ENTRY_USER_WIRED;
3262 } else if (entry->wired_count == -1) {
3263 /*
3264 * Wiring failed on this entry. Thus, unwiring is
3265 * unnecessary.
3266 */
3267 entry->wired_count = 0;
3268 } else if (!user_wire ||
3269 (entry->eflags & MAP_ENTRY_USER_WIRED) == 0) {
3270 /*
3271 * Undo the wiring. Wiring succeeded on this entry
3272 * but failed on a later entry.
3273 */
3274 if (entry->wired_count == 1)
3275 vm_map_entry_unwire(map, entry);
3276 else
3277 entry->wired_count--;
3278 }
3279 next_entry_done:
3280 KASSERT((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0,
3281 ("vm_map_wire: in-transition flag missing %p", entry));
3282 KASSERT(entry->wiring_thread == curthread,
3283 ("vm_map_wire: alien wire %p", entry));
3284 entry->eflags &= ~(MAP_ENTRY_IN_TRANSITION |
3285 MAP_ENTRY_WIRE_SKIPPED);
3286 entry->wiring_thread = NULL;
3287 if (entry->eflags & MAP_ENTRY_NEEDS_WAKEUP) {
3288 entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
3289 need_wakeup = TRUE;
3290 }
3291 vm_map_simplify_entry(map, entry);
3292 }
3293 vm_map_unlock(map);
3294 if (need_wakeup)
3295 vm_map_wakeup(map);
3296 return (rv);
3297 }
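/*
 * Usage sketch (illustrative): the mlock()/munlock()-style pairing of the
 * two routines above, requesting user wiring and disallowing holes in the
 * range:
 *
 *	rv = vm_map_wire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 *	...
 *	rv = vm_map_unwire(map, start, end,
 *	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
 */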
3298
3299 /*
3300 * vm_map_sync
3301 *
3302 * Push any dirty cached pages in the address range to their pager.
3303 * If syncio is TRUE, dirty pages are written synchronously.
3304 * If invalidate is TRUE, any cached pages are freed as well.
3305 *
3306 * If the size of the region from start to end is zero, we are
3307 * supposed to flush all modified pages within the region containing
3308 * start. Unfortunately, a region can be split or coalesced with
3309 * neighboring regions, making it difficult to determine what the
3310 * original region was. Therefore, we approximate this requirement by
3311 * flushing the current region containing start.
3312 *
3313 * Returns an error if any part of the specified range is not mapped.
3314 */
3315 int
3316 vm_map_sync(
3317 vm_map_t map,
3318 vm_offset_t start,
3319 vm_offset_t end,
3320 boolean_t syncio,
3321 boolean_t invalidate)
3322 {
3323 vm_map_entry_t current;
3324 vm_map_entry_t entry;
3325 vm_size_t size;
3326 vm_object_t object;
3327 vm_ooffset_t offset;
3328 unsigned int last_timestamp;
3329 boolean_t failed;
3330
3331 vm_map_lock_read(map);
3332 VM_MAP_RANGE_CHECK(map, start, end);
3333 if (!vm_map_lookup_entry(map, start, &entry)) {
3334 vm_map_unlock_read(map);
3335 return (KERN_INVALID_ADDRESS);
3336 } else if (start == end) {
3337 start = entry->start;
3338 end = entry->end;
3339 }
3340 /*
3341 * Make a first pass to check for user-wired memory and holes.
3342 */
3343 for (current = entry; current->start < end; current = current->next) {
3344 if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
3345 vm_map_unlock_read(map);
3346 return (KERN_INVALID_ARGUMENT);
3347 }
3348 if (end > current->end &&
3349 current->end != current->next->start) {
3350 vm_map_unlock_read(map);
3351 return (KERN_INVALID_ADDRESS);
3352 }
3353 }
3354
3355 if (invalidate)
3356 pmap_remove(map->pmap, start, end);
3357 failed = FALSE;
3358
3359 /*
3360 * Make a second pass, cleaning/uncaching pages from the indicated
3361 * objects as we go.
3362 */
3363 for (current = entry; current->start < end;) {
3364 offset = current->offset + (start - current->start);
3365 size = (end <= current->end ? end : current->end) - start;
3366 if (current->eflags & MAP_ENTRY_IS_SUB_MAP) {
3367 vm_map_t smap;
3368 vm_map_entry_t tentry;
3369 vm_size_t tsize;
3370
3371 smap = current->object.sub_map;
3372 vm_map_lock_read(smap);
3373 (void) vm_map_lookup_entry(smap, offset, &tentry);
3374 tsize = tentry->end - offset;
3375 if (tsize < size)
3376 size = tsize;
3377 object = tentry->object.vm_object;
3378 offset = tentry->offset + (offset - tentry->start);
3379 vm_map_unlock_read(smap);
3380 } else {
3381 object = current->object.vm_object;
3382 }
3383 vm_object_reference(object);
3384 last_timestamp = map->timestamp;
3385 vm_map_unlock_read(map);
3386 if (!vm_object_sync(object, offset, size, syncio, invalidate))
3387 failed = TRUE;
3388 start += size;
3389 vm_object_deallocate(object);
3390 vm_map_lock_read(map);
3391 if (last_timestamp == map->timestamp ||
3392 		    !vm_map_lookup_entry(map, start, &current))
3393 current = current->next;
3394 }
3395
3396 vm_map_unlock_read(map);
3397 return (failed ? KERN_FAILURE : KERN_SUCCESS);
3398 }
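/*
 * Usage sketch (illustrative): an msync(MS_SYNC | MS_INVALIDATE)-style call,
 * writing dirty pages synchronously and then discarding the cached pages:
 *
 *	rv = vm_map_sync(map, start, end, TRUE, TRUE);
 */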
3399
3400 /*
3401 * vm_map_entry_unwire: [ internal use only ]
3402 *
3403 * Make the region specified by this entry pageable.
3404 *
3405 * The map in question should be locked.
3406 * [This is the reason for this routine's existence.]
3407 */
3408 static void
3409 vm_map_entry_unwire(vm_map_t map, vm_map_entry_t entry)
3410 {
3411
3412 VM_MAP_ASSERT_LOCKED(map);
3413 KASSERT(entry->wired_count > 0,
3414 ("vm_map_entry_unwire: entry %p isn't wired", entry));
3415 pmap_unwire(map->pmap, entry->start, entry->end);
3416 vm_object_unwire(entry->object.vm_object, entry->offset, entry->end -
3417 entry->start, PQ_ACTIVE);
3418 entry->wired_count = 0;
3419 }
3420
3421 static void
3422 vm_map_entry_deallocate(vm_map_entry_t entry, boolean_t system_map)
3423 {
3424
3425 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0)
3426 vm_object_deallocate(entry->object.vm_object);
3427 uma_zfree(system_map ? kmapentzone : mapentzone, entry);
3428 }
3429
3430 /*
3431 * vm_map_entry_delete: [ internal use only ]
3432 *
3433 * Deallocate the given entry from the target map.
3434 */
3435 static void
3436 vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
3437 {
3438 vm_object_t object;
3439 vm_pindex_t offidxstart, offidxend, count, size1;
3440 vm_size_t size;
3441
3442 vm_map_entry_unlink(map, entry, UNLINK_MERGE_NONE);
3443 object = entry->object.vm_object;
3444
3445 if ((entry->eflags & MAP_ENTRY_GUARD) != 0) {
3446 MPASS(entry->cred == NULL);
3447 MPASS((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0);
3448 MPASS(object == NULL);
3449 vm_map_entry_deallocate(entry, map->system_map);
3450 return;
3451 }
3452
3453 size = entry->end - entry->start;
3454 map->size -= size;
3455
3456 if (entry->cred != NULL) {
3457 swap_release_by_cred(size, entry->cred);
3458 crfree(entry->cred);
3459 }
3460
3461 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
3462 (object != NULL)) {
3463 KASSERT(entry->cred == NULL || object->cred == NULL ||
3464 (entry->eflags & MAP_ENTRY_NEEDS_COPY),
3465 ("OVERCOMMIT vm_map_entry_delete: both cred %p", entry));
3466 count = atop(size);
3467 offidxstart = OFF_TO_IDX(entry->offset);
3468 offidxend = offidxstart + count;
3469 VM_OBJECT_WLOCK(object);
3470 if (object->ref_count != 1 && ((object->flags & (OBJ_NOSPLIT |
3471 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING ||
3472 object == kernel_object)) {
3473 vm_object_collapse(object);
3474
3475 /*
3476 * The option OBJPR_NOTMAPPED can be passed here
3477 * because vm_map_delete() already performed
3478 * pmap_remove() on the only mapping to this range
3479 * of pages.
3480 */
3481 vm_object_page_remove(object, offidxstart, offidxend,
3482 OBJPR_NOTMAPPED);
3483 if (object->type == OBJT_SWAP)
3484 swap_pager_freespace(object, offidxstart,
3485 count);
3486 if (offidxend >= object->size &&
3487 offidxstart < object->size) {
3488 size1 = object->size;
3489 object->size = offidxstart;
3490 if (object->cred != NULL) {
3491 size1 -= object->size;
3492 KASSERT(object->charge >= ptoa(size1),
3493 ("object %p charge < 0", object));
3494 swap_release_by_cred(ptoa(size1),
3495 object->cred);
3496 object->charge -= ptoa(size1);
3497 }
3498 }
3499 }
3500 VM_OBJECT_WUNLOCK(object);
3501 } else
3502 entry->object.vm_object = NULL;
3503 if (map->system_map)
3504 vm_map_entry_deallocate(entry, TRUE);
3505 else {
3506 entry->next = curthread->td_map_def_user;
3507 curthread->td_map_def_user = entry;
3508 }
3509 }
3510
3511 /*
3512 * vm_map_delete: [ internal use only ]
3513 *
3514 * Deallocates the given address range from the target
3515 * map.
3516 */
3517 int
3518 vm_map_delete(vm_map_t map, vm_offset_t start, vm_offset_t end)
3519 {
3520 vm_map_entry_t entry;
3521 vm_map_entry_t first_entry;
3522
3523 VM_MAP_ASSERT_LOCKED(map);
3524 if (start == end)
3525 return (KERN_SUCCESS);
3526
3527 /*
3528 * Find the start of the region, and clip it
3529 */
3530 if (!vm_map_lookup_entry(map, start, &first_entry))
3531 entry = first_entry->next;
3532 else {
3533 entry = first_entry;
3534 vm_map_clip_start(map, entry, start);
3535 }
3536
3537 /*
3538 * Step through all entries in this region
3539 */
3540 while (entry->start < end) {
3541 vm_map_entry_t next;
3542
3543 /*
3544 * Wait for wiring or unwiring of an entry to complete.
3545 * Also wait for any system wirings to disappear on
3546 * user maps.
3547 */
3548 if ((entry->eflags & MAP_ENTRY_IN_TRANSITION) != 0 ||
3549 (vm_map_pmap(map) != kernel_pmap &&
3550 vm_map_entry_system_wired_count(entry) != 0)) {
3551 unsigned int last_timestamp;
3552 vm_offset_t saved_start;
3553 vm_map_entry_t tmp_entry;
3554
3555 saved_start = entry->start;
3556 entry->eflags |= MAP_ENTRY_NEEDS_WAKEUP;
3557 last_timestamp = map->timestamp;
3558 (void) vm_map_unlock_and_wait(map, 0);
3559 vm_map_lock(map);
3560 if (last_timestamp + 1 != map->timestamp) {
3561 /*
3562 * Look again for the entry because the map was
3563 * modified while it was unlocked.
3564 * Specifically, the entry may have been
3565 * clipped, merged, or deleted.
3566 */
3567 if (!vm_map_lookup_entry(map, saved_start,
3568 &tmp_entry))
3569 entry = tmp_entry->next;
3570 else {
3571 entry = tmp_entry;
3572 vm_map_clip_start(map, entry,
3573 saved_start);
3574 }
3575 }
3576 continue;
3577 }
3578 vm_map_clip_end(map, entry, end);
3579
3580 next = entry->next;
3581
3582 /*
3583 * Unwire before removing addresses from the pmap; otherwise,
3584 * unwiring will put the entries back in the pmap.
3585 */
3586 if (entry->wired_count != 0)
3587 vm_map_entry_unwire(map, entry);
3588
3589 /*
3590 * Remove mappings for the pages, but only if the
3591 * mappings could exist. For instance, it does not
3592 * make sense to call pmap_remove() for guard entries.
3593 */
3594 if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) != 0 ||
3595 entry->object.vm_object != NULL)
3596 pmap_remove(map->pmap, entry->start, entry->end);
3597
3598 if (entry->end == map->anon_loc)
3599 map->anon_loc = entry->start;
3600
3601 /*
3602 * Delete the entry only after removing all pmap
3603 * entries pointing to its pages. (Otherwise, its
3604 * page frames may be reallocated, and any modify bits
3605 * will be set in the wrong object!)
3606 */
3607 vm_map_entry_delete(map, entry);
3608 entry = next;
3609 }
3610 return (KERN_SUCCESS);
3611 }
3612
3613 /*
3614 * vm_map_remove:
3615 *
3616 * Remove the given address range from the target map.
3617 * This is the exported form of vm_map_delete.
3618 */
3619 int
3620 vm_map_remove(vm_map_t map, vm_offset_t start, vm_offset_t end)
3621 {
3622 int result;
3623
3624 vm_map_lock(map);
3625 VM_MAP_RANGE_CHECK(map, start, end);
3626 result = vm_map_delete(map, start, end);
3627 vm_map_unlock(map);
3628 return (result);
3629 }
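
/*
 * Illustrative sketch, not part of the original file: a hypothetical helper
 * showing how a caller might use vm_map_remove() on a possibly unaligned
 * byte range.  The helper name and the page-rounding policy are assumptions
 * made for this example only; vm_map_remove() acquires and releases the map
 * lock itself, so the caller must not hold it.
 */
static __unused int
example_remove_range(vm_map_t map, vm_offset_t addr, vm_size_t len)
{

	/* Round the byte range out to page boundaries before unmapping. */
	return (vm_map_remove(map, trunc_page(addr), round_page(addr + len)));
}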
3630
3631 /*
3632 * vm_map_check_protection:
3633 *
3634 * Assert that the target map allows the specified privilege on the
3635 * entire address region given. The entire region must be allocated.
3636 *
3637 * WARNING! This code does not and should not check whether the
3638 * contents of the region are accessible. For example, a smaller file
3639 * might be mapped into a larger address space.
3640 *
3641 * NOTE! This code is also called by munmap().
3642 *
3643 * The map must be locked. A read lock is sufficient.
3644 */
3645 boolean_t
3646 vm_map_check_protection(vm_map_t map, vm_offset_t start, vm_offset_t end,
3647 vm_prot_t protection)
3648 {
3649 vm_map_entry_t entry;
3650 vm_map_entry_t tmp_entry;
3651
3652 if (!vm_map_lookup_entry(map, start, &tmp_entry))
3653 return (FALSE);
3654 entry = tmp_entry;
3655
3656 while (start < end) {
3657 /*
3658 * No holes allowed!
3659 */
3660 if (start < entry->start)
3661 return (FALSE);
3662 /*
3663 * Check protection associated with entry.
3664 */
3665 if ((entry->protection & protection) != protection)
3666 return (FALSE);
3667 /* go to next entry */
3668 start = entry->end;
3669 entry = entry->next;
3670 }
3671 return (TRUE);
3672 }
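
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller might verify that an entire range is readable.  As documented
 * above, the caller must already hold the map lock (a read lock is
 * sufficient); the helper name is an assumption for this example only.
 */
static __unused boolean_t
example_range_is_readable(vm_map_t map, vm_offset_t start, vm_offset_t end)
{

	/* Returns FALSE on any hole or any entry lacking VM_PROT_READ. */
	return (vm_map_check_protection(map, start, end, VM_PROT_READ));
}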
3673
3674 /*
3675 * vm_map_copy_entry:
3676 *
3677 * Copies the contents of the source entry to the destination
3678 * entry. The entries *must* be aligned properly.
3679 */
3680 static void
3681 vm_map_copy_entry(
3682 vm_map_t src_map,
3683 vm_map_t dst_map,
3684 vm_map_entry_t src_entry,
3685 vm_map_entry_t dst_entry,
3686 vm_ooffset_t *fork_charge)
3687 {
3688 vm_object_t src_object;
3689 vm_map_entry_t fake_entry;
3690 vm_offset_t size;
3691 struct ucred *cred;
3692 int charged;
3693
3694 VM_MAP_ASSERT_LOCKED(dst_map);
3695
3696 if ((dst_entry->eflags|src_entry->eflags) & MAP_ENTRY_IS_SUB_MAP)
3697 return;
3698
3699 if (src_entry->wired_count == 0 ||
3700 (src_entry->protection & VM_PROT_WRITE) == 0) {
3701 /*
3702 * If the source entry is marked needs_copy, it is already
3703 * write-protected.
3704 */
3705 if ((src_entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0 &&
3706 (src_entry->protection & VM_PROT_WRITE) != 0) {
3707 pmap_protect(src_map->pmap,
3708 src_entry->start,
3709 src_entry->end,
3710 src_entry->protection & ~VM_PROT_WRITE);
3711 }
3712
3713 /*
3714 * Make a copy of the object.
3715 */
3716 size = src_entry->end - src_entry->start;
3717 if ((src_object = src_entry->object.vm_object) != NULL) {
3718 VM_OBJECT_WLOCK(src_object);
3719 charged = ENTRY_CHARGED(src_entry);
3720 if (src_object->handle == NULL &&
3721 (src_object->type == OBJT_DEFAULT ||
3722 src_object->type == OBJT_SWAP)) {
3723 vm_object_collapse(src_object);
3724 if ((src_object->flags & (OBJ_NOSPLIT |
3725 OBJ_ONEMAPPING)) == OBJ_ONEMAPPING) {
3726 vm_object_split(src_entry);
3727 src_object =
3728 src_entry->object.vm_object;
3729 }
3730 }
3731 vm_object_reference_locked(src_object);
3732 vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
3733 if (src_entry->cred != NULL &&
3734 !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
3735 KASSERT(src_object->cred == NULL,
3736 ("OVERCOMMIT: vm_map_copy_entry: cred %p",
3737 src_object));
3738 src_object->cred = src_entry->cred;
3739 src_object->charge = size;
3740 }
3741 VM_OBJECT_WUNLOCK(src_object);
3742 dst_entry->object.vm_object = src_object;
3743 if (charged) {
3744 cred = curthread->td_ucred;
3745 crhold(cred);
3746 dst_entry->cred = cred;
3747 *fork_charge += size;
3748 if (!(src_entry->eflags &
3749 MAP_ENTRY_NEEDS_COPY)) {
3750 crhold(cred);
3751 src_entry->cred = cred;
3752 *fork_charge += size;
3753 }
3754 }
3755 src_entry->eflags |= MAP_ENTRY_COW |
3756 MAP_ENTRY_NEEDS_COPY;
3757 dst_entry->eflags |= MAP_ENTRY_COW |
3758 MAP_ENTRY_NEEDS_COPY;
3759 dst_entry->offset = src_entry->offset;
3760 if (src_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3761 /*
3762 * MAP_ENTRY_VN_WRITECNT cannot
3763 * indicate write reference from
3764 * src_entry, since the entry is
3765 * marked as needs copy. Allocate a
3766 * fake entry that is used to
3767 * decrement object->un_pager.vnp.writecount
3768 * at the appropriate time. Attach
3769 * fake_entry to the deferred list.
3770 */
3771 fake_entry = vm_map_entry_create(dst_map);
3772 fake_entry->eflags = MAP_ENTRY_VN_WRITECNT;
3773 src_entry->eflags &= ~MAP_ENTRY_VN_WRITECNT;
3774 vm_object_reference(src_object);
3775 fake_entry->object.vm_object = src_object;
3776 fake_entry->start = src_entry->start;
3777 fake_entry->end = src_entry->end;
3778 fake_entry->next = curthread->td_map_def_user;
3779 curthread->td_map_def_user = fake_entry;
3780 }
3781
3782 pmap_copy(dst_map->pmap, src_map->pmap,
3783 dst_entry->start, dst_entry->end - dst_entry->start,
3784 src_entry->start);
3785 } else {
3786 dst_entry->object.vm_object = NULL;
3787 dst_entry->offset = 0;
3788 if (src_entry->cred != NULL) {
3789 dst_entry->cred = curthread->td_ucred;
3790 crhold(dst_entry->cred);
3791 *fork_charge += size;
3792 }
3793 }
3794 } else {
3795 /*
3796 * We don't want to make writeable wired pages copy-on-write.
3797 * Immediately copy these pages into the new map by simulating
3798 * page faults. The new pages are pageable.
3799 */
3800 vm_fault_copy_entry(dst_map, src_map, dst_entry, src_entry,
3801 fork_charge);
3802 }
3803 }
3804
3805 /*
3806 * vmspace_map_entry_forked:
3807 * Update the newly-forked vmspace each time a map entry is inherited
3808 * or copied. The values for vm_dsize and vm_tsize are approximate
3809 * (and mostly-obsolete ideas in the face of mmap(2) et al.)
3810 */
3811 static void
3812 vmspace_map_entry_forked(const struct vmspace *vm1, struct vmspace *vm2,
3813 vm_map_entry_t entry)
3814 {
3815 vm_size_t entrysize;
3816 vm_offset_t newend;
3817
3818 if ((entry->eflags & MAP_ENTRY_GUARD) != 0)
3819 return;
3820 entrysize = entry->end - entry->start;
3821 vm2->vm_map.size += entrysize;
3822 if (entry->eflags & (MAP_ENTRY_GROWS_DOWN | MAP_ENTRY_GROWS_UP)) {
3823 vm2->vm_ssize += btoc(entrysize);
3824 } else if (entry->start >= (vm_offset_t)vm1->vm_daddr &&
3825 entry->start < (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize)) {
3826 newend = MIN(entry->end,
3827 (vm_offset_t)vm1->vm_daddr + ctob(vm1->vm_dsize));
3828 vm2->vm_dsize += btoc(newend - entry->start);
3829 } else if (entry->start >= (vm_offset_t)vm1->vm_taddr &&
3830 entry->start < (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize)) {
3831 newend = MIN(entry->end,
3832 (vm_offset_t)vm1->vm_taddr + ctob(vm1->vm_tsize));
3833 vm2->vm_tsize += btoc(newend - entry->start);
3834 }
3835 }
3836
3837 /*
3838 * vmspace_fork:
3839 * Create a new process vmspace structure and vm_map
3840 * based on those of an existing process. The new map
3841 * is based on the old map, according to the inheritance
3842 * values on the regions in that map.
3843 *
3844 * XXX It might be worth coalescing the entries added to the new vmspace.
3845 *
3846 * The source map must not be locked.
3847 */
3848 struct vmspace *
3849 vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
3850 {
3851 struct vmspace *vm2;
3852 vm_map_t new_map, old_map;
3853 vm_map_entry_t new_entry, old_entry;
3854 vm_object_t object;
3855 int error, locked;
3856 vm_inherit_t inh;
3857
3858 old_map = &vm1->vm_map;
3859 /* Copy immutable fields of vm1 to vm2. */
3860 vm2 = vmspace_alloc(vm_map_min(old_map), vm_map_max(old_map),
3861 pmap_pinit);
3862 if (vm2 == NULL)
3863 return (NULL);
3864
3865 vm2->vm_taddr = vm1->vm_taddr;
3866 vm2->vm_daddr = vm1->vm_daddr;
3867 vm2->vm_maxsaddr = vm1->vm_maxsaddr;
3868 vm_map_lock(old_map);
3869 if (old_map->busy)
3870 vm_map_wait_busy(old_map);
3871 new_map = &vm2->vm_map;
3872 locked = vm_map_trylock(new_map); /* trylock to silence WITNESS */
3873 KASSERT(locked, ("vmspace_fork: lock failed"));
3874
3875 error = pmap_vmspace_copy(new_map->pmap, old_map->pmap);
3876 if (error != 0) {
3877 sx_xunlock(&old_map->lock);
3878 sx_xunlock(&new_map->lock);
3879 vm_map_process_deferred();
3880 vmspace_free(vm2);
3881 return (NULL);
3882 }
3883
3884 new_map->anon_loc = old_map->anon_loc;
3885
3886 old_entry = old_map->header.next;
3887
3888 while (old_entry != &old_map->header) {
3889 if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)
3890 panic("vm_map_fork: encountered a submap");
3891
3892 inh = old_entry->inheritance;
3893 if ((old_entry->eflags & MAP_ENTRY_GUARD) != 0 &&
3894 inh != VM_INHERIT_NONE)
3895 inh = VM_INHERIT_COPY;
3896
3897 switch (inh) {
3898 case VM_INHERIT_NONE:
3899 break;
3900
3901 case VM_INHERIT_SHARE:
3902 /*
3903 * Clone the entry, creating the shared object if necessary.
3904 */
3905 object = old_entry->object.vm_object;
3906 if (object == NULL) {
3907 object = vm_object_allocate(OBJT_DEFAULT,
3908 atop(old_entry->end - old_entry->start));
3909 old_entry->object.vm_object = object;
3910 old_entry->offset = 0;
3911 if (old_entry->cred != NULL) {
3912 object->cred = old_entry->cred;
3913 object->charge = old_entry->end -
3914 old_entry->start;
3915 old_entry->cred = NULL;
3916 }
3917 }
3918
3919 /*
3920 * Add the reference before calling vm_object_shadow
3921 * to ensure that a shadow object is created.
3922 */
3923 vm_object_reference(object);
3924 if (old_entry->eflags & MAP_ENTRY_NEEDS_COPY) {
3925 vm_object_shadow(&old_entry->object.vm_object,
3926 &old_entry->offset,
3927 old_entry->end - old_entry->start);
3928 old_entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
3929 /* Transfer the second reference too. */
3930 vm_object_reference(
3931 old_entry->object.vm_object);
3932
3933 /*
3934 * As in vm_map_simplify_entry(), the
3935 * vnode lock will not be acquired in
3936 * this call to vm_object_deallocate().
3937 */
3938 vm_object_deallocate(object);
3939 object = old_entry->object.vm_object;
3940 }
3941 VM_OBJECT_WLOCK(object);
3942 vm_object_clear_flag(object, OBJ_ONEMAPPING);
3943 if (old_entry->cred != NULL) {
3944 KASSERT(object->cred == NULL, ("vmspace_fork both cred"));
3945 object->cred = old_entry->cred;
3946 object->charge = old_entry->end - old_entry->start;
3947 old_entry->cred = NULL;
3948 }
3949
3950 /*
3951 * Assert the correct state of the vnode
3952 * v_writecount while the object is locked, so that
3953 * it does not need to be relocked later just for the
3954 * assertion.
3955 */
3956 if (old_entry->eflags & MAP_ENTRY_VN_WRITECNT &&
3957 object->type == OBJT_VNODE) {
3958 KASSERT(((struct vnode *)object->handle)->
3959 v_writecount > 0,
3960 ("vmspace_fork: v_writecount %p", object));
3961 KASSERT(object->un_pager.vnp.writemappings > 0,
3962 ("vmspace_fork: vnp.writecount %p",
3963 object));
3964 }
3965 VM_OBJECT_WUNLOCK(object);
3966
3967 /*
3968 * Clone the entry, referencing the shared object.
3969 */
3970 new_entry = vm_map_entry_create(new_map);
3971 *new_entry = *old_entry;
3972 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
3973 MAP_ENTRY_IN_TRANSITION);
3974 new_entry->wiring_thread = NULL;
3975 new_entry->wired_count = 0;
3976 if (new_entry->eflags & MAP_ENTRY_VN_WRITECNT) {
3977 vnode_pager_update_writecount(object,
3978 new_entry->start, new_entry->end);
3979 }
3980 vm_map_entry_set_vnode_text(new_entry, true);
3981
3982 /*
3983 * Insert the entry into the new map -- we know we're
3984 * inserting at the end of the new map.
3985 */
3986 vm_map_entry_link(new_map, new_entry);
3987 vmspace_map_entry_forked(vm1, vm2, new_entry);
3988
3989 /*
3990 * Update the physical map
3991 */
3992 pmap_copy(new_map->pmap, old_map->pmap,
3993 new_entry->start,
3994 (old_entry->end - old_entry->start),
3995 old_entry->start);
3996 break;
3997
3998 case VM_INHERIT_COPY:
3999 /*
4000 * Clone the entry and link into the map.
4001 */
4002 new_entry = vm_map_entry_create(new_map);
4003 *new_entry = *old_entry;
4004 /*
4005 * Copied entry is COW over the old object.
4006 */
4007 new_entry->eflags &= ~(MAP_ENTRY_USER_WIRED |
4008 MAP_ENTRY_IN_TRANSITION | MAP_ENTRY_VN_WRITECNT);
4009 new_entry->wiring_thread = NULL;
4010 new_entry->wired_count = 0;
4011 new_entry->object.vm_object = NULL;
4012 new_entry->cred = NULL;
4013 vm_map_entry_link(new_map, new_entry);
4014 vmspace_map_entry_forked(vm1, vm2, new_entry);
4015 vm_map_copy_entry(old_map, new_map, old_entry,
4016 new_entry, fork_charge);
4017 vm_map_entry_set_vnode_text(new_entry, true);
4018 break;
4019
4020 case VM_INHERIT_ZERO:
4021 /*
4022 * Create a new anonymous mapping entry modelled from
4023 * the old one.
4024 */
4025 new_entry = vm_map_entry_create(new_map);
4026 memset(new_entry, 0, sizeof(*new_entry));
4027
4028 new_entry->start = old_entry->start;
4029 new_entry->end = old_entry->end;
4030 new_entry->eflags = old_entry->eflags &
4031 ~(MAP_ENTRY_USER_WIRED | MAP_ENTRY_IN_TRANSITION |
4032 MAP_ENTRY_VN_WRITECNT | MAP_ENTRY_VN_EXEC);
4033 new_entry->protection = old_entry->protection;
4034 new_entry->max_protection = old_entry->max_protection;
4035 new_entry->inheritance = VM_INHERIT_ZERO;
4036
4037 vm_map_entry_link(new_map, new_entry);
4038 vmspace_map_entry_forked(vm1, vm2, new_entry);
4039
4040 new_entry->cred = curthread->td_ucred;
4041 crhold(new_entry->cred);
4042 *fork_charge += (new_entry->end - new_entry->start);
4043
4044 break;
4045 }
4046 old_entry = old_entry->next;
4047 }
4048 /*
4049 * Use inlined vm_map_unlock() to postpone handling the deferred
4050 * map entries, which cannot be done until both old_map and
4051 * new_map locks are released.
4052 */
4053 sx_xunlock(&old_map->lock);
4054 sx_xunlock(&new_map->lock);
4055 vm_map_process_deferred();
4056
4057 return (vm2);
4058 }
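
/*
 * Illustrative sketch, not part of the original file: the caller-side
 * contract of vmspace_fork(), mirroring vmspace_unshare() below.  The
 * accumulated fork_charge must be reserved against a credential, or the
 * new vmspace must be freed.  The helper name is an assumption for this
 * example only.
 */
static __unused struct vmspace *
example_fork_vmspace(struct vmspace *vm1, struct ucred *cred)
{
	struct vmspace *vm2;
	vm_ooffset_t fork_charge;

	fork_charge = 0;
	vm2 = vmspace_fork(vm1, &fork_charge);	/* source map unlocked */
	if (vm2 == NULL)
		return (NULL);
	if (!swap_reserve_by_cred(fork_charge, cred)) {
		vmspace_free(vm2);
		return (NULL);
	}
	return (vm2);
}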
4059
4060 /*
4061 * Create a process's stack for exec_new_vmspace(). This function is never
4062 * asked to wire the newly created stack.
4063 */
4064 int
4065 vm_map_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4066 vm_prot_t prot, vm_prot_t max, int cow)
4067 {
4068 vm_size_t growsize, init_ssize;
4069 rlim_t vmemlim;
4070 int rv;
4071
4072 MPASS((map->flags & MAP_WIREFUTURE) == 0);
4073 growsize = sgrowsiz;
4074 init_ssize = (max_ssize < growsize) ? max_ssize : growsize;
4075 vm_map_lock(map);
4076 vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4077 /* If we would blow our VMEM resource limit, no go */
4078 if (map->size + init_ssize > vmemlim) {
4079 rv = KERN_NO_SPACE;
4080 goto out;
4081 }
4082 rv = vm_map_stack_locked(map, addrbos, max_ssize, growsize, prot,
4083 max, cow);
4084 out:
4085 vm_map_unlock(map);
4086 return (rv);
4087 }
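
/*
 * Illustrative sketch, not part of the original file: a hypothetical caller
 * creating a max_ssize-byte, downward-growing stack whose bottom-of-range
 * address is addrbos.  The helper name and the use of VM_PROT_ALL are
 * assumptions for this example only; the grow direction is passed through
 * the cow argument, as vm_map_stack_locked() explains below.
 */
static __unused int
example_create_stack(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize)
{

	return (vm_map_stack(map, addrbos, max_ssize, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_STACK_GROWS_DOWN));
}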
4088
4089 static int stack_guard_page = 1;
4090 SYSCTL_INT(_security_bsd, OID_AUTO, stack_guard_page, CTLFLAG_RWTUN,
4091 &stack_guard_page, 0,
4092 "Specifies the number of guard pages for a stack that grows");
4093
4094 static int
4095 vm_map_stack_locked(vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
4096 vm_size_t growsize, vm_prot_t prot, vm_prot_t max, int cow)
4097 {
4098 vm_map_entry_t new_entry, prev_entry;
4099 vm_offset_t bot, gap_bot, gap_top, top;
4100 vm_size_t init_ssize, sgp;
4101 int orient, rv;
4102
4103 /*
4104 * The stack orientation is piggybacked with the cow argument.
4105 * Extract it into orient and mask the cow argument so that we
4106 * don't pass it around further.
4107 */
4108 orient = cow & (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP);
4109 KASSERT(orient != 0, ("No stack grow direction"));
4110 KASSERT(orient != (MAP_STACK_GROWS_DOWN | MAP_STACK_GROWS_UP),
4111 ("bi-dir stack"));
4112
4113 if (addrbos < vm_map_min(map) ||
4114 addrbos + max_ssize > vm_map_max(map) ||
4115 addrbos + max_ssize <= addrbos)
4116 return (KERN_INVALID_ADDRESS);
4117 sgp = (curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ? 0 :
4118 (vm_size_t)stack_guard_page * PAGE_SIZE;
4119 if (sgp >= max_ssize)
4120 return (KERN_INVALID_ARGUMENT);
4121
4122 init_ssize = growsize;
4123 if (max_ssize < init_ssize + sgp)
4124 init_ssize = max_ssize - sgp;
4125
4126 /* If addr is already mapped, no go */
4127 if (vm_map_lookup_entry(map, addrbos, &prev_entry))
4128 return (KERN_NO_SPACE);
4129
4130 /*
4131 * If we can't accommodate max_ssize in the current mapping, no go.
4132 */
4133 if (prev_entry->next->start < addrbos + max_ssize)
4134 return (KERN_NO_SPACE);
4135
4136 /*
4137 * We initially map a stack of only init_ssize. We will grow as
4138 * needed later. Depending on the orientation of the stack (i.e.
4139 * the grow direction) we either map at the top of the range, the
4140 * bottom of the range or in the middle.
4141 *
4142 * Note: we would normally expect prot and max to be VM_PROT_ALL,
4143 * and cow to be 0. Possibly we should eliminate these as input
4144 * parameters, and just pass these values here in the insert call.
4145 */
4146 if (orient == MAP_STACK_GROWS_DOWN) {
4147 bot = addrbos + max_ssize - init_ssize;
4148 top = bot + init_ssize;
4149 gap_bot = addrbos;
4150 gap_top = bot;
4151 } else /* if (orient == MAP_STACK_GROWS_UP) */ {
4152 bot = addrbos;
4153 top = bot + init_ssize;
4154 gap_bot = top;
4155 gap_top = addrbos + max_ssize;
4156 }
4157 rv = vm_map_insert(map, NULL, 0, bot, top, prot, max, cow);
4158 if (rv != KERN_SUCCESS)
4159 return (rv);
4160 new_entry = prev_entry->next;
4161 KASSERT(new_entry->end == top || new_entry->start == bot,
4162 ("Bad entry start/end for new stack entry"));
4163 KASSERT((orient & MAP_STACK_GROWS_DOWN) == 0 ||
4164 (new_entry->eflags & MAP_ENTRY_GROWS_DOWN) != 0,
4165 ("new entry lacks MAP_ENTRY_GROWS_DOWN"));
4166 KASSERT((orient & MAP_STACK_GROWS_UP) == 0 ||
4167 (new_entry->eflags & MAP_ENTRY_GROWS_UP) != 0,
4168 ("new entry lacks MAP_ENTRY_GROWS_UP"));
4169 if (gap_bot == gap_top)
4170 return (KERN_SUCCESS);
4171 rv = vm_map_insert(map, NULL, 0, gap_bot, gap_top, VM_PROT_NONE,
4172 VM_PROT_NONE, MAP_CREATE_GUARD | (orient == MAP_STACK_GROWS_DOWN ?
4173 MAP_CREATE_STACK_GAP_DN : MAP_CREATE_STACK_GAP_UP));
4174 if (rv == KERN_SUCCESS) {
4175 /*
4176 * Gap can never successfully handle a fault, so
4177 * read-ahead logic is never used for it. Re-use
4178 * next_read of the gap entry to store
4179 * stack_guard_page for vm_map_growstack().
4180 */
4181 if (orient == MAP_STACK_GROWS_DOWN)
4182 new_entry->prev->next_read = sgp;
4183 else
4184 new_entry->next->next_read = sgp;
4185 } else {
4186 (void)vm_map_delete(map, bot, top);
4187 }
4188 return (rv);
4189 }
4190
4191 /*
4192 * Attempts to grow a vm stack entry. Returns KERN_SUCCESS if we
4193 * successfully grow the stack.
4194 */
4195 static int
4196 vm_map_growstack(vm_map_t map, vm_offset_t addr, vm_map_entry_t gap_entry)
4197 {
4198 vm_map_entry_t stack_entry;
4199 struct proc *p;
4200 struct vmspace *vm;
4201 struct ucred *cred;
4202 vm_offset_t gap_end, gap_start, grow_start;
4203 size_t grow_amount, guard, max_grow;
4204 rlim_t lmemlim, stacklim, vmemlim;
4205 int rv, rv1;
4206 bool gap_deleted, grow_down, is_procstack;
4207 #ifdef notyet
4208 uint64_t limit;
4209 #endif
4210 #ifdef RACCT
4211 int error;
4212 #endif
4213
4214 p = curproc;
4215 vm = p->p_vmspace;
4216
4217 /*
4218 * Disallow stack growth when the access is performed by a
4219 * debugger or AIO daemon. The reason is that the wrong
4220 * resource limits are applied.
4221 */
4222 if (p != initproc && (map != &p->p_vmspace->vm_map ||
4223 p->p_textvp == NULL))
4224 return (KERN_FAILURE);
4225
4226 MPASS(!map->system_map);
4227
4228 lmemlim = lim_cur(curthread, RLIMIT_MEMLOCK);
4229 stacklim = lim_cur(curthread, RLIMIT_STACK);
4230 vmemlim = lim_cur(curthread, RLIMIT_VMEM);
4231 retry:
4232 /* If addr is not in a hole for a stack grow area, no need to grow. */
4233 if (gap_entry == NULL && !vm_map_lookup_entry(map, addr, &gap_entry))
4234 return (KERN_FAILURE);
4235 if ((gap_entry->eflags & MAP_ENTRY_GUARD) == 0)
4236 return (KERN_SUCCESS);
4237 if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_DN) != 0) {
4238 stack_entry = gap_entry->next;
4239 if ((stack_entry->eflags & MAP_ENTRY_GROWS_DOWN) == 0 ||
4240 stack_entry->start != gap_entry->end)
4241 return (KERN_FAILURE);
4242 grow_amount = round_page(stack_entry->start - addr);
4243 grow_down = true;
4244 } else if ((gap_entry->eflags & MAP_ENTRY_STACK_GAP_UP) != 0) {
4245 stack_entry = gap_entry->prev;
4246 if ((stack_entry->eflags & MAP_ENTRY_GROWS_UP) == 0 ||
4247 stack_entry->end != gap_entry->start)
4248 return (KERN_FAILURE);
4249 grow_amount = round_page(addr + 1 - stack_entry->end);
4250 grow_down = false;
4251 } else {
4252 return (KERN_FAILURE);
4253 }
4254 guard = (curproc->p_flag2 & P2_STKGAP_DISABLE) != 0 ? 0 :
4255 gap_entry->next_read;
4256 max_grow = gap_entry->end - gap_entry->start;
4257 if (guard > max_grow)
4258 return (KERN_NO_SPACE);
4259 max_grow -= guard;
4260 if (grow_amount > max_grow)
4261 return (KERN_NO_SPACE);
4262
4263 /*
4264 * If this is the main process stack, see if we're over the stack
4265 * limit.
4266 */
4267 is_procstack = addr >= (vm_offset_t)vm->vm_maxsaddr &&
4268 addr < (vm_offset_t)p->p_sysent->sv_usrstack;
4269 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim))
4270 return (KERN_NO_SPACE);
4271
4272 #ifdef RACCT
4273 if (racct_enable) {
4274 PROC_LOCK(p);
4275 if (is_procstack && racct_set(p, RACCT_STACK,
4276 ctob(vm->vm_ssize) + grow_amount)) {
4277 PROC_UNLOCK(p);
4278 return (KERN_NO_SPACE);
4279 }
4280 PROC_UNLOCK(p);
4281 }
4282 #endif
4283
4284 grow_amount = roundup(grow_amount, sgrowsiz);
4285 if (grow_amount > max_grow)
4286 grow_amount = max_grow;
4287 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > stacklim)) {
4288 grow_amount = trunc_page((vm_size_t)stacklim) -
4289 ctob(vm->vm_ssize);
4290 }
4291
4292 #ifdef notyet
4293 PROC_LOCK(p);
4294 limit = racct_get_available(p, RACCT_STACK);
4295 PROC_UNLOCK(p);
4296 if (is_procstack && (ctob(vm->vm_ssize) + grow_amount > limit))
4297 grow_amount = limit - ctob(vm->vm_ssize);
4298 #endif
4299
4300 if (!old_mlock && (map->flags & MAP_WIREFUTURE) != 0) {
4301 if (ptoa(pmap_wired_count(map->pmap)) + grow_amount > lmemlim) {
4302 rv = KERN_NO_SPACE;
4303 goto out;
4304 }
4305 #ifdef RACCT
4306 if (racct_enable) {
4307 PROC_LOCK(p);
4308 if (racct_set(p, RACCT_MEMLOCK,
4309 ptoa(pmap_wired_count(map->pmap)) + grow_amount)) {
4310 PROC_UNLOCK(p);
4311 rv = KERN_NO_SPACE;
4312 goto out;
4313 }
4314 PROC_UNLOCK(p);
4315 }
4316 #endif
4317 }
4318
4319 /* If we would blow our VMEM resource limit, no go */
4320 if (map->size + grow_amount > vmemlim) {
4321 rv = KERN_NO_SPACE;
4322 goto out;
4323 }
4324 #ifdef RACCT
4325 if (racct_enable) {
4326 PROC_LOCK(p);
4327 if (racct_set(p, RACCT_VMEM, map->size + grow_amount)) {
4328 PROC_UNLOCK(p);
4329 rv = KERN_NO_SPACE;
4330 goto out;
4331 }
4332 PROC_UNLOCK(p);
4333 }
4334 #endif
4335
4336 if (vm_map_lock_upgrade(map)) {
4337 gap_entry = NULL;
4338 vm_map_lock_read(map);
4339 goto retry;
4340 }
4341
4342 if (grow_down) {
4343 grow_start = gap_entry->end - grow_amount;
4344 if (gap_entry->start + grow_amount == gap_entry->end) {
4345 gap_start = gap_entry->start;
4346 gap_end = gap_entry->end;
4347 vm_map_entry_delete(map, gap_entry);
4348 gap_deleted = true;
4349 } else {
4350 MPASS(gap_entry->start < gap_entry->end - grow_amount);
4351 gap_entry->end -= grow_amount;
4352 vm_map_entry_resize_free(map, gap_entry);
4353 gap_deleted = false;
4354 }
4355 rv = vm_map_insert(map, NULL, 0, grow_start,
4356 grow_start + grow_amount,
4357 stack_entry->protection, stack_entry->max_protection,
4358 MAP_STACK_GROWS_DOWN);
4359 if (rv != KERN_SUCCESS) {
4360 if (gap_deleted) {
4361 rv1 = vm_map_insert(map, NULL, 0, gap_start,
4362 gap_end, VM_PROT_NONE, VM_PROT_NONE,
4363 MAP_CREATE_GUARD | MAP_CREATE_STACK_GAP_DN);
4364 MPASS(rv1 == KERN_SUCCESS);
4365 } else {
4366 gap_entry->end += grow_amount;
4367 vm_map_entry_resize_free(map, gap_entry);
4368 }
4369 }
4370 } else {
4371 grow_start = stack_entry->end;
4372 cred = stack_entry->cred;
4373 if (cred == NULL && stack_entry->object.vm_object != NULL)
4374 cred = stack_entry->object.vm_object->cred;
4375 if (cred != NULL && !swap_reserve_by_cred(grow_amount, cred))
4376 rv = KERN_NO_SPACE;
4377 /* Grow the underlying object if applicable. */
4378 else if (stack_entry->object.vm_object == NULL ||
4379 vm_object_coalesce(stack_entry->object.vm_object,
4380 stack_entry->offset,
4381 (vm_size_t)(stack_entry->end - stack_entry->start),
4382 (vm_size_t)grow_amount, cred != NULL)) {
4383 if (gap_entry->start + grow_amount == gap_entry->end)
4384 vm_map_entry_delete(map, gap_entry);
4385 else
4386 gap_entry->start += grow_amount;
4387 stack_entry->end += grow_amount;
4388 map->size += grow_amount;
4389 vm_map_entry_resize_free(map, stack_entry);
4390 rv = KERN_SUCCESS;
4391 } else
4392 rv = KERN_FAILURE;
4393 }
4394 if (rv == KERN_SUCCESS && is_procstack)
4395 vm->vm_ssize += btoc(grow_amount);
4396
4397 /*
4398 * Heed the MAP_WIREFUTURE flag if it was set for this process.
4399 */
4400 if (rv == KERN_SUCCESS && (map->flags & MAP_WIREFUTURE) != 0) {
4401 vm_map_unlock(map);
4402 vm_map_wire(map, grow_start, grow_start + grow_amount,
4403 VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
4404 vm_map_lock_read(map);
4405 } else
4406 vm_map_lock_downgrade(map);
4407
4408 out:
4409 #ifdef RACCT
4410 if (racct_enable && rv != KERN_SUCCESS) {
4411 PROC_LOCK(p);
4412 error = racct_set(p, RACCT_VMEM, map->size);
4413 KASSERT(error == 0, ("decreasing RACCT_VMEM failed"));
4414 if (!old_mlock) {
4415 error = racct_set(p, RACCT_MEMLOCK,
4416 ptoa(pmap_wired_count(map->pmap)));
4417 KASSERT(error == 0, ("decreasing RACCT_MEMLOCK failed"));
4418 }
4419 error = racct_set(p, RACCT_STACK, ctob(vm->vm_ssize));
4420 KASSERT(error == 0, ("decreasing RACCT_STACK failed"));
4421 PROC_UNLOCK(p);
4422 }
4423 #endif
4424
4425 return (rv);
4426 }
4427
4428 /*
4429 * Unshare the specified VM space for exec. If other processes are
4430 * sharing it, then create a new one. The new vmspace is empty.
4431 */
4432 int
4433 vmspace_exec(struct proc *p, vm_offset_t minuser, vm_offset_t maxuser)
4434 {
4435 struct vmspace *oldvmspace = p->p_vmspace;
4436 struct vmspace *newvmspace;
4437
4438 KASSERT((curthread->td_pflags & TDP_EXECVMSPC) == 0,
4439 ("vmspace_exec recursed"));
4440 newvmspace = vmspace_alloc(minuser, maxuser, pmap_pinit);
4441 if (newvmspace == NULL)
4442 return (ENOMEM);
4443 newvmspace->vm_swrss = oldvmspace->vm_swrss;
4444 /*
4445 * This code is written like this for prototype purposes. The
4446 * goal is to avoid running down the vmspace here, but to let the
4447 * other processes that are still using the vmspace finally
4448 * run it down. Even though there is little or no chance of blocking
4449 * here, it is a good idea to keep this form for future mods.
4450 */
4451 PROC_VMSPACE_LOCK(p);
4452 p->p_vmspace = newvmspace;
4453 PROC_VMSPACE_UNLOCK(p);
4454 if (p == curthread->td_proc)
4455 pmap_activate(curthread);
4456 curthread->td_pflags |= TDP_EXECVMSPC;
4457 return (0);
4458 }
4459
4460 /*
4461 * Unshare the specified VM space for forcing COW. This
4462 * is called by rfork, for the (RFMEM|RFPROC) == 0 case.
4463 */
4464 int
4465 vmspace_unshare(struct proc *p)
4466 {
4467 struct vmspace *oldvmspace = p->p_vmspace;
4468 struct vmspace *newvmspace;
4469 vm_ooffset_t fork_charge;
4470
4471 if (oldvmspace->vm_refcnt == 1)
4472 return (0);
4473 fork_charge = 0;
4474 newvmspace = vmspace_fork(oldvmspace, &fork_charge);
4475 if (newvmspace == NULL)
4476 return (ENOMEM);
4477 if (!swap_reserve_by_cred(fork_charge, p->p_ucred)) {
4478 vmspace_free(newvmspace);
4479 return (ENOMEM);
4480 }
4481 PROC_VMSPACE_LOCK(p);
4482 p->p_vmspace = newvmspace;
4483 PROC_VMSPACE_UNLOCK(p);
4484 if (p == curthread->td_proc)
4485 pmap_activate(curthread);
4486 vmspace_free(oldvmspace);
4487 return (0);
4488 }
4489
4490 /*
4491 * vm_map_lookup:
4492 *
4493 * Finds the VM object, offset, and
4494 * protection for a given virtual address in the
4495 * specified map, assuming a page fault of the
4496 * type specified.
4497 *
4498 * Leaves the map in question locked for read; return
4499 * values are guaranteed until a vm_map_lookup_done
4500 * call is performed. Note that the map argument
4501 * is in/out; the returned map must be used in
4502 * the call to vm_map_lookup_done.
4503 *
4504 * A handle (out_entry) is returned for use in
4505 * vm_map_lookup_done, to make that fast.
4506 *
4507 * If a lookup is requested with "write protection"
4508 * specified, the map may be changed to perform virtual
4509 * copying operations, although the data referenced will
4510 * remain the same.
4511 */
4512 int
4513 vm_map_lookup(vm_map_t *var_map, /* IN/OUT */
4514 vm_offset_t vaddr,
4515 vm_prot_t fault_typea,
4516 vm_map_entry_t *out_entry, /* OUT */
4517 vm_object_t *object, /* OUT */
4518 vm_pindex_t *pindex, /* OUT */
4519 vm_prot_t *out_prot, /* OUT */
4520 boolean_t *wired) /* OUT */
4521 {
4522 vm_map_entry_t entry;
4523 vm_map_t map = *var_map;
4524 vm_prot_t prot;
4525 vm_prot_t fault_type = fault_typea;
4526 vm_object_t eobject;
4527 vm_size_t size;
4528 struct ucred *cred;
4529
4530 RetryLookup:
4531
4532 vm_map_lock_read(map);
4533
4534 RetryLookupLocked:
4535 /*
4536 * Lookup the faulting address.
4537 */
4538 if (!vm_map_lookup_entry(map, vaddr, out_entry)) {
4539 vm_map_unlock_read(map);
4540 return (KERN_INVALID_ADDRESS);
4541 }
4542
4543 entry = *out_entry;
4544
4545 /*
4546 * Handle submaps.
4547 */
4548 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4549 vm_map_t old_map = map;
4550
4551 *var_map = map = entry->object.sub_map;
4552 vm_map_unlock_read(old_map);
4553 goto RetryLookup;
4554 }
4555
4556 /*
4557 * Check whether this task is allowed to have this page.
4558 */
4559 prot = entry->protection;
4560 if ((fault_typea & VM_PROT_FAULT_LOOKUP) != 0) {
4561 fault_typea &= ~VM_PROT_FAULT_LOOKUP;
4562 if (prot == VM_PROT_NONE && map != kernel_map &&
4563 (entry->eflags & MAP_ENTRY_GUARD) != 0 &&
4564 (entry->eflags & (MAP_ENTRY_STACK_GAP_DN |
4565 MAP_ENTRY_STACK_GAP_UP)) != 0 &&
4566 vm_map_growstack(map, vaddr, entry) == KERN_SUCCESS)
4567 goto RetryLookupLocked;
4568 }
4569 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4570 if ((fault_type & prot) != fault_type || prot == VM_PROT_NONE) {
4571 vm_map_unlock_read(map);
4572 return (KERN_PROTECTION_FAILURE);
4573 }
4574 KASSERT((prot & VM_PROT_WRITE) == 0 || (entry->eflags &
4575 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY)) !=
4576 (MAP_ENTRY_USER_WIRED | MAP_ENTRY_NEEDS_COPY),
4577 ("entry %p flags %x", entry, entry->eflags));
4578 if ((fault_typea & VM_PROT_COPY) != 0 &&
4579 (entry->max_protection & VM_PROT_WRITE) == 0 &&
4580 (entry->eflags & MAP_ENTRY_COW) == 0) {
4581 vm_map_unlock_read(map);
4582 return (KERN_PROTECTION_FAILURE);
4583 }
4584
4585 /*
4586 * If this page is not pageable, we have to get it for all possible
4587 * accesses.
4588 */
4589 *wired = (entry->wired_count != 0);
4590 if (*wired)
4591 fault_type = entry->protection;
4592 size = entry->end - entry->start;
4593 /*
4594 * If the entry was copy-on-write, we either ...
4595 */
4596 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4597 /*
4598 * If we want to write the page, we may as well handle that
4599 * now since we've got the map locked.
4600 *
4601 * If we don't need to write the page, we just demote the
4602 * permissions allowed.
4603 */
4604 if ((fault_type & VM_PROT_WRITE) != 0 ||
4605 (fault_typea & VM_PROT_COPY) != 0) {
4606 /*
4607 * Make a new object, and place it in the object
4608 * chain. Note that no new references have appeared
4609 * -- one just moved from the map to the new
4610 * object.
4611 */
4612 if (vm_map_lock_upgrade(map))
4613 goto RetryLookup;
4614
4615 if (entry->cred == NULL) {
4616 /*
4617 * The debugger owner is charged for
4618 * the memory.
4619 */
4620 cred = curthread->td_ucred;
4621 crhold(cred);
4622 if (!swap_reserve_by_cred(size, cred)) {
4623 crfree(cred);
4624 vm_map_unlock(map);
4625 return (KERN_RESOURCE_SHORTAGE);
4626 }
4627 entry->cred = cred;
4628 }
4629 vm_object_shadow(&entry->object.vm_object,
4630 &entry->offset, size);
4631 entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
4632 eobject = entry->object.vm_object;
4633 if (eobject->cred != NULL) {
4634 /*
4635 * The object was not shadowed.
4636 */
4637 swap_release_by_cred(size, entry->cred);
4638 crfree(entry->cred);
4639 entry->cred = NULL;
4640 } else if (entry->cred != NULL) {
4641 VM_OBJECT_WLOCK(eobject);
4642 eobject->cred = entry->cred;
4643 eobject->charge = size;
4644 VM_OBJECT_WUNLOCK(eobject);
4645 entry->cred = NULL;
4646 }
4647
4648 vm_map_lock_downgrade(map);
4649 } else {
4650 /*
4651 * We're attempting to read a copy-on-write page --
4652 * don't allow writes.
4653 */
4654 prot &= ~VM_PROT_WRITE;
4655 }
4656 }
4657
4658 /*
4659 * Create an object if necessary.
4660 */
4661 if (entry->object.vm_object == NULL &&
4662 !map->system_map) {
4663 if (vm_map_lock_upgrade(map))
4664 goto RetryLookup;
4665 entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
4666 atop(size));
4667 entry->offset = 0;
4668 if (entry->cred != NULL) {
4669 VM_OBJECT_WLOCK(entry->object.vm_object);
4670 entry->object.vm_object->cred = entry->cred;
4671 entry->object.vm_object->charge = size;
4672 VM_OBJECT_WUNLOCK(entry->object.vm_object);
4673 entry->cred = NULL;
4674 }
4675 vm_map_lock_downgrade(map);
4676 }
4677
4678 /*
4679 * Return the object/offset from this entry. If the entry was
4680 * copy-on-write or empty, it has been fixed up.
4681 */
4682 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4683 *object = entry->object.vm_object;
4684
4685 *out_prot = prot;
4686 return (KERN_SUCCESS);
4687 }
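
/*
 * Illustrative sketch, not part of the original file: the usual pairing of
 * vm_map_lookup() with vm_map_lookup_done(), in the style of a fault
 * handler.  The helper name and the read-only fault type are assumptions
 * for this example only.
 */
static __unused int
example_lookup_read(vm_map_t map, vm_offset_t vaddr)
{
	vm_map_entry_t entry;
	vm_object_t object;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	int rv;

	rv = vm_map_lookup(&map, vaddr, VM_PROT_READ, &entry, &object,
	    &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (rv);
	/*
	 * The returned map (possibly a submap) is read-locked here; the
	 * object and pindex remain valid until vm_map_lookup_done().
	 */
	vm_map_lookup_done(map, entry);
	return (KERN_SUCCESS);
}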
4688
4689 /*
4690 * vm_map_lookup_locked:
4691 *
4692 * Lookup the faulting address. A version of vm_map_lookup that returns
4693 * KERN_FAILURE instead of blocking on map lock or memory allocation.
4694 */
4695 int
4696 vm_map_lookup_locked(vm_map_t *var_map, /* IN/OUT */
4697 vm_offset_t vaddr,
4698 vm_prot_t fault_typea,
4699 vm_map_entry_t *out_entry, /* OUT */
4700 vm_object_t *object, /* OUT */
4701 vm_pindex_t *pindex, /* OUT */
4702 vm_prot_t *out_prot, /* OUT */
4703 boolean_t *wired) /* OUT */
4704 {
4705 vm_map_entry_t entry;
4706 vm_map_t map = *var_map;
4707 vm_prot_t prot;
4708 vm_prot_t fault_type = fault_typea;
4709
4710 /*
4711 * Lookup the faulting address.
4712 */
4713 if (!vm_map_lookup_entry(map, vaddr, out_entry))
4714 return (KERN_INVALID_ADDRESS);
4715
4716 entry = *out_entry;
4717
4718 /*
4719 * Fail if the entry refers to a submap.
4720 */
4721 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP)
4722 return (KERN_FAILURE);
4723
4724 /*
4725 * Check whether this task is allowed to have this page.
4726 */
4727 prot = entry->protection;
4728 fault_type &= VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE;
4729 if ((fault_type & prot) != fault_type)
4730 return (KERN_PROTECTION_FAILURE);
4731
4732 /*
4733 * If this page is not pageable, we have to get it for all possible
4734 * accesses.
4735 */
4736 *wired = (entry->wired_count != 0);
4737 if (*wired)
4738 fault_type = entry->protection;
4739
4740 if (entry->eflags & MAP_ENTRY_NEEDS_COPY) {
4741 /*
4742 * Fail if the entry was copy-on-write for a write fault.
4743 */
4744 if (fault_type & VM_PROT_WRITE)
4745 return (KERN_FAILURE);
4746 /*
4747 * We're attempting to read a copy-on-write page --
4748 * don't allow writes.
4749 */
4750 prot &= ~VM_PROT_WRITE;
4751 }
4752
4753 /*
4754 * Fail if an object should be created.
4755 */
4756 if (entry->object.vm_object == NULL && !map->system_map)
4757 return (KERN_FAILURE);
4758
4759 /*
4760 * Return the object/offset from this entry. If the entry was
4761 * copy-on-write or empty, it has been fixed up.
4762 */
4763 *pindex = OFF_TO_IDX((vaddr - entry->start) + entry->offset);
4764 *object = entry->object.vm_object;
4765
4766 *out_prot = prot;
4767 return (KERN_SUCCESS);
4768 }
4769
4770 /*
4771 * vm_map_lookup_done:
4772 *
4773 * Releases locks acquired by a vm_map_lookup
4774 * (according to the handle returned by that lookup).
4775 */
4776 void
4777 vm_map_lookup_done(vm_map_t map, vm_map_entry_t entry)
4778 {
4779 /*
4780 * Unlock the main-level map
4781 */
4782 vm_map_unlock_read(map);
4783 }
4784
4785 vm_offset_t
4786 vm_map_max_KBI(const struct vm_map *map)
4787 {
4788
4789 return (vm_map_max(map));
4790 }
4791
4792 vm_offset_t
4793 vm_map_min_KBI(const struct vm_map *map)
4794 {
4795
4796 return (vm_map_min(map));
4797 }
4798
4799 pmap_t
4800 vm_map_pmap_KBI(vm_map_t map)
4801 {
4802
4803 return (map->pmap);
4804 }
4805
4806 #include "opt_ddb.h"
4807 #ifdef DDB
4808 #include <sys/kernel.h>
4809
4810 #include <ddb/ddb.h>
4811
4812 static void
4813 vm_map_print(vm_map_t map)
4814 {
4815 vm_map_entry_t entry;
4816
4817 db_iprintf("Task map %p: pmap=%p, nentries=%d, version=%u\n",
4818 (void *)map,
4819 (void *)map->pmap, map->nentries, map->timestamp);
4820
4821 db_indent += 2;
4822 for (entry = map->header.next; entry != &map->header;
4823 entry = entry->next) {
4824 db_iprintf("map entry %p: start=%p, end=%p, eflags=%#x, \n",
4825 (void *)entry, (void *)entry->start, (void *)entry->end,
4826 entry->eflags);
4827 {
4828 static char *inheritance_name[4] =
4829 {"share", "copy", "none", "donate_copy"};
4830
4831 db_iprintf(" prot=%x/%x/%s",
4832 entry->protection,
4833 entry->max_protection,
4834 inheritance_name[(int)(unsigned char)entry->inheritance]);
4835 if (entry->wired_count != 0)
4836 db_printf(", wired");
4837 }
4838 if (entry->eflags & MAP_ENTRY_IS_SUB_MAP) {
4839 db_printf(", share=%p, offset=0x%jx\n",
4840 (void *)entry->object.sub_map,
4841 (uintmax_t)entry->offset);
4842 if ((entry->prev == &map->header) ||
4843 (entry->prev->object.sub_map !=
4844 entry->object.sub_map)) {
4845 db_indent += 2;
4846 vm_map_print((vm_map_t)entry->object.sub_map);
4847 db_indent -= 2;
4848 }
4849 } else {
4850 if (entry->cred != NULL)
4851 db_printf(", ruid %d", entry->cred->cr_ruid);
4852 db_printf(", object=%p, offset=0x%jx",
4853 (void *)entry->object.vm_object,
4854 (uintmax_t)entry->offset);
4855 if (entry->object.vm_object && entry->object.vm_object->cred)
4856 db_printf(", obj ruid %d charge %jx",
4857 entry->object.vm_object->cred->cr_ruid,
4858 (uintmax_t)entry->object.vm_object->charge);
4859 if (entry->eflags & MAP_ENTRY_COW)
4860 db_printf(", copy (%s)",
4861 (entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
4862 db_printf("\n");
4863
4864 if ((entry->prev == &map->header) ||
4865 (entry->prev->object.vm_object !=
4866 entry->object.vm_object)) {
4867 db_indent += 2;
4868 vm_object_print((db_expr_t)(intptr_t)
4869 entry->object.vm_object,
4870 0, 0, (char *)0);
4871 db_indent -= 2;
4872 }
4873 }
4874 }
4875 db_indent -= 2;
4876 }
4877
4878 DB_SHOW_COMMAND(map, map)
4879 {
4880
4881 if (!have_addr) {
4882 db_printf("usage: show map <addr>\n");
4883 return;
4884 }
4885 vm_map_print((vm_map_t)addr);
4886 }
4887
4888 DB_SHOW_COMMAND(procvm, procvm)
4889 {
4890 struct proc *p;
4891
4892 if (have_addr) {
4893 p = db_lookup_proc(addr);
4894 } else {
4895 p = curproc;
4896 }
4897
4898 db_printf("p = %p, vmspace = %p, map = %p, pmap = %p\n",
4899 (void *)p, (void *)p->p_vmspace, (void *)&p->p_vmspace->vm_map,
4900 (void *)vmspace_pmap(p->p_vmspace));
4901
4902 vm_map_print((vm_map_t)&p->p_vmspace->vm_map);
4903 }
4904
4905 #endif /* DDB */
4906