1a9643ea8Slogwang /*-
2*22ce4affSfengbojiang * SPDX-License-Identifier: (BSD-3-Clause AND MIT-CMU)
3*22ce4affSfengbojiang *
4a9643ea8Slogwang * Copyright (c) 1991, 1993
5a9643ea8Slogwang * The Regents of the University of California. All rights reserved.
6a9643ea8Slogwang *
7a9643ea8Slogwang * This code is derived from software contributed to Berkeley by
8a9643ea8Slogwang * The Mach Operating System project at Carnegie-Mellon University.
9a9643ea8Slogwang *
10a9643ea8Slogwang * Redistribution and use in source and binary forms, with or without
11a9643ea8Slogwang * modification, are permitted provided that the following conditions
12a9643ea8Slogwang * are met:
13a9643ea8Slogwang * 1. Redistributions of source code must retain the above copyright
14a9643ea8Slogwang * notice, this list of conditions and the following disclaimer.
15a9643ea8Slogwang * 2. Redistributions in binary form must reproduce the above copyright
16a9643ea8Slogwang * notice, this list of conditions and the following disclaimer in the
17a9643ea8Slogwang * documentation and/or other materials provided with the distribution.
18*22ce4affSfengbojiang * 3. Neither the name of the University nor the names of its contributors
19a9643ea8Slogwang * may be used to endorse or promote products derived from this software
20a9643ea8Slogwang * without specific prior written permission.
21a9643ea8Slogwang *
22a9643ea8Slogwang * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23a9643ea8Slogwang * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24a9643ea8Slogwang * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25a9643ea8Slogwang * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26a9643ea8Slogwang * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27a9643ea8Slogwang * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28a9643ea8Slogwang * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29a9643ea8Slogwang * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30a9643ea8Slogwang * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31a9643ea8Slogwang * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32a9643ea8Slogwang * SUCH DAMAGE.
33a9643ea8Slogwang *
34a9643ea8Slogwang * @(#)vm_map.h 8.9 (Berkeley) 5/17/95
35a9643ea8Slogwang *
36a9643ea8Slogwang *
37a9643ea8Slogwang * Copyright (c) 1987, 1990 Carnegie-Mellon University.
38a9643ea8Slogwang * All rights reserved.
39a9643ea8Slogwang *
40a9643ea8Slogwang * Authors: Avadis Tevanian, Jr., Michael Wayne Young
41a9643ea8Slogwang *
42a9643ea8Slogwang * Permission to use, copy, modify and distribute this software and
43a9643ea8Slogwang * its documentation is hereby granted, provided that both the copyright
44a9643ea8Slogwang * notice and this permission notice appear in all copies of the
45a9643ea8Slogwang * software, derivative works or modified versions, and any portions
46a9643ea8Slogwang * thereof, and that both notices appear in supporting documentation.
47a9643ea8Slogwang *
48a9643ea8Slogwang * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
49a9643ea8Slogwang * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
50a9643ea8Slogwang * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
51a9643ea8Slogwang *
52a9643ea8Slogwang * Carnegie Mellon requests users of this software to return to
53a9643ea8Slogwang *
54a9643ea8Slogwang * Software Distribution Coordinator or [email protected]
55a9643ea8Slogwang * School of Computer Science
56a9643ea8Slogwang * Carnegie Mellon University
57a9643ea8Slogwang * Pittsburgh PA 15213-3890
58a9643ea8Slogwang *
59a9643ea8Slogwang * any improvements or extensions that they make and grant Carnegie the
60a9643ea8Slogwang * rights to redistribute these changes.
61a9643ea8Slogwang *
62a9643ea8Slogwang * $FreeBSD$
63a9643ea8Slogwang */
64a9643ea8Slogwang
65a9643ea8Slogwang /*
66a9643ea8Slogwang * Virtual memory map module definitions.
67a9643ea8Slogwang */
68a9643ea8Slogwang #ifndef _VM_MAP_
69a9643ea8Slogwang #define _VM_MAP_
70a9643ea8Slogwang
71a9643ea8Slogwang #include <sys/lock.h>
72a9643ea8Slogwang #include <sys/sx.h>
73a9643ea8Slogwang #include <sys/_mutex.h>
74a9643ea8Slogwang
75a9643ea8Slogwang /*
76a9643ea8Slogwang * Types defined:
77a9643ea8Slogwang *
78a9643ea8Slogwang * vm_map_t the high-level address map data structure.
79a9643ea8Slogwang * vm_map_entry_t an entry in an address map.
80a9643ea8Slogwang */
81a9643ea8Slogwang
82a9643ea8Slogwang typedef u_char vm_flags_t;
83a9643ea8Slogwang typedef u_int vm_eflags_t;
84a9643ea8Slogwang
85a9643ea8Slogwang /*
86a9643ea8Slogwang * Objects which live in maps may be either VM objects, or
87a9643ea8Slogwang * another map (called a "sharing map") which denotes read-write
88a9643ea8Slogwang * sharing with other maps.
89a9643ea8Slogwang */
/*
 * What a map entry points at: either a backing VM object or, when
 * the entry's MAP_ENTRY_IS_SUB_MAP eflag is set, another vm_map
 * ("sharing map").  Exactly one member is valid per entry.
 */
union vm_map_object {
	struct vm_object *vm_object;	/* backing VM object */
	struct vm_map *sub_map;		/* belongs to another map */
};
94a9643ea8Slogwang
95a9643ea8Slogwang /*
96a9643ea8Slogwang * Address map entries consist of start and end addresses,
97a9643ea8Slogwang * a VM object (or sharing map) and offset into that object,
98a9643ea8Slogwang * and user-exported inheritance and protection information.
99a9643ea8Slogwang * Also included is control information for virtual copy operations.
100a9643ea8Slogwang */
struct vm_map_entry {
	struct vm_map_entry *left;	/* left child or previous entry */
	struct vm_map_entry *right;	/* right child or next entry */
	vm_offset_t start;		/* start address */
	vm_offset_t end;		/* end address */
	vm_offset_t next_read;		/* vaddr of the next sequential read */
	vm_size_t max_free;		/* max free space in subtree */
	union vm_map_object object;	/* object I point to */
	vm_ooffset_t offset;		/* offset into object */
	vm_eflags_t eflags;		/* map entry flags (MAP_ENTRY_*) */
	vm_prot_t protection;		/* protection code */
	vm_prot_t max_protection;	/* maximum protection */
	vm_inherit_t inheritance;	/* inheritance */
	uint8_t read_ahead;		/* pages in the read-ahead window */
	int wired_count;		/* can be paged if = 0 */
	struct ucred *cred;		/* tmp storage for creator ref */
	struct thread *wiring_thread;	/* NOTE(review): presumably the thread
					   wiring this entry, paired with
					   MAP_ENTRY_IN_TRANSITION — confirm */
};
119a9643ea8Slogwang
120*22ce4affSfengbojiang #define MAP_ENTRY_NOSYNC 0x00000001
121*22ce4affSfengbojiang #define MAP_ENTRY_IS_SUB_MAP 0x00000002
122*22ce4affSfengbojiang #define MAP_ENTRY_COW 0x00000004
123*22ce4affSfengbojiang #define MAP_ENTRY_NEEDS_COPY 0x00000008
124*22ce4affSfengbojiang #define MAP_ENTRY_NOFAULT 0x00000010
125*22ce4affSfengbojiang #define MAP_ENTRY_USER_WIRED 0x00000020
126a9643ea8Slogwang
127*22ce4affSfengbojiang #define MAP_ENTRY_BEHAV_NORMAL 0x00000000 /* default behavior */
128*22ce4affSfengbojiang #define MAP_ENTRY_BEHAV_SEQUENTIAL 0x00000040 /* expect sequential
129*22ce4affSfengbojiang access */
130*22ce4affSfengbojiang #define MAP_ENTRY_BEHAV_RANDOM 0x00000080 /* expect random
131*22ce4affSfengbojiang access */
132*22ce4affSfengbojiang #define MAP_ENTRY_BEHAV_RESERVED 0x000000c0 /* future use */
133*22ce4affSfengbojiang #define MAP_ENTRY_BEHAV_MASK 0x000000c0
134*22ce4affSfengbojiang #define MAP_ENTRY_IN_TRANSITION 0x00000100 /* entry being
135*22ce4affSfengbojiang changed */
136*22ce4affSfengbojiang #define MAP_ENTRY_NEEDS_WAKEUP 0x00000200 /* waiters in
137*22ce4affSfengbojiang transition */
138*22ce4affSfengbojiang #define MAP_ENTRY_NOCOREDUMP 0x00000400 /* don't include in
139*22ce4affSfengbojiang a core */
140*22ce4affSfengbojiang #define MAP_ENTRY_VN_EXEC 0x00000800 /* text vnode mapping */
141*22ce4affSfengbojiang #define MAP_ENTRY_GROWS_DOWN 0x00001000 /* top-down stacks */
142*22ce4affSfengbojiang #define MAP_ENTRY_GROWS_UP 0x00002000 /* bottom-up stacks */
143a9643ea8Slogwang
144*22ce4affSfengbojiang #define MAP_ENTRY_WIRE_SKIPPED 0x00004000
145*22ce4affSfengbojiang #define MAP_ENTRY_WRITECNT 0x00008000 /* tracked writeable
146*22ce4affSfengbojiang mapping */
147*22ce4affSfengbojiang #define MAP_ENTRY_GUARD 0x00010000
148*22ce4affSfengbojiang #define MAP_ENTRY_STACK_GAP_DN 0x00020000
149*22ce4affSfengbojiang #define MAP_ENTRY_STACK_GAP_UP 0x00040000
150*22ce4affSfengbojiang #define MAP_ENTRY_HEADER 0x00080000
151a9643ea8Slogwang
152*22ce4affSfengbojiang #define MAP_ENTRY_SPLIT_BOUNDARY_MASK 0x00300000
153a9643ea8Slogwang
154*22ce4affSfengbojiang #define MAP_ENTRY_SPLIT_BOUNDARY_SHIFT 20
155a9643ea8Slogwang
156a9643ea8Slogwang #ifdef _KERNEL
157a9643ea8Slogwang static __inline u_char
vm_map_entry_behavior(vm_map_entry_t entry)158a9643ea8Slogwang vm_map_entry_behavior(vm_map_entry_t entry)
159a9643ea8Slogwang {
160a9643ea8Slogwang return (entry->eflags & MAP_ENTRY_BEHAV_MASK);
161a9643ea8Slogwang }
162a9643ea8Slogwang
163a9643ea8Slogwang static __inline int
vm_map_entry_user_wired_count(vm_map_entry_t entry)164a9643ea8Slogwang vm_map_entry_user_wired_count(vm_map_entry_t entry)
165a9643ea8Slogwang {
166a9643ea8Slogwang if (entry->eflags & MAP_ENTRY_USER_WIRED)
167a9643ea8Slogwang return (1);
168a9643ea8Slogwang return (0);
169a9643ea8Slogwang }
170a9643ea8Slogwang
171a9643ea8Slogwang static __inline int
vm_map_entry_system_wired_count(vm_map_entry_t entry)172a9643ea8Slogwang vm_map_entry_system_wired_count(vm_map_entry_t entry)
173a9643ea8Slogwang {
174a9643ea8Slogwang return (entry->wired_count - vm_map_entry_user_wired_count(entry));
175a9643ea8Slogwang }
176a9643ea8Slogwang #endif /* _KERNEL */
177a9643ea8Slogwang
178a9643ea8Slogwang /*
179a9643ea8Slogwang * A map is a set of map entries. These map entries are
180*22ce4affSfengbojiang * organized as a threaded binary search tree. Both structures
181*22ce4affSfengbojiang * are ordered based upon the start and end addresses contained
182*22ce4affSfengbojiang * within each map entry. The largest gap between an entry in a
183*22ce4affSfengbojiang * subtree and one of its neighbors is saved in the max_free
184*22ce4affSfengbojiang * field, and that field is updated when the tree is
185*22ce4affSfengbojiang * restructured.
186*22ce4affSfengbojiang *
187*22ce4affSfengbojiang * Sleator and Tarjan's top-down splay algorithm is employed to
188*22ce4affSfengbojiang * control height imbalance in the binary search tree.
189*22ce4affSfengbojiang *
190*22ce4affSfengbojiang * The map's min offset value is stored in map->header.end, and
191*22ce4affSfengbojiang * its max offset value is stored in map->header.start. These
192*22ce4affSfengbojiang * values act as sentinels for any forward or backward address
193*22ce4affSfengbojiang * scan of the list. The right and left fields of the map
 * header point to the first and last map entries.  The map
195*22ce4affSfengbojiang * header has a special value for the eflags field,
196*22ce4affSfengbojiang * MAP_ENTRY_HEADER, that is set initially, is never changed,
197*22ce4affSfengbojiang * and prevents an eflags match of the header with any other map
198*22ce4affSfengbojiang * entry.
199a9643ea8Slogwang *
200a9643ea8Slogwang * List of locks
201a9643ea8Slogwang * (c) const until freed
202a9643ea8Slogwang */
struct vm_map {
	struct vm_map_entry header;	/* List of entries; sentinel: start
					   holds the map max, end the map min */
	struct sx lock;			/* Lock for map data */
	struct mtx system_mtx;		/* NOTE(review): presumably the lock
					   used instead of `lock` when
					   system_map is set — confirm */
	int nentries;			/* Number of entries */
	vm_size_t size;			/* virtual size */
	u_int timestamp;		/* Version number */
	u_char needs_wakeup;		/* waiters sleep on this map; see
					   vm_map_wakeup() */
	u_char system_map;		/* (c) Am I a system map? */
	vm_flags_t flags;		/* flags for this vm_map (MAP_*) */
	vm_map_entry_t root;		/* Root of a binary search tree */
	pmap_t pmap;			/* (c) Physical map */
	vm_offset_t anon_loc;		/* NOTE(review): presumably a placement
					   hint for anonymous mappings —
					   confirm */
	int busy;			/* busy count; see vm_map_busy() and
					   vm_map_unbusy() */
#ifdef DIAGNOSTIC
	int nupdates;			/* diagnostic update counter */
#endif
};
221a9643ea8Slogwang
222a9643ea8Slogwang /*
223a9643ea8Slogwang * vm_flags_t values
224a9643ea8Slogwang */
225a9643ea8Slogwang #define MAP_WIREFUTURE 0x01 /* wire all future pages */
226a9643ea8Slogwang #define MAP_BUSY_WAKEUP 0x02
227*22ce4affSfengbojiang #define MAP_IS_SUB_MAP 0x04 /* has parent */
228*22ce4affSfengbojiang #define MAP_ASLR 0x08 /* enabled ASLR */
229*22ce4affSfengbojiang #define MAP_ASLR_IGNSTART 0x10
230*22ce4affSfengbojiang #define MAP_REPLENISH 0x20
231*22ce4affSfengbojiang #define MAP_WXORX 0x40 /* enforce W^X */
232a9643ea8Slogwang
233a9643ea8Slogwang #ifdef _KERNEL
234*22ce4affSfengbojiang #if defined(KLD_MODULE) && !defined(KLD_TIED)
235*22ce4affSfengbojiang #define vm_map_max(map) vm_map_max_KBI((map))
236*22ce4affSfengbojiang #define vm_map_min(map) vm_map_min_KBI((map))
237*22ce4affSfengbojiang #define vm_map_pmap(map) vm_map_pmap_KBI((map))
238*22ce4affSfengbojiang #define vm_map_range_valid(map, start, end) \
239*22ce4affSfengbojiang vm_map_range_valid_KBI((map), (start), (end))
240*22ce4affSfengbojiang #else
241a9643ea8Slogwang static __inline vm_offset_t
vm_map_max(const struct vm_map * map)242a9643ea8Slogwang vm_map_max(const struct vm_map *map)
243a9643ea8Slogwang {
244*22ce4affSfengbojiang
245*22ce4affSfengbojiang return (map->header.start);
246a9643ea8Slogwang }
247a9643ea8Slogwang
248a9643ea8Slogwang static __inline vm_offset_t
vm_map_min(const struct vm_map * map)249a9643ea8Slogwang vm_map_min(const struct vm_map *map)
250a9643ea8Slogwang {
251*22ce4affSfengbojiang
252*22ce4affSfengbojiang return (map->header.end);
253a9643ea8Slogwang }
254a9643ea8Slogwang
255a9643ea8Slogwang static __inline pmap_t
vm_map_pmap(vm_map_t map)256a9643ea8Slogwang vm_map_pmap(vm_map_t map)
257a9643ea8Slogwang {
258a9643ea8Slogwang return (map->pmap);
259a9643ea8Slogwang }
260a9643ea8Slogwang
261a9643ea8Slogwang static __inline void
vm_map_modflags(vm_map_t map,vm_flags_t set,vm_flags_t clear)262a9643ea8Slogwang vm_map_modflags(vm_map_t map, vm_flags_t set, vm_flags_t clear)
263a9643ea8Slogwang {
264a9643ea8Slogwang map->flags = (map->flags | set) & ~clear;
265a9643ea8Slogwang }
266*22ce4affSfengbojiang
267*22ce4affSfengbojiang static inline bool
vm_map_range_valid(vm_map_t map,vm_offset_t start,vm_offset_t end)268*22ce4affSfengbojiang vm_map_range_valid(vm_map_t map, vm_offset_t start, vm_offset_t end)
269*22ce4affSfengbojiang {
270*22ce4affSfengbojiang if (end < start)
271*22ce4affSfengbojiang return (false);
272*22ce4affSfengbojiang if (start < vm_map_min(map) || end > vm_map_max(map))
273*22ce4affSfengbojiang return (false);
274*22ce4affSfengbojiang return (true);
275*22ce4affSfengbojiang }
276*22ce4affSfengbojiang
277*22ce4affSfengbojiang #endif /* KLD_MODULE */
278a9643ea8Slogwang #endif /* _KERNEL */
279a9643ea8Slogwang
280a9643ea8Slogwang /*
281a9643ea8Slogwang * Shareable process virtual address space.
282a9643ea8Slogwang *
283a9643ea8Slogwang * List of locks
284a9643ea8Slogwang * (c) const until freed
285a9643ea8Slogwang */
struct vmspace {
	struct vm_map vm_map;		/* VM address map */
	struct shmmap_state *vm_shm;	/* SYS5 shared memory private data XXX */
	segsz_t vm_swrss;		/* resident set size before last swap */
	segsz_t vm_tsize;		/* text size (pages) XXX */
	segsz_t vm_dsize;		/* data size (pages) XXX */
	segsz_t vm_ssize;		/* stack size (pages) */
	caddr_t vm_taddr;		/* (c) user virtual address of text */
	caddr_t vm_daddr;		/* (c) user virtual address of data */
	caddr_t vm_maxsaddr;		/* user VA at max stack growth */
	u_int vm_refcnt;		/* number of references */
	/*
	 * Keep the PMAP last, so that CPU-specific variations of that
	 * structure on a single architecture don't result in offset
	 * variations of the machine-independent fields in the vmspace.
	 */
	struct pmap vm_pmap;		/* private physical map */
};
304a9643ea8Slogwang
305a9643ea8Slogwang #ifdef _KERNEL
306a9643ea8Slogwang static __inline pmap_t
vmspace_pmap(struct vmspace * vmspace)307a9643ea8Slogwang vmspace_pmap(struct vmspace *vmspace)
308a9643ea8Slogwang {
309a9643ea8Slogwang return &vmspace->vm_pmap;
310a9643ea8Slogwang }
311a9643ea8Slogwang #endif /* _KERNEL */
312a9643ea8Slogwang
313a9643ea8Slogwang #ifdef _KERNEL
314a9643ea8Slogwang /*
315a9643ea8Slogwang * Macros: vm_map_lock, etc.
316a9643ea8Slogwang * Function:
317a9643ea8Slogwang * Perform locking on the data portion of a map. Note that
318a9643ea8Slogwang * these macros mimic procedure calls returning void. The
319a9643ea8Slogwang * semicolon is supplied by the user of these macros, not
320a9643ea8Slogwang * by the macros themselves. The macros can safely be used
321a9643ea8Slogwang * as unbraced elements in a higher level statement.
322a9643ea8Slogwang */
323a9643ea8Slogwang
324a9643ea8Slogwang void _vm_map_lock(vm_map_t map, const char *file, int line);
325a9643ea8Slogwang void _vm_map_unlock(vm_map_t map, const char *file, int line);
326a9643ea8Slogwang int _vm_map_unlock_and_wait(vm_map_t map, int timo, const char *file, int line);
327a9643ea8Slogwang void _vm_map_lock_read(vm_map_t map, const char *file, int line);
328a9643ea8Slogwang void _vm_map_unlock_read(vm_map_t map, const char *file, int line);
329a9643ea8Slogwang int _vm_map_trylock(vm_map_t map, const char *file, int line);
330a9643ea8Slogwang int _vm_map_trylock_read(vm_map_t map, const char *file, int line);
331a9643ea8Slogwang int _vm_map_lock_upgrade(vm_map_t map, const char *file, int line);
332a9643ea8Slogwang void _vm_map_lock_downgrade(vm_map_t map, const char *file, int line);
333a9643ea8Slogwang int vm_map_locked(vm_map_t map);
334a9643ea8Slogwang void vm_map_wakeup(vm_map_t map);
335a9643ea8Slogwang void vm_map_busy(vm_map_t map);
336a9643ea8Slogwang void vm_map_unbusy(vm_map_t map);
337a9643ea8Slogwang void vm_map_wait_busy(vm_map_t map);
338*22ce4affSfengbojiang vm_offset_t vm_map_max_KBI(const struct vm_map *map);
339*22ce4affSfengbojiang vm_offset_t vm_map_min_KBI(const struct vm_map *map);
340*22ce4affSfengbojiang pmap_t vm_map_pmap_KBI(vm_map_t map);
341*22ce4affSfengbojiang bool vm_map_range_valid_KBI(vm_map_t map, vm_offset_t start, vm_offset_t end);
342a9643ea8Slogwang
343a9643ea8Slogwang #define vm_map_lock(map) _vm_map_lock(map, LOCK_FILE, LOCK_LINE)
344a9643ea8Slogwang #define vm_map_unlock(map) _vm_map_unlock(map, LOCK_FILE, LOCK_LINE)
345a9643ea8Slogwang #define vm_map_unlock_and_wait(map, timo) \
346a9643ea8Slogwang _vm_map_unlock_and_wait(map, timo, LOCK_FILE, LOCK_LINE)
347a9643ea8Slogwang #define vm_map_lock_read(map) _vm_map_lock_read(map, LOCK_FILE, LOCK_LINE)
348a9643ea8Slogwang #define vm_map_unlock_read(map) _vm_map_unlock_read(map, LOCK_FILE, LOCK_LINE)
349a9643ea8Slogwang #define vm_map_trylock(map) _vm_map_trylock(map, LOCK_FILE, LOCK_LINE)
350a9643ea8Slogwang #define vm_map_trylock_read(map) \
351a9643ea8Slogwang _vm_map_trylock_read(map, LOCK_FILE, LOCK_LINE)
352a9643ea8Slogwang #define vm_map_lock_upgrade(map) \
353a9643ea8Slogwang _vm_map_lock_upgrade(map, LOCK_FILE, LOCK_LINE)
354a9643ea8Slogwang #define vm_map_lock_downgrade(map) \
355a9643ea8Slogwang _vm_map_lock_downgrade(map, LOCK_FILE, LOCK_LINE)
356a9643ea8Slogwang
357a9643ea8Slogwang long vmspace_resident_count(struct vmspace *vmspace);
358a9643ea8Slogwang #endif /* _KERNEL */
359a9643ea8Slogwang
360a9643ea8Slogwang /*
361a9643ea8Slogwang * Copy-on-write flags for vm_map operations
362a9643ea8Slogwang */
363*22ce4affSfengbojiang #define MAP_INHERIT_SHARE 0x00000001
364*22ce4affSfengbojiang #define MAP_COPY_ON_WRITE 0x00000002
365*22ce4affSfengbojiang #define MAP_NOFAULT 0x00000004
366*22ce4affSfengbojiang #define MAP_PREFAULT 0x00000008
367*22ce4affSfengbojiang #define MAP_PREFAULT_PARTIAL 0x00000010
368*22ce4affSfengbojiang #define MAP_DISABLE_SYNCER 0x00000020
369*22ce4affSfengbojiang #define MAP_CHECK_EXCL 0x00000040
370*22ce4affSfengbojiang #define MAP_CREATE_GUARD 0x00000080
371*22ce4affSfengbojiang #define MAP_DISABLE_COREDUMP 0x00000100
372*22ce4affSfengbojiang #define MAP_PREFAULT_MADVISE 0x00000200 /* from (user) madvise request */
373*22ce4affSfengbojiang #define MAP_WRITECOUNT 0x00000400
374*22ce4affSfengbojiang #define MAP_REMAP 0x00000800
375*22ce4affSfengbojiang #define MAP_STACK_GROWS_DOWN 0x00001000
376*22ce4affSfengbojiang #define MAP_STACK_GROWS_UP 0x00002000
377*22ce4affSfengbojiang #define MAP_ACC_CHARGED 0x00004000
378*22ce4affSfengbojiang #define MAP_ACC_NO_CHARGE 0x00008000
379*22ce4affSfengbojiang #define MAP_CREATE_STACK_GAP_UP 0x00010000
380*22ce4affSfengbojiang #define MAP_CREATE_STACK_GAP_DN 0x00020000
381*22ce4affSfengbojiang #define MAP_VN_EXEC 0x00040000
382*22ce4affSfengbojiang #define MAP_SPLIT_BOUNDARY_MASK 0x00180000
383*22ce4affSfengbojiang
384*22ce4affSfengbojiang #define MAP_SPLIT_BOUNDARY_SHIFT 19
385a9643ea8Slogwang
386a9643ea8Slogwang /*
387a9643ea8Slogwang * vm_fault option flags
388a9643ea8Slogwang */
389*22ce4affSfengbojiang #define VM_FAULT_NORMAL 0x00 /* Nothing special */
390*22ce4affSfengbojiang #define VM_FAULT_WIRE 0x01 /* Wire the mapped page */
391*22ce4affSfengbojiang #define VM_FAULT_DIRTY 0x02 /* Dirty the page; use w/VM_PROT_COPY */
392*22ce4affSfengbojiang #define VM_FAULT_NOFILL 0x04 /* Fail if the pager doesn't have a copy */
393a9643ea8Slogwang
394a9643ea8Slogwang /*
395a9643ea8Slogwang * Initially, mappings are slightly sequential. The maximum window size must
396a9643ea8Slogwang * account for the map entry's "read_ahead" field being defined as an uint8_t.
397a9643ea8Slogwang */
398a9643ea8Slogwang #define VM_FAULT_READ_AHEAD_MIN 7
399a9643ea8Slogwang #define VM_FAULT_READ_AHEAD_INIT 15
400*22ce4affSfengbojiang #define VM_FAULT_READ_AHEAD_MAX min(atop(maxphys) - 1, UINT8_MAX)
401a9643ea8Slogwang
402a9643ea8Slogwang /*
403a9643ea8Slogwang * The following "find_space" options are supported by vm_map_find().
404a9643ea8Slogwang *
405a9643ea8Slogwang * For VMFS_ALIGNED_SPACE, the desired alignment is specified to
406a9643ea8Slogwang * the macro argument as log base 2 of the desired alignment.
407a9643ea8Slogwang */
408a9643ea8Slogwang #define VMFS_NO_SPACE 0 /* don't find; use the given range */
409a9643ea8Slogwang #define VMFS_ANY_SPACE 1 /* find a range with any alignment */
410a9643ea8Slogwang #define VMFS_OPTIMAL_SPACE 2 /* find a range with optimal alignment*/
411a9643ea8Slogwang #define VMFS_SUPER_SPACE 3 /* find a superpage-aligned range */
412a9643ea8Slogwang #define VMFS_ALIGNED_SPACE(x) ((x) << 8) /* find a range with fixed alignment */
413a9643ea8Slogwang
414a9643ea8Slogwang /*
415a9643ea8Slogwang * vm_map_wire and vm_map_unwire option flags
416a9643ea8Slogwang */
417a9643ea8Slogwang #define VM_MAP_WIRE_SYSTEM 0 /* wiring in a kernel map */
418a9643ea8Slogwang #define VM_MAP_WIRE_USER 1 /* wiring in a user map */
419a9643ea8Slogwang
420a9643ea8Slogwang #define VM_MAP_WIRE_NOHOLES 0 /* region must not have holes */
421a9643ea8Slogwang #define VM_MAP_WIRE_HOLESOK 2 /* region may have holes */
422a9643ea8Slogwang
423a9643ea8Slogwang #define VM_MAP_WIRE_WRITE 4 /* Validate writable. */
424a9643ea8Slogwang
425*22ce4affSfengbojiang typedef int vm_map_entry_reader(void *token, vm_map_entry_t addr,
426*22ce4affSfengbojiang vm_map_entry_t dest);
427*22ce4affSfengbojiang
428*22ce4affSfengbojiang #ifndef _KERNEL
429*22ce4affSfengbojiang /*
430*22ce4affSfengbojiang * Find the successor of a map_entry, using a reader to dereference pointers.
431*22ce4affSfengbojiang * '*clone' is a copy of a vm_map entry. 'reader' is used to copy a map entry
432*22ce4affSfengbojiang * at some address into '*clone'. Change *clone to a copy of the next map
433*22ce4affSfengbojiang * entry, and return the address of that entry, or NULL if copying has failed.
434*22ce4affSfengbojiang *
435*22ce4affSfengbojiang * This function is made available to user-space code that needs to traverse
436*22ce4affSfengbojiang * map entries.
437*22ce4affSfengbojiang */
static inline vm_map_entry_t
vm_map_entry_read_succ(void *token, struct vm_map_entry *const clone,
    vm_map_entry_reader reader)
{
	vm_map_entry_t after, backup;
	vm_offset_t start;

	/* Start from the entry currently held in *clone. */
	after = clone->right;
	start = clone->start;
	/* Load the right-linked entry into *clone. */
	if (!reader(token, after, clone))
		return (NULL);
	backup = clone->left;
	/* Load that entry's left-linked entry into *clone. */
	if (!reader(token, backup, clone))
		return (NULL);
	if (clone->start > start) {
		/*
		 * The right link led into a subtree rather than directly
		 * to the successor: walk left links until we are back at
		 * an entry whose start matches the original, leaving
		 * 'after' as the leftmost (successor) entry found.
		 */
		do {
			after = backup;
			backup = clone->left;
			if (!reader(token, backup, clone))
				return (NULL);
		} while (clone->start != start);
	}
	/* Leave *clone holding a copy of the successor before returning. */
	if (!reader(token, after, clone))
		return (NULL);
	return (after);
}
464*22ce4affSfengbojiang #endif /* ! _KERNEL */
465*22ce4affSfengbojiang
466a9643ea8Slogwang #ifdef _KERNEL
467a9643ea8Slogwang boolean_t vm_map_check_protection (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t);
468a9643ea8Slogwang int vm_map_delete(vm_map_t, vm_offset_t, vm_offset_t);
469a9643ea8Slogwang int vm_map_find(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *, vm_size_t,
470a9643ea8Slogwang vm_offset_t, int, vm_prot_t, vm_prot_t, int);
471*22ce4affSfengbojiang int vm_map_find_min(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t *,
472*22ce4affSfengbojiang vm_size_t, vm_offset_t, vm_offset_t, int, vm_prot_t, vm_prot_t, int);
473*22ce4affSfengbojiang int vm_map_find_aligned(vm_map_t map, vm_offset_t *addr, vm_size_t length,
474*22ce4affSfengbojiang vm_offset_t max_addr, vm_offset_t alignment);
475a9643ea8Slogwang int vm_map_fixed(vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_size_t,
476a9643ea8Slogwang vm_prot_t, vm_prot_t, int);
477*22ce4affSfengbojiang vm_offset_t vm_map_findspace(vm_map_t, vm_offset_t, vm_size_t);
478a9643ea8Slogwang int vm_map_inherit (vm_map_t, vm_offset_t, vm_offset_t, vm_inherit_t);
479a9643ea8Slogwang void vm_map_init(vm_map_t, pmap_t, vm_offset_t, vm_offset_t);
480a9643ea8Slogwang int vm_map_insert (vm_map_t, vm_object_t, vm_ooffset_t, vm_offset_t, vm_offset_t, vm_prot_t, vm_prot_t, int);
481a9643ea8Slogwang int vm_map_lookup (vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
482a9643ea8Slogwang vm_pindex_t *, vm_prot_t *, boolean_t *);
483a9643ea8Slogwang int vm_map_lookup_locked(vm_map_t *, vm_offset_t, vm_prot_t, vm_map_entry_t *, vm_object_t *,
484a9643ea8Slogwang vm_pindex_t *, vm_prot_t *, boolean_t *);
485a9643ea8Slogwang void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
486a9643ea8Slogwang boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
487*22ce4affSfengbojiang
488*22ce4affSfengbojiang static inline vm_map_entry_t
vm_map_entry_first(vm_map_t map)489*22ce4affSfengbojiang vm_map_entry_first(vm_map_t map)
490*22ce4affSfengbojiang {
491*22ce4affSfengbojiang
492*22ce4affSfengbojiang return (map->header.right);
493*22ce4affSfengbojiang }
494*22ce4affSfengbojiang
/*
 * Return the entry that follows 'entry' in address order, using the
 * threaded links of the binary search tree.
 */
static inline vm_map_entry_t
vm_map_entry_succ(vm_map_entry_t entry)
{
	vm_map_entry_t after;

	after = entry->right;
	/*
	 * If the right link led into a subtree (its left child starts
	 * beyond 'entry'), descend left links until one threads back to
	 * 'entry' itself; that node is the in-order successor.
	 */
	if (after->left->start > entry->start) {
		do
			after = after->left;
		while (after->left != entry);
	}
	return (after);
}
508*22ce4affSfengbojiang
509*22ce4affSfengbojiang #define VM_MAP_ENTRY_FOREACH(it, map) \
510*22ce4affSfengbojiang for ((it) = vm_map_entry_first(map); \
511*22ce4affSfengbojiang (it) != &(map)->header; \
512*22ce4affSfengbojiang (it) = vm_map_entry_succ(it))
513*22ce4affSfengbojiang
514*22ce4affSfengbojiang #define VM_MAP_PROTECT_SET_PROT 0x0001
515*22ce4affSfengbojiang #define VM_MAP_PROTECT_SET_MAXPROT 0x0002
516*22ce4affSfengbojiang
517*22ce4affSfengbojiang int vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
518*22ce4affSfengbojiang vm_prot_t new_prot, vm_prot_t new_maxprot, int flags);
519a9643ea8Slogwang int vm_map_remove (vm_map_t, vm_offset_t, vm_offset_t);
520*22ce4affSfengbojiang void vm_map_try_merge_entries(vm_map_t map, vm_map_entry_t prev,
521*22ce4affSfengbojiang vm_map_entry_t entry);
522a9643ea8Slogwang void vm_map_startup (void);
523a9643ea8Slogwang int vm_map_submap (vm_map_t, vm_offset_t, vm_offset_t, vm_map_t);
524a9643ea8Slogwang int vm_map_sync(vm_map_t, vm_offset_t, vm_offset_t, boolean_t, boolean_t);
525a9643ea8Slogwang int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int);
526a9643ea8Slogwang int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
527a9643ea8Slogwang int vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
528a9643ea8Slogwang int flags);
529*22ce4affSfengbojiang int vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags);
530*22ce4affSfengbojiang int vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end,
531a9643ea8Slogwang int flags);
532a9643ea8Slogwang long vmspace_swap_count(struct vmspace *vmspace);
533*22ce4affSfengbojiang void vm_map_entry_set_vnode_text(vm_map_entry_t entry, bool add);
534a9643ea8Slogwang #endif /* _KERNEL */
535a9643ea8Slogwang #endif /* _VM_MAP_ */
536