/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 *	File:	vm/vm_map.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory map module definitions.
 *
 * Contributors:
 *	avie, dlb, mwyoung
 */

#ifndef _VM_VM_MAP_H_
#define _VM_VM_MAP_H_

#include <sys/cdefs.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/vm_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_inherit.h>
#include <mach/vm_behavior.h>
#include <mach/vm_param.h>
#include <mach/sdt.h>
#include <vm/pmap.h>
#include <os/overflow.h>
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_protos.h>
#endif /* XNU_KERNEL_PRIVATE */
#ifdef  MACH_KERNEL_PRIVATE
#include <mach_assert.h>
#include <vm/vm_map_store_internal.h>
#include <vm/vm_object_xnu.h>
#include <vm/vm_page.h>
#include <kern/locks.h>
#include <kern/zalloc.h>
#include <kern/macro_help.h>

#include <kern/thread.h>
#include <os/refcnt.h>
#endif /* MACH_KERNEL_PRIVATE */


__BEGIN_DECLS

#ifdef KERNEL_PRIVATE

#pragma mark - VM map basics

/*!
 * @function vm_map_create()
 *
 * @brief
 * Creates an empty VM map.
 *
 * @discussion
 * A VM map represents an address space or sub-address space that is managed
 * by the Mach VM.
 *
 * It consists of a complex data structure which represents all the VM regions
 * as configured by the address space client, which supports fast lookup
 * by address or range.
 *
 * In the Mach VM, the VM map is the source of truth for all the configuration
 * of regions, and the machine dependent physical map layer is used as a cache
 * of that information.
 *
 * Most kernel clients never have to create VM maps themselves
 * and will instead interact with:
 * - the @c current_map() (or a given task map),
 * - the @c kernel_map (which is the kernel's own map) or one of its submaps.
 *
 #ifdef XNU_KERNEL_PRIVATE
 * Inside XNU, using @c vm_map_create_options() is preferred.
 *
 #endif XNU_KERNEL_PRIVATE
 * @param pmap          the physical map to associate with this map
 * @param min_off       the lower address bound of this map
 * @param max_off       the upper address bound of this map
 * @param pageable      whether the map will support paging.
 */
extern vm_map_t         vm_map_create(
	pmap_t                  pmap,
	vm_map_offset_t         min_off,
	vm_map_offset_t         max_off,
	boolean_t               pageable);
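
/*
 * Example: a minimal sketch of creating and later releasing a pageable map.
 * The @c pmap_create_options() arguments and the address bounds below are
 * illustrative assumptions, not a prescribed recipe.
 *
 *	pmap_t   pmap = pmap_create_options(NULL, 0, 0);
 *	vm_map_t map;
 *
 *	if (pmap != PMAP_NULL) {
 *		map = vm_map_create(pmap, MACH_VM_MIN_ADDRESS,
 *		    MACH_VM_MAX_ADDRESS, TRUE);
 *		// ... use the map ...
 *		vm_map_deallocate(map);
 *	}
 */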


/*!
 * @function vm_map_deallocate()
 *
 * @brief
 * Deallocates a VM map.
 *
 * @discussion
 * VM maps are refcounted objects; however, most clients will instead
 * hold references to the owning task, or manipulate them via Mach ports.
 *
 * @param map           the map to deallocate.
 */
extern void             vm_map_deallocate(
	vm_map_t                map);


/*!
 * @function vm_map_page_shift()
 *
 * @brief
 * Returns the page shift for a given map.
 *
 * @param map           the specified map
 * @returns             the page shift for this map
 */
extern int              vm_map_page_shift(
	vm_map_t                map) __pure2;


/*!
 * @function vm_map_page_mask()
 *
 * @brief
 * Returns the page mask for a given map.
 *
 * @discussion
 * This is equivalent to @c ((1ull << vm_map_page_shift(map)) - 1).
 *
 * @param map           the specified map
 * @returns             the page mask for this map
 */
extern vm_map_offset_t  vm_map_page_mask(
	vm_map_t                map) __pure2;


/*!
 * @function vm_map_page_size()
 *
 * @brief
 * Returns the page size for a given map.
 *
 * @discussion
 * This is equivalent to @c (1 << vm_map_page_shift(map)).
 *
 * @param map           the specified map
 * @returns             the page size for this map
 */
extern int              vm_map_page_size(
	vm_map_t                map) __pure2;
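
/*
 * Example: the three accessors above describe a single page geometry,
 * so for any map the following invariants are expected to hold
 * (illustrative sketch).
 *
 *	int             shift = vm_map_page_shift(map);
 *	vm_map_offset_t mask  = vm_map_page_mask(map);
 *	int             size  = vm_map_page_size(map);
 *
 *	assert(size == (1 << shift));
 *	assert(mask == (vm_map_offset_t)(size - 1));
 */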


/*!
 * @function vm_map_round_page()
 *
 * @brief
 * Rounds up a given address to the next page boundary for a given page mask.
 *
 * @discussion
 * @warning
 * This function doesn't check for overflow;
 * clients are expected to verify that the returned value isn't 0.
 *
 * @param x             the address to round to a page boundary
 * @param pgmask        the page mask to use for the operation
 * @returns             @c x rounded up to the next page boundary
 */
#define vm_map_round_page(x, pgmask) \
	(((vm_map_offset_t)(x) + (pgmask)) & ~((signed)(pgmask)))

/*!
 * @function vm_map_round_page_mask()
 *
 * @brief
 * Rounds up a given address to the next page boundary for a given page mask.
 *
 * @discussion
 * This is equivalent to @c vm_map_round_page(offset, mask).
 *
 * @warning
 * This function doesn't check for overflow;
 * clients are expected to verify that the returned value isn't 0.
 *
 * @param offset        the address to round to a page boundary
 * @param mask          the page mask to use for the operation
 * @returns             @c offset rounded up to the next page boundary
 */
extern vm_map_offset_t  vm_map_round_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask) __pure2;


/*!
 * @function vm_map_trunc_page()
 *
 * @brief
 * Truncates a given address to the previous page boundary for a given page mask.
 *
 * @discussion
 * This is equivalent to @c (offset & ~pgmask).
 *
 * @param offset        the address to truncate to a page boundary
 * @param pgmask        the page mask to use for the operation
 * @returns             @c offset truncated to the previous page boundary
 */
#define vm_map_trunc_page(offset, pgmask) \
	((vm_map_offset_t)(offset) & ~((signed)(pgmask)))

/*!
 * @function vm_map_trunc_page_mask()
 *
 * @brief
 * Truncates a given address to the previous page boundary for a given page mask.
 *
 * @discussion
 * This is equivalent to @c vm_map_trunc_page(offset, mask).
 *
 * @param offset        the address to truncate to a page boundary
 * @param mask          the page mask to use for the operation
 * @returns             @c offset truncated to the previous page boundary
 */
extern vm_map_offset_t  vm_map_trunc_page_mask(
	vm_map_offset_t         offset,
	vm_map_offset_t         mask) __pure2;
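
/*
 * Example: clipping an arbitrary byte range to the page boundaries of a
 * target map, e.g. before wiring or copying (illustrative sketch; @c addr
 * and @c size are assumed inputs). Per the warnings above, a rounded
 * result of 0 means the addition overflowed.
 *
 *	vm_map_offset_t mask  = vm_map_page_mask(map);
 *	vm_map_offset_t start = vm_map_trunc_page(addr, mask);
 *	vm_map_offset_t end   = vm_map_round_page(addr + size, mask);
 *
 *	if (end == 0 || end <= start) {
 *		return KERN_INVALID_ARGUMENT;
 *	}
 */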


/*!
 * @function vm_map_disable_hole_optimization()
 *
 * @brief
 * Disables hole list optimization
 *
 * @discussion
 * This function disables the hole list optimization and deallocates all
 * associated resources.
 *
 * @param map           the map to disable hole list for.
 */
extern void vm_map_disable_hole_optimization(
	vm_map_t                map);

#ifdef MACH_KERNEL_PRIVATE

#pragma mark - MIG helpers
#pragma GCC visibility push(hidden)

/*!
 * @function convert_port_entry_to_map()
 *
 * @brief
 * MIG intran for the @c vm_task_entry_t type, do not use directly.
 */
extern vm_map_t         convert_port_entry_to_map(
	ipc_port_t              port) __exported;

/*!
 * @function vm_map_inspect_deallocate()
 *
 * @brief
 * MIG destructor function for the @c vm_map_inspect_t type,
 * do not use directly.
 */
extern void             vm_map_inspect_deallocate(
	vm_map_inspect_t        map);


/*!
 * @function vm_map_read_deallocate()
 *
 * @brief
 * MIG destructor function for the @c vm_map_read_t type,
 * do not use directly.
 */
extern void             vm_map_read_deallocate(
	vm_map_read_t           map);

#pragma GCC visibility pop
#endif /* MACH_KERNEL_PRIVATE */

#pragma mark - vm map wiring
#if !XNU_KERNEL_PRIVATE

/*!
 * @function vm_map_wire()
 *
 * @brief
 * Sets the pageability of the specified address range in the
 * target map as wired.
 *
 * @discussion
 * Regions specified as not pageable require locked-down physical memory
 * and physical page maps.
 *
 * The prot_u parameter indicates the types of accesses that must not
 * generate page faults. It is checked against the protection of the
 * memory being locked down.
 *
 * The map must not be locked, but a reference must remain
 * to the map throughout the call.
 *
 *
 * @param map           the target VM map (the call will recurse in submaps).
 * @param start_u       the lower bound of the address range to wire
 * @param end_u         the upper bound of the address range to wire
 * @param prot_u        the access for which to perform the wiring
 * @param user_wire     whether the wiring is on behalf of userspace.
 *                      Userspace wiring is equivalent to an mlock() call from
 *                      userspace and is undone at process death, unlike
 *                      kernel wiring, which must always be undone explicitly.
 *
 * @returns
 * - KERN_SUCCESS       the operation was successful
 * - KERN_INVALID_ARGUMENT
 *                      @c [start_u, end_u) didn't form a valid region
 * - KERN_RESOURCE_SHORTAGE
 *                      the kernel was out of physical memory to perform
 *                      the operation.
 * - KERN_INVALID_ADDRESS
 *                      some address in the range wasn't mapped.
 * - KERN_PROTECTION_FAILURE
 *                      the region doesn't support wiring for this access.
 * - KERN_MEMORY_ERROR  faulting failed.
 * - MACH_SEND_INTERRUPTED
 *                      a signal was received during the wiring
 *
 * User wirings:
 * - KERN_FAILURE       the process was terminated during the wiring.
 * - KERN_FAILURE       the user wire counts would overflow @c MAX_WIRE_COUNT
 *                      for this region.
 * - KERN_RESOURCE_SHORTAGE
 *                      the process would overflow its user wiring limits.
 */
extern kern_return_t    vm_map_wire(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	vm_prot_ut              prot_u,
	boolean_t               user_wire);

#endif /* !XNU_KERNEL_PRIVATE */

/*!
 * @function vm_map_unwire()
 *
 * @brief
 * Sets the pageability of the specified address range in the target map
 * as pageable.
 *
 * @discussion
 * Regions specified must have been wired previously.
 *
 * The map must not be locked, but a reference must remain to the map
 * throughout the call.
 *
 * A user unwire ignores holes, unwired entries, and in-transition entries
 * to avoid leaking memory by leaving it wired. Kernel unwires will panic
 * on failures.
 *
 *
 * @param map           the target VM map (the call will recurse in submaps).
 * @param start_u       the lower bound of the address range to unwire
 * @param end_u         the upper bound of the address range to unwire
 * @param user_wire     whether the unwiring is on behalf of userspace.
 */
extern kern_return_t    vm_map_unwire(
	vm_map_t                map,
	vm_map_offset_ut        start_u,
	vm_map_offset_ut        end_u,
	boolean_t               user_wire);
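
/*
 * Example: a kernel wiring paired with the explicit unwire it requires
 * (illustrative sketch; page-aligned @c start and @c end are assumed).
 * Unlike user wirings, kernel wirings are never undone automatically.
 *
 *	kern_return_t kr;
 *
 *	kr = vm_map_wire(map, start, end, VM_PROT_READ | VM_PROT_WRITE, FALSE);
 *	if (kr == KERN_SUCCESS) {
 *		// ... access the locked-down memory ...
 *		kr = vm_map_unwire(map, start, end, FALSE);
 *	}
 */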


#if XNU_PLATFORM_MacOSX

/*!
 * @function vm_map_wire_and_extract()
 *
 * @brief
 * Sets the pageability of the specified page in the target map as wired,
 * and returns the resulting physical page number for it.
 *
 * @discussion
 * This function should not be called by kernel extensions and is only here
 * for backward compatibility of macOS kernels.
 *
 *
 * @param map           the target VM map (the call will recurse in submaps).
 * @param address       the address of the page to wire
 * @param access_type   the access for which to perform the wiring
 * @param user_wire     whether the wiring is on behalf of userspace.
 *                      Userspace wiring is equivalent to an mlock() call from
 *                      userspace and is undone at process death, unlike
 *                      kernel wiring, which must always be undone explicitly.
 * @param physpage_p    a pointer filled with the page number for the wired down
 *                      physical page, or 0 in case of failure.
 *
 * @returns             @c KERN_SUCCESS or an error denoting the reason for
 *                      failure.
 */
extern kern_return_t    vm_map_wire_and_extract(
	vm_map_t                map,
	vm_map_offset_ut        address,
	vm_prot_ut              access_type,
	boolean_t               user_wire,
	ppnum_t                *physpage_p);

#endif /* XNU_PLATFORM_MacOSX */

#pragma mark - vm map copy

/*!
 * @const VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES
 * Number of pages under which the VM will copy by content rather
 * than trying to do a copy-on-write mapping to form a vm_map_copy_t.
 *
 * Note: this constant has unfortunately been exposed historically
 *       but should not be considered ABI.
 */
#define VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES      (3)


/*!
 * @function vm_map_copyin()
 *
 * @brief
 * Copy the specified region from the source address space.
 *
 * @discussion
 * The source map should not be locked on entry.
 *
 *
 * @param [in] src_map       the source address space to copy from.
 * @param [in] src_addr      the address at which to start copying memory.
 * @param [in] len           the size of the region to copy.
 * @param [in] src_destroy   whether the copy also removes the region
 *                           from the source address space.
 * @param [out] copy_result  the out parameter, to be filled with the created
 *                           vm map copy on success.
 * @returns
 * - KERN_SUCCESS       the operation was successful
 * - KERN_INVALID_ARGUMENT
 *                      @c (src_addr, len) didn't form a valid region
 * - KERN_RESOURCE_SHORTAGE
 *                      the kernel was out of physical memory to perform
 *                      the operation.
 * - KERN_INVALID_ADDRESS
 *                      some address in the range wasn't mapped
 * - KERN_PROTECTION_FAILURE
 *                      the region isn't readable (it doesn't have
 *                      @c VM_PROT_READ set).
 * - KERN_PROTECTION_FAILURE
 *                      the memory range contains a physically contiguous
 *                      object
 * - KERN_MEMORY_ERROR  faulting failed.
 * - MACH_SEND_INTERRUPTED
 *                      a signal was received during the copy
 *
 */
extern kern_return_t    vm_map_copyin(
	vm_map_t                src_map,
	vm_map_address_ut       src_addr,
	vm_map_size_ut          len,
	boolean_t               src_destroy,
	vm_map_copy_t          *copy_result); /* OUT */


/*!
 * @function vm_map_copyout()
 *
 * @brief
 * Place a VM map copy made with @c vm_map_copyin() into a destination map.
 *
 * @discussion
 * The specified VM map copy is consumed on success;
 * otherwise the caller is responsible for it.
 * @see @c vm_map_copy_discard below.
 *
 * @param [in]  dst_map the destination address space to insert into.
 * @param [out] addr    the address at which the data was inserted.
 * @param [in]  copy    the VM map copy to place.
 * @returns
 * - KERN_SUCCESS       the operation succeeded, @c copy has been consumed.
 * - KERN_NO_SPACE      the destination map was out of address space.
 * - KERN_NOT_SUPPORTED the vm map copy can't be mapped in this address space.
 *                      This can happen, for example, when a VM map copy
 *                      made with a 4k page size is placed into a 16k
 *                      address space and would require different physical
 *                      pages within the same 16k page boundary.
 */
extern kern_return_t    vm_map_copyout(
	vm_map_t                dst_map,
	vm_map_address_t       *addr, /* OUT */
	vm_map_copy_t           copy);

/*!
 * @function vm_map_copy_discard()
 *
 * @brief
 * Dispose of a @c vm_map_copy_t object made by @c vm_map_copyin().
 *
 * @discussion
 * VM map copies are typically placed in an address space using
 * @c vm_map_copyout(), but when that has not happened, this function must be
 * used to dispose of them.
 *
 * @param copy          the VM map copy object to dispose of.
 */
extern void             vm_map_copy_discard(
	vm_map_copy_t           copy);
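
/*
 * Example: the typical lifecycle of a @c vm_map_copy_t (illustrative
 * sketch). A successful vm_map_copyout() consumes the copy object; on
 * any other outcome the caller must discard it explicitly.
 *
 *	vm_map_copy_t    copy;
 *	vm_map_address_t dst_addr;
 *	kern_return_t    kr;
 *
 *	kr = vm_map_copyin(src_map, src_addr, len, FALSE, &copy);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;
 *	}
 *	kr = vm_map_copyout(dst_map, &dst_addr, copy);
 *	if (kr != KERN_SUCCESS) {
 *		vm_map_copy_discard(copy);	// copy was not consumed
 *	}
 *	return kr;
 */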

#endif  /* KERNEL_PRIVATE */

__END_DECLS

#endif  /* _VM_VM_MAP_H_ */