1 /* 2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 /* 29 * @OSF_COPYRIGHT@ 30 */ 31 /* 32 * Mach Operating System 33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University 34 * All Rights Reserved. 35 * 36 * Permission to use, copy, modify and distribute this software and its 37 * documentation is hereby granted, provided that both the copyright 38 * notice and this permission notice appear in all copies of the 39 * software, derivative works or modified versions, and any portions 40 * thereof, and that both notices appear in supporting documentation. 41 * 42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 45 * 46 * Carnegie Mellon requests users of this software to return to 47 * 48 * Software Distribution Coordinator or [email protected] 49 * School of Computer Science 50 * Carnegie Mellon University 51 * Pittsburgh PA 15213-3890 52 * 53 * any improvements or extensions that they make and grant Carnegie Mellon 54 * the rights to redistribute these changes. 55 */ 56 /* 57 */ 58 /* 59 * File: vm/pmap.h 60 * Author: Avadis Tevanian, Jr. 61 * Date: 1985 62 * 63 * Machine address mapping definitions -- machine-independent 64 * section. [For machine-dependent section, see "machine/pmap.h".] 65 */ 66 67 #ifndef _VM_PMAP_H_ 68 #define _VM_PMAP_H_ 69 70 #include <mach/kern_return.h> 71 #include <mach/vm_param.h> 72 #include <mach/vm_types.h> 73 #include <mach/vm_attributes.h> 74 #include <mach/boolean.h> 75 #include <mach/vm_prot.h> 76 #include <kern/trustcache.h> 77 78 #if __has_include(<CoreEntitlements/CoreEntitlements.h>) 79 #include <CoreEntitlements/CoreEntitlements.h> 80 #endif 81 82 #ifdef KERNEL_PRIVATE 83 84 /* 85 * The following is a description of the interface to the 86 * machine-dependent "physical map" data structure. The module 87 * must provide a "pmap_t" data type that represents the 88 * set of valid virtual-to-physical addresses for one user 89 * address space. [The kernel address space is represented 90 * by a distinguished "pmap_t".] The routines described manage 91 * this type, install and update virtual-to-physical mappings, 92 * and perform operations on physical addresses common to 93 * many address spaces. 94 */ 95 96 /* Copy between a physical page and a virtual address */ 97 /* LP64todo - switch to vm_map_offset_t when it grows */ 98 extern kern_return_t copypv( 99 addr64_t source, 100 addr64_t sink, 101 unsigned int size, 102 int which); 103 #define cppvPsnk 1 104 #define cppvPsnkb 31 105 #define cppvPsrc 2 106 #define cppvPsrcb 30 107 #define cppvFsnk 4 108 #define cppvFsnkb 29 109 #define cppvFsrc 8 110 #define cppvFsrcb 28 111 #define cppvNoModSnk 16 112 #define cppvNoModSnkb 27 113 #define cppvNoRefSrc 32 114 #define cppvNoRefSrcb 26 115 #define cppvKmap 64 /* Use the kernel's vm_map */ 116 #define cppvKmapb 25 117 118 extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last); 119 120 #if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE 121 #include <mach/mach_types.h> 122 #include <vm/memory_types.h> 123 124 /* 125 * Routines used during BSD process creation. 126 */ 127 128 extern pmap_t pmap_create_options( /* Create a pmap_t. */ 129 ledger_t ledger, 130 vm_map_size_t size, 131 unsigned int flags); 132 133 #if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) 134 /** 135 * Informs the pmap layer that a process will be running with user JOP disabled, 136 * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation. 137 * 138 * @note This function cannot be used once the target process has started 139 * executing code. It is intended for cases where user JOP is disabled based on 140 * the code signature (e.g., special "keys-off" entitlements), which is too late 141 * to change the flags passed to pmap_create_options. 142 * 143 * @param pmap The pmap belonging to the target process 144 */ 145 extern void pmap_disable_user_jop( 146 pmap_t pmap); 147 #endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */ 148 #endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */ 149 150 #ifdef MACH_KERNEL_PRIVATE 151 152 #include <mach_assert.h> 153 154 #include <machine/pmap.h> 155 /* 156 * Routines used for initialization. 157 * There is traditionally also a pmap_bootstrap, 158 * used very early by machine-dependent code, 159 * but it is not part of the interface. 160 * 161 * LP64todo - 162 * These interfaces are tied to the size of the 163 * kernel pmap - and therefore use the "local" 164 * vm_offset_t, etc... types. 165 */ 166 167 extern void *pmap_steal_memory(vm_size_t size, vm_size_t alignment); /* Early memory allocation */ 168 extern void *pmap_steal_freeable_memory(vm_size_t size); /* Early memory allocation */ 169 170 extern uint_t pmap_free_pages(void); /* report remaining unused physical pages */ 171 #if defined(__arm__) || defined(__arm64__) 172 extern uint_t pmap_free_pages_span(void); /* report phys address range of unused physical pages */ 173 #endif /* defined(__arm__) || defined(__arm64__) */ 174 175 176 extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */ 177 178 extern void pmap_init(void); /* Initialization, once we have kernel virtual memory. */ 179 180 extern void mapping_adjust(void); /* Adjust free mapping count */ 181 182 extern void mapping_free_prime(void); /* Primes the mapping block release list */ 183 184 #ifndef MACHINE_PAGES 185 /* 186 * If machine/pmap.h defines MACHINE_PAGES, it must implement 187 * the above functions. The pmap module has complete control. 188 * Otherwise, it must implement the following functions: 189 * pmap_free_pages 190 * pmap_virtual_space 191 * pmap_next_page 192 * pmap_init 193 * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup 194 * using pmap_free_pages, pmap_next_page, pmap_virtual_space, 195 * and pmap_enter. pmap_free_pages may over-estimate the number 196 * of unused physical pages, and pmap_next_page may return FALSE 197 * to indicate that there are no more unused pages to return. 198 * However, for best performance pmap_free_pages should be accurate. 199 */ 200 201 /* 202 * Routines to return the next unused physical page. 203 */ 204 extern boolean_t pmap_next_page(ppnum_t *pnum); 205 extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free); 206 #ifdef __x86_64__ 207 extern kern_return_t pmap_next_page_large(ppnum_t *pnum); 208 extern void pmap_hi_pages_done(void); 209 #endif 210 211 #if CONFIG_SPTM 212 __enum_decl(pmap_mapping_type_t, uint8_t, { 213 PMAP_MAPPING_TYPE_INFER = SPTM_UNTYPED, 214 PMAP_MAPPING_TYPE_DEFAULT = XNU_DEFAULT, 215 PMAP_MAPPING_TYPE_ROZONE = XNU_ROZONE, 216 PMAP_MAPPING_TYPE_RESTRICTED = XNU_KERNEL_RESTRICTED 217 }); 218 #else 219 __enum_decl(pmap_mapping_type_t, uint8_t, { 220 PMAP_MAPPING_TYPE_INFER = 0, 221 PMAP_MAPPING_TYPE_DEFAULT, 222 PMAP_MAPPING_TYPE_ROZONE, 223 PMAP_MAPPING_TYPE_RESTRICTED 224 }); 225 #endif 226 227 /* 228 * Report virtual space available for the kernel. 229 */ 230 extern void pmap_virtual_space( 231 vm_offset_t *virtual_start, 232 vm_offset_t *virtual_end); 233 #endif /* MACHINE_PAGES */ 234 235 /* 236 * Routines to manage the physical map data structure. 237 */ 238 extern pmap_t(pmap_kernel)(void); /* Return the kernel's pmap */ 239 extern void pmap_reference(pmap_t pmap); /* Gain a reference. */ 240 extern void pmap_destroy(pmap_t pmap); /* Release a reference. */ 241 extern void pmap_switch(pmap_t); 242 extern void pmap_require(pmap_t pmap); 243 244 #if MACH_ASSERT 245 extern void pmap_set_process(pmap_t pmap, 246 int pid, 247 char *procname); 248 #endif /* MACH_ASSERT */ 249 250 extern kern_return_t pmap_enter( /* Enter a mapping */ 251 pmap_t pmap, 252 vm_map_offset_t v, 253 ppnum_t pn, 254 vm_prot_t prot, 255 vm_prot_t fault_type, 256 unsigned int flags, 257 boolean_t wired, 258 pmap_mapping_type_t mapping_type); 259 260 extern kern_return_t pmap_enter_options( 261 pmap_t pmap, 262 vm_map_offset_t v, 263 ppnum_t pn, 264 vm_prot_t prot, 265 vm_prot_t fault_type, 266 unsigned int flags, 267 boolean_t wired, 268 unsigned int options, 269 void *arg, 270 pmap_mapping_type_t mapping_type); 271 extern kern_return_t pmap_enter_options_addr( 272 pmap_t pmap, 273 vm_map_offset_t v, 274 pmap_paddr_t pa, 275 vm_prot_t prot, 276 vm_prot_t fault_type, 277 unsigned int flags, 278 boolean_t wired, 279 unsigned int options, 280 void *arg, 281 pmap_mapping_type_t mapping_type); 282 283 extern void pmap_remove_some_phys( 284 pmap_t pmap, 285 ppnum_t pn); 286 287 extern void pmap_lock_phys_page( 288 ppnum_t pn); 289 290 extern void pmap_unlock_phys_page( 291 ppnum_t pn); 292 293 294 /* 295 * Routines that operate on physical addresses. 296 */ 297 298 extern void pmap_page_protect( /* Restrict access to page. */ 299 ppnum_t phys, 300 vm_prot_t prot); 301 302 extern void pmap_page_protect_options( /* Restrict access to page. */ 303 ppnum_t phys, 304 vm_prot_t prot, 305 unsigned int options, 306 void *arg); 307 308 extern void(pmap_zero_page)( 309 ppnum_t pn); 310 311 extern void(pmap_zero_part_page)( 312 ppnum_t pn, 313 vm_offset_t offset, 314 vm_size_t len); 315 316 extern void(pmap_copy_page)( 317 ppnum_t src, 318 ppnum_t dest); 319 320 extern void(pmap_copy_part_page)( 321 ppnum_t src, 322 vm_offset_t src_offset, 323 ppnum_t dst, 324 vm_offset_t dst_offset, 325 vm_size_t len); 326 327 extern void(pmap_copy_part_lpage)( 328 vm_offset_t src, 329 ppnum_t dst, 330 vm_offset_t dst_offset, 331 vm_size_t len); 332 333 extern void(pmap_copy_part_rpage)( 334 ppnum_t src, 335 vm_offset_t src_offset, 336 vm_offset_t dst, 337 vm_size_t len); 338 339 extern unsigned int(pmap_disconnect)( /* disconnect mappings and return reference and change */ 340 ppnum_t phys); 341 342 extern unsigned int(pmap_disconnect_options)( /* disconnect mappings and return reference and change */ 343 ppnum_t phys, 344 unsigned int options, 345 void *arg); 346 347 extern kern_return_t(pmap_attribute_cache_sync)( /* Flush appropriate 348 * cache based on 349 * page number sent */ 350 ppnum_t pn, 351 vm_size_t size, 352 vm_machine_attribute_t attribute, 353 vm_machine_attribute_val_t* value); 354 355 extern unsigned int(pmap_cache_attributes)( 356 ppnum_t pn); 357 358 /* 359 * Set (override) cache attributes for the specified physical page 360 */ 361 extern void pmap_set_cache_attributes( 362 ppnum_t, 363 unsigned int); 364 365 extern void *pmap_map_compressor_page( 366 ppnum_t); 367 368 extern void pmap_unmap_compressor_page( 369 ppnum_t, 370 void*); 371 372 /** 373 * The following declarations are meant to provide a uniform interface by which the VM layer can 374 * pass batches of pages to the pmap layer directly, in the various page list formats natively 375 * used by the VM. If a new type of list is to be added, the various structures and iterator 376 * functions below should be updated to understand it, and then it should "just work" with the 377 * pmap layer. 378 */ 379 380 /* The various supported page list types. */ 381 __enum_decl(unified_page_list_type_t, uint8_t, { 382 /* Universal page list array, essentially an array of ppnum_t. */ 383 UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY, 384 /** 385 * Singly-linked list of vm_page_t, using vmp_snext field. 386 * This is typically used to construct local lists of pages to be freed. 387 */ 388 UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST, 389 /* Doubly-linked queue of vm_page_t's associated with a VM object, using vmp_listq field. */ 390 UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q, 391 /* Doubly-linked queue of vm_page_t's in a FIFO queue or global free list, using vmp_pageq field. */ 392 UNIFIED_PAGE_LIST_TYPE_VM_PAGE_FIFO_Q, 393 }); 394 395 /* Uniform data structure encompassing the various page list types handled by the VM layer. */ 396 typedef struct { 397 union { 398 /* Base address and size (in pages) of UPL array for type UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY */ 399 struct { 400 upl_page_info_array_t upl_info; 401 unsigned int upl_size; 402 } upl; 403 /* Head of singly-linked vm_page_t list for UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST */ 404 vm_page_t page_slist; 405 /* Head of queue for UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q and UNIFIED_PAGE_LIST_TYPE_VM_PAGE_FIFO_Q */ 406 void *pageq; /* vm_page_queue_head_t* */ 407 }; 408 unified_page_list_type_t type; 409 } unified_page_list_t; 410 411 /* Uniform data structure representing an iterator position within a unified_page_list_t object. */ 412 typedef struct { 413 /* Pointer to list structure from which this iterator was created. */ 414 const unified_page_list_t *list; 415 union { 416 /* Position within UPL array, for UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY */ 417 unsigned int upl_index; 418 /* Position within page list or page queue, for all other types */ 419 vm_page_t pageq_pos; 420 }; 421 } unified_page_list_iterator_t; 422 423 extern void unified_page_list_iterator_init( 424 const unified_page_list_t *page_list, 425 unified_page_list_iterator_t *iter); 426 427 extern void unified_page_list_iterator_next(unified_page_list_iterator_t *iter); 428 429 extern bool unified_page_list_iterator_end(const unified_page_list_iterator_t *iter); 430 431 extern ppnum_t unified_page_list_iterator_page( 432 const unified_page_list_iterator_t *iter, 433 bool *is_fictitious); 434 435 extern void pmap_batch_set_cache_attributes( 436 const unified_page_list_t *, 437 unsigned int); 438 extern void pmap_sync_page_data_phys(ppnum_t pa); 439 extern void pmap_sync_page_attributes_phys(ppnum_t pa); 440 441 /* 442 * debug/assertions. pmap_verify_free returns true iff 443 * the given physical page is mapped into no pmap. 444 * pmap_assert_free() will panic() if pn is not free. 445 */ 446 extern bool pmap_verify_free(ppnum_t pn); 447 #if MACH_ASSERT 448 extern void pmap_assert_free(ppnum_t pn); 449 #endif 450 451 452 /* 453 * Sundry required (internal) routines 454 */ 455 #ifdef CURRENTLY_UNUSED_AND_UNTESTED 456 extern void pmap_collect(pmap_t pmap);/* Perform garbage 457 * collection, if any */ 458 #endif 459 /* 460 * Optional routines 461 */ 462 extern void(pmap_copy)( /* Copy range of mappings, 463 * if desired. */ 464 pmap_t dest, 465 pmap_t source, 466 vm_map_offset_t dest_va, 467 vm_map_size_t size, 468 vm_map_offset_t source_va); 469 470 extern kern_return_t(pmap_attribute)( /* Get/Set special memory 471 * attributes */ 472 pmap_t pmap, 473 vm_map_offset_t va, 474 vm_map_size_t size, 475 vm_machine_attribute_t attribute, 476 vm_machine_attribute_val_t* value); 477 478 /* 479 * Routines defined as macros. 480 */ 481 #ifndef PMAP_ACTIVATE_USER 482 #ifndef PMAP_ACTIVATE 483 #define PMAP_ACTIVATE_USER(thr, cpu) 484 #else /* PMAP_ACTIVATE */ 485 #define PMAP_ACTIVATE_USER(thr, cpu) { \ 486 pmap_t pmap; \ 487 \ 488 pmap = (thr)->map->pmap; \ 489 if (pmap != pmap_kernel()) \ 490 PMAP_ACTIVATE(pmap, (thr), (cpu)); \ 491 } 492 #endif /* PMAP_ACTIVATE */ 493 #endif /* PMAP_ACTIVATE_USER */ 494 495 #ifndef PMAP_DEACTIVATE_USER 496 #ifndef PMAP_DEACTIVATE 497 #define PMAP_DEACTIVATE_USER(thr, cpu) 498 #else /* PMAP_DEACTIVATE */ 499 #define PMAP_DEACTIVATE_USER(thr, cpu) { \ 500 pmap_t pmap; \ 501 \ 502 pmap = (thr)->map->pmap; \ 503 if ((pmap) != pmap_kernel()) \ 504 PMAP_DEACTIVATE(pmap, (thr), (cpu)); \ 505 } 506 #endif /* PMAP_DEACTIVATE */ 507 #endif /* PMAP_DEACTIVATE_USER */ 508 509 #ifndef PMAP_ACTIVATE_KERNEL 510 #ifndef PMAP_ACTIVATE 511 #define PMAP_ACTIVATE_KERNEL(cpu) 512 #else /* PMAP_ACTIVATE */ 513 #define PMAP_ACTIVATE_KERNEL(cpu) \ 514 PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu) 515 #endif /* PMAP_ACTIVATE */ 516 #endif /* PMAP_ACTIVATE_KERNEL */ 517 518 #ifndef PMAP_DEACTIVATE_KERNEL 519 #ifndef PMAP_DEACTIVATE 520 #define PMAP_DEACTIVATE_KERNEL(cpu) 521 #else /* PMAP_DEACTIVATE */ 522 #define PMAP_DEACTIVATE_KERNEL(cpu) \ 523 PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu) 524 #endif /* PMAP_DEACTIVATE */ 525 #endif /* PMAP_DEACTIVATE_KERNEL */ 526 527 #ifndef PMAP_SET_CACHE_ATTR 528 #define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op) \ 529 MACRO_BEGIN \ 530 if (!batch_pmap_op) { \ 531 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \ 532 (object)->set_cache_attr = TRUE; \ 533 } \ 534 MACRO_END 535 #endif /* PMAP_SET_CACHE_ATTR */ 536 537 #ifndef PMAP_BATCH_SET_CACHE_ATTR 538 #define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list, \ 539 cache_attr, num_pages, batch_pmap_op) \ 540 MACRO_BEGIN \ 541 if ((batch_pmap_op)) { \ 542 const unified_page_list_t __pmap_batch_list = { \ 543 .upl = {.upl_info = (user_page_list), \ 544 .upl_size = (num_pages),}, \ 545 .type = UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY, \ 546 }; \ 547 pmap_batch_set_cache_attributes( \ 548 &__pmap_batch_list, \ 549 (cache_attr)); \ 550 (object)->set_cache_attr = TRUE; \ 551 } \ 552 MACRO_END 553 #endif /* PMAP_BATCH_SET_CACHE_ATTR */ 554 555 /* 556 * Routines to manage reference/modify bits based on 557 * physical addresses, simulating them if not provided 558 * by the hardware. 559 */ 560 struct pfc { 561 long pfc_cpus; 562 long pfc_invalid_global; 563 }; 564 565 typedef struct pfc pmap_flush_context; 566 567 /* Clear reference bit */ 568 extern void pmap_clear_reference(ppnum_t pn); 569 /* Return reference bit */ 570 extern boolean_t(pmap_is_referenced)(ppnum_t pn); 571 /* Set modify bit */ 572 extern void pmap_set_modify(ppnum_t pn); 573 /* Clear modify bit */ 574 extern void pmap_clear_modify(ppnum_t pn); 575 /* Return modify bit */ 576 extern boolean_t pmap_is_modified(ppnum_t pn); 577 /* Return modified and referenced bits */ 578 extern unsigned int pmap_get_refmod(ppnum_t pn); 579 /* Clear modified and referenced bits */ 580 extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask); 581 #define VM_MEM_MODIFIED 0x01 /* Modified bit */ 582 #define VM_MEM_REFERENCED 0x02 /* Referenced bit */ 583 extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *); 584 585 /* 586 * Clears the reference and/or modified bits on a range of virtually 587 * contiguous pages. 588 * It returns true if the operation succeeded. If it returns false, 589 * nothing has been modified. 590 * This operation is only supported on some platforms, so callers MUST 591 * handle the case where it returns false. 592 */ 593 extern bool 594 pmap_clear_refmod_range_options( 595 pmap_t pmap, 596 vm_map_address_t start, 597 vm_map_address_t end, 598 unsigned int mask, 599 unsigned int options); 600 601 602 extern void pmap_flush_context_init(pmap_flush_context *); 603 extern void pmap_flush(pmap_flush_context *); 604 605 /* 606 * Routines that operate on ranges of virtual addresses. 607 */ 608 extern void pmap_protect( /* Change protections. */ 609 pmap_t map, 610 vm_map_offset_t s, 611 vm_map_offset_t e, 612 vm_prot_t prot); 613 614 extern void pmap_protect_options( /* Change protections. */ 615 pmap_t map, 616 vm_map_offset_t s, 617 vm_map_offset_t e, 618 vm_prot_t prot, 619 unsigned int options, 620 void *arg); 621 622 extern void(pmap_pageable)( 623 pmap_t pmap, 624 vm_map_offset_t start, 625 vm_map_offset_t end, 626 boolean_t pageable); 627 628 extern uint64_t pmap_shared_region_size_min(pmap_t map); 629 630 extern kern_return_t pmap_nest(pmap_t, 631 pmap_t, 632 addr64_t, 633 uint64_t); 634 extern kern_return_t pmap_unnest(pmap_t, 635 addr64_t, 636 uint64_t); 637 638 #define PMAP_UNNEST_CLEAN 1 639 640 #if __arm64__ 641 #if CONFIG_SPTM 642 #define PMAP_FORK_NEST 1 643 #endif /* CONFIG_SPTM */ 644 645 #if PMAP_FORK_NEST 646 extern kern_return_t pmap_fork_nest( 647 pmap_t old_pmap, 648 pmap_t new_pmap, 649 vm_map_offset_t *nesting_start, 650 vm_map_offset_t *nesting_end); 651 #endif /* PMAP_FORK_NEST */ 652 #endif /* __arm64__ */ 653 654 extern kern_return_t pmap_unnest_options(pmap_t, 655 addr64_t, 656 uint64_t, 657 unsigned int); 658 extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *); 659 extern void pmap_advise_pagezero_range(pmap_t, uint64_t); 660 #endif /* MACH_KERNEL_PRIVATE */ 661 662 extern boolean_t pmap_is_noencrypt(ppnum_t); 663 extern void pmap_set_noencrypt(ppnum_t pn); 664 extern void pmap_clear_noencrypt(ppnum_t pn); 665 666 /* 667 * JMM - This portion is exported to other kernel components right now, 668 * but will be pulled back in the future when the needed functionality 669 * is provided in a cleaner manner. 670 */ 671 672 extern const pmap_t kernel_pmap; /* The kernel's map */ 673 #define pmap_kernel() (kernel_pmap) 674 675 #define VM_MEM_SUPERPAGE 0x100 /* map a superpage instead of a base page */ 676 #define VM_MEM_STACK 0x200 677 678 /* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS 679 * definitions in i386/pmap_internal.h 680 */ 681 #define PMAP_CREATE_64BIT 0x1 682 683 #if __x86_64__ 684 685 #define PMAP_CREATE_EPT 0x2 686 #define PMAP_CREATE_TEST 0x4 /* pmap will be used for testing purposes only */ 687 #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT | PMAP_CREATE_TEST) 688 689 #else 690 691 #define PMAP_CREATE_STAGE2 0 692 #if __arm64e__ 693 #define PMAP_CREATE_DISABLE_JOP 0x4 694 #else 695 #define PMAP_CREATE_DISABLE_JOP 0 696 #endif 697 #if __ARM_MIXED_PAGE_SIZE__ 698 #define PMAP_CREATE_FORCE_4K_PAGES 0x8 699 #else 700 #define PMAP_CREATE_FORCE_4K_PAGES 0 701 #endif /* __ARM_MIXED_PAGE_SIZE__ */ 702 #define PMAP_CREATE_X86_64 0 703 #if CONFIG_ROSETTA 704 #define PMAP_CREATE_ROSETTA 0x20 705 #else 706 #define PMAP_CREATE_ROSETTA 0 707 #endif /* CONFIG_ROSETTA */ 708 709 #define PMAP_CREATE_TEST 0x40 /* pmap will be used for testing purposes only */ 710 711 /* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */ 712 #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | \ 713 PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64 | PMAP_CREATE_ROSETTA | PMAP_CREATE_TEST) 714 715 #endif /* __x86_64__ */ 716 717 #define PMAP_OPTIONS_NOWAIT 0x1 /* don't block, return 718 * KERN_RESOURCE_SHORTAGE 719 * instead */ 720 #define PMAP_OPTIONS_NOENTER 0x2 /* expand pmap if needed 721 * but don't enter mapping 722 */ 723 #define PMAP_OPTIONS_COMPRESSOR 0x4 /* credit the compressor for 724 * this operation */ 725 #define PMAP_OPTIONS_INTERNAL 0x8 /* page from internal object */ 726 #define PMAP_OPTIONS_REUSABLE 0x10 /* page is "reusable" */ 727 #define PMAP_OPTIONS_NOFLUSH 0x20 /* delay flushing of pmap */ 728 #define PMAP_OPTIONS_NOREFMOD 0x40 /* don't need ref/mod on disconnect */ 729 #define PMAP_OPTIONS_ALT_ACCT 0x80 /* use alternate accounting scheme for page */ 730 #define PMAP_OPTIONS_REMOVE 0x100 /* removing a mapping */ 731 #define PMAP_OPTIONS_SET_REUSABLE 0x200 /* page is now "reusable" */ 732 #define PMAP_OPTIONS_CLEAR_REUSABLE 0x400 /* page no longer "reusable" */ 733 #define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor 734 * iff page was modified */ 735 #define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000 /* allow protections to be 736 * be upgraded */ 737 #define PMAP_OPTIONS_CLEAR_WRITE 0x2000 738 #define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */ 739 #if defined(__arm__) || defined(__arm64__) 740 #define PMAP_OPTIONS_FF_LOCKED 0x8000 741 #define PMAP_OPTIONS_FF_WIRED 0x10000 742 #endif 743 #define PMAP_OPTIONS_XNU_USER_DEBUG 0x20000 744 745 /* Indicates that pmap_enter() or pmap_remove() is being called with preemption already disabled. */ 746 #define PMAP_OPTIONS_NOPREEMPT 0x80000 747 748 #define PMAP_OPTIONS_MAP_TPRO 0x40000 749 750 #define PMAP_OPTIONS_RESERVED_MASK 0xFF000000 /* encoding space reserved for internal pmap use */ 751 752 #if !defined(__LP64__) 753 extern vm_offset_t pmap_extract(pmap_t pmap, 754 vm_map_offset_t va); 755 #endif 756 extern void pmap_change_wiring( /* Specify pageability */ 757 pmap_t pmap, 758 vm_map_offset_t va, 759 boolean_t wired); 760 761 /* LP64todo - switch to vm_map_offset_t when it grows */ 762 extern void pmap_remove( /* Remove mappings. */ 763 pmap_t map, 764 vm_map_offset_t s, 765 vm_map_offset_t e); 766 767 extern void pmap_remove_options( /* Remove mappings. */ 768 pmap_t map, 769 vm_map_offset_t s, 770 vm_map_offset_t e, 771 int options); 772 773 extern void fillPage(ppnum_t pa, unsigned int fill); 774 775 #if defined(__LP64__) 776 extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr); 777 extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr); 778 extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr); 779 #endif 780 781 mach_vm_size_t pmap_query_resident(pmap_t pmap, 782 vm_map_offset_t s, 783 vm_map_offset_t e, 784 mach_vm_size_t *compressed_bytes_p); 785 786 extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value); 787 extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap); 788 789 /* Inform the pmap layer that there is a JIT entry in this map. */ 790 extern void pmap_set_jit_entitled(pmap_t pmap); 791 792 /* Ask the pmap layer if there is a JIT entry in this map. */ 793 extern bool pmap_get_jit_entitled(pmap_t pmap); 794 795 /* Inform the pmap layer that the XO register is repurposed for this map */ 796 extern void pmap_set_tpro(pmap_t pmap); 797 798 /* Ask the pmap layer if there is a TPRO entry in this map. */ 799 extern bool pmap_get_tpro(pmap_t pmap); 800 801 /* 802 * Tell the pmap layer what range within the nested region the VM intends to 803 * use. 804 */ 805 extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size); 806 807 extern bool pmap_is_nested(pmap_t pmap); 808 809 /* 810 * Dump page table contents into the specified buffer. Returns KERN_INSUFFICIENT_BUFFER_SIZE 811 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration. 812 * This is expected to only be called from kernel debugger context, 813 * so synchronization is not required. 814 */ 815 816 extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied); 817 818 /* Asks the pmap layer for number of bits used for VA address. */ 819 extern uint32_t pmap_user_va_bits(pmap_t pmap); 820 extern uint32_t pmap_kernel_va_bits(void); 821 822 /* 823 * Indicates if any special policy is applied to this protection by the pmap 824 * layer. 825 */ 826 bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot); 827 828 /* 829 * Causes the pmap to return any available pages that it can return cheaply to 830 * the VM. 831 */ 832 uint64_t pmap_release_pages_fast(void); 833 834 #define PMAP_QUERY_PAGE_PRESENT 0x01 835 #define PMAP_QUERY_PAGE_REUSABLE 0x02 836 #define PMAP_QUERY_PAGE_INTERNAL 0x04 837 #define PMAP_QUERY_PAGE_ALTACCT 0x08 838 #define PMAP_QUERY_PAGE_COMPRESSED 0x10 839 #define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT 0x20 840 extern kern_return_t pmap_query_page_info( 841 pmap_t pmap, 842 vm_map_offset_t va, 843 int *disp); 844 845 extern bool pmap_in_ppl(void); 846 847 extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]); 848 extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]); 849 850 /** 851 * Indicates whether the device supports register-level MMIO access control. 852 * 853 * @note Unlike the pmap-io-ranges mechanism, which enforces PPL-only register 854 * writability at page granularity, this mechanism allows specific registers 855 * on a read-mostly page to be written using a dedicated guarded mode trap 856 * without requiring a full PPL driver extension. 857 * 858 * @return True if the device supports register-level MMIO access control. 859 */ 860 extern bool pmap_has_iofilter_protected_write(void); 861 862 /** 863 * Performs a write to the I/O register specified by addr on supported devices. 864 * 865 * @note On supported devices (determined by pmap_has_iofilter_protected_write()), this 866 * function goes over the sorted I/O filter entry table. If there is a hit, the 867 * write is performed from Guarded Mode. Otherwise, the write is performed from 868 * Normal Mode (kernel mode). Note that you can still hit an exception if the 869 * register is owned by PPL but not allowed by an io-filter-entry in the device tree. 870 * 871 * @note On unsupported devices, this function will panic. 872 * 873 * @param addr The address of the register. 874 * @param value The value to be written. 875 * @param width The width of the I/O register, supported values are 1, 2, 4 and 8. 876 */ 877 extern void pmap_iofilter_protected_write(vm_address_t addr, uint64_t value, uint64_t width); 878 879 extern void *pmap_claim_reserved_ppl_page(void); 880 extern void pmap_free_reserved_ppl_page(void *kva); 881 882 extern void pmap_ledger_verify_size(size_t); 883 extern ledger_t pmap_ledger_alloc(void); 884 extern void pmap_ledger_free(ledger_t); 885 886 extern bool pmap_is_bad_ram(ppnum_t ppn); 887 888 #if __arm64__ 889 extern bool pmap_is_exotic(pmap_t pmap); 890 #else /* __arm64__ */ 891 #define pmap_is_exotic(pmap) false 892 #endif /* __arm64__ */ 893 894 895 /* 896 * Returns a subset of pmap_cs non-default configuration, 897 * e.g. loosening up of some restrictions through pmap_cs or amfi 898 * boot-args. The return value is a bit field with possible bits 899 * described below. If default, the function will return 0. Note that 900 * this does not work the other way: 0 does not imply that pmap_cs 901 * runs in default configuration, and only a small configuration 902 * subset is returned by this function. 903 * 904 * Never assume the system is "secure" if this returns 0. 905 */ 906 extern int pmap_cs_configuration(void); 907 908 #if XNU_KERNEL_PRIVATE 909 910 typedef enum { 911 PMAP_FEAT_UEXEC = 1 912 } pmap_feature_flags_t; 913 914 #if defined(__x86_64__) 915 916 extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat); 917 918 #endif 919 #if defined(__arm64__) 920 921 /** 922 * Check if a particular pmap is used for stage2 translations or not. 923 */ 924 extern bool 925 pmap_performs_stage2_translations(const pmap_t pmap); 926 927 #endif /* defined(__arm64__) */ 928 929 extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr); 930 931 #endif /* XNU_KERNEL_PRIVATE */ 932 933 #if CONFIG_SPTM 934 /* 935 * The TrustedExecutionMonitor address space data structure is kept within the 936 * pmap structure in order to provide a coherent API to the rest of the kernel 937 * for working with code signing monitors. 938 * 939 * However, a lot of parts of the kernel don't have visibility into the pmap 940 * data structure as they are opaque unless you're in the Mach portion of the 941 * kernel. To allievate this, we provide pmap APIs to the rest of the kernel. 942 */ 943 #include <TrustedExecutionMonitor/API.h> 944 945 /* 946 * All pages allocated by TXM are also kept within the TXM VM object, which allows 947 * tracking it for accounting and debugging purposes. 948 */ 949 extern vm_object_t txm_vm_object; 950 951 /** 952 * Acquire the pointer of the kernel pmap being used for the system. 953 */ 954 extern pmap_t 955 pmap_txm_kernel_pmap(void); 956 957 /** 958 * Acquire the TXM address space object stored within the pmap. 959 */ 960 extern TXMAddressSpace_t* 961 pmap_txm_addr_space(const pmap_t pmap); 962 963 /** 964 * Set the TXM address space object within the pmap. 965 */ 966 extern void 967 pmap_txm_set_addr_space( 968 pmap_t pmap, 969 TXMAddressSpace_t *txm_addr_space); 970 971 /** 972 * Set the trust level of the TXM address space object within the pmap. 973 */ 974 extern void 975 pmap_txm_set_trust_level( 976 pmap_t pmap, 977 CSTrust_t trust_level); 978 979 /** 980 * Get the trust level of the TXM address space object within the pmap. 981 */ 982 extern kern_return_t 983 pmap_txm_get_trust_level_kdp( 984 pmap_t pmap, 985 CSTrust_t *trust_level); 986 987 /** 988 * Get the address range of the JIT region within the pmap, if any. 989 */ 990 kern_return_t 991 pmap_txm_get_jit_address_range_kdp( 992 pmap_t pmap, 993 uintptr_t *jit_region_start, 994 uintptr_t *jit_region_end); 995 996 /** 997 * Take a shared lock on the pmap in order to enforce safe concurrency for 998 * an operation on the TXM address space object. Passing in NULL takes the lock 999 * on the current pmap. 1000 */ 1001 extern void 1002 pmap_txm_acquire_shared_lock(pmap_t pmap); 1003 1004 /** 1005 * Release the shared lock which was previously acquired for operations on 1006 * the TXM address space object. Passing in NULL releases the lock for the 1007 * current pmap. 1008 */ 1009 extern void 1010 pmap_txm_release_shared_lock(pmap_t pmap); 1011 1012 /** 1013 * Take an exclusive lock on the pmap in order to enforce safe concurrency for 1014 * an operation on the TXM address space object. Passing in NULL takes the lock 1015 * on the current pmap. 1016 */ 1017 extern void 1018 pmap_txm_acquire_exclusive_lock(pmap_t pmap); 1019 1020 /** 1021 * Release the exclusive lock which was previously acquired for operations on 1022 * the TXM address space object. Passing in NULL releases the lock for the 1023 * current pmap. 1024 */ 1025 extern void 1026 pmap_txm_release_exclusive_lock(pmap_t pmap); 1027 1028 /** 1029 * Transfer a page to the TXM_DEFAULT type after resolving its mapping from its 1030 * virtual to physical address. 1031 */ 1032 extern void 1033 pmap_txm_transfer_page(const vm_address_t addr); 1034 1035 /** 1036 * Grab an available page from the VM free list, add it to the TXM VM object and 1037 * then transfer it to be owned by TXM. 1038 * 1039 * Returns the physical address of the page allocated. 1040 */ 1041 extern vm_map_address_t 1042 pmap_txm_allocate_page(void); 1043 1044 #endif /* CONFIG_SPTM */ 1045 1046 1047 #endif /* KERNEL_PRIVATE */ 1048 1049 #endif /* _VM_PMAP_H_ */ 1050