1 /* 2 * Copyright (c) 2000-2016 Apple Computer, Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 /* 29 * @OSF_COPYRIGHT@ 30 */ 31 /* 32 * Mach Operating System 33 * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University 34 * All Rights Reserved. 35 * 36 * Permission to use, copy, modify and distribute this software and its 37 * documentation is hereby granted, provided that both the copyright 38 * notice and this permission notice appear in all copies of the 39 * software, derivative works or modified versions, and any portions 40 * thereof, and that both notices appear in supporting documentation. 41 * 42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 45 * 46 * Carnegie Mellon requests users of this software to return to 47 * 48 * Software Distribution Coordinator or [email protected] 49 * School of Computer Science 50 * Carnegie Mellon University 51 * Pittsburgh PA 15213-3890 52 * 53 * any improvements or extensions that they make and grant Carnegie Mellon 54 * the rights to redistribute these changes. 55 */ 56 /* 57 */ 58 /* 59 * File: memory_object.h 60 * Author: Michael Wayne Young 61 * 62 * External memory management interface definition. 63 */ 64 65 #ifndef _MACH_MEMORY_OBJECT_TYPES_H_ 66 #define _MACH_MEMORY_OBJECT_TYPES_H_ 67 68 /* 69 * User-visible types used in the external memory 70 * management interface: 71 */ 72 73 #include <mach/port.h> 74 #include <mach/message.h> 75 #include <mach/vm_prot.h> 76 #include <mach/vm_sync.h> 77 #include <mach/vm_types.h> 78 #include <mach/machine/vm_types.h> 79 80 #include <sys/cdefs.h> 81 82 #if XNU_KERNEL_PRIVATE 83 #include <os/refcnt.h> 84 #if __LP64__ 85 #define MEMORY_OBJECT_HAS_REFCOUNT 1 86 #else 87 #define MEMORY_OBJECT_HAS_REFCOUNT 0 88 #endif 89 #endif /* XNU_KERNEL_PRIVATE */ 90 91 #define VM_64_BIT_DATA_OBJECTS 92 93 typedef unsigned long long memory_object_offset_t; 94 typedef unsigned long long memory_object_size_t; 95 typedef natural_t memory_object_cluster_size_t; 96 typedef natural_t * memory_object_fault_info_t; 97 98 typedef unsigned long long vm_object_id_t; 99 100 101 /* 102 * Temporary until real EMMI version gets re-implemented 103 */ 104 105 #ifdef KERNEL_PRIVATE 106 107 /* IMPORTANT: this type must match "ipc_object_bits_t" from ipc/ipc_port.h */ 108 typedef natural_t mo_ipc_object_bits_t; 109 110 struct memory_object_pager_ops; /* forward declaration */ 111 112 typedef struct vm_object *memory_object_control_t; 113 /* 114 * "memory_object" used to be a Mach port in user space and could be passed 115 * as such to some kernel APIs. 116 * 117 * Its first field must match the "io_bits" field of a 118 * "struct ipc_object" to identify them as a "IKOT_MEMORY_OBJECT". 119 */ 120 typedef struct memory_object { 121 mo_ipc_object_bits_t mo_ikot; /* DO NOT CHANGE */ 122 #if __LP64__ 123 #if XNU_KERNEL_PRIVATE 124 /* 125 * On LP64 there's a 4 byte hole that is perfect for a refcount. 126 * Expose it so that all pagers can take advantage of it. 127 */ 128 os_ref_atomic_t mo_ref; 129 #else 130 unsigned int __mo_padding; 131 #endif /* XNU_KERNEL_PRIVATE */ 132 #endif /* __LP64__ */ 133 const struct memory_object_pager_ops *mo_pager_ops; 134 memory_object_control_t mo_control; 135 } *memory_object_t; 136 137 typedef const struct memory_object_pager_ops { 138 void (*memory_object_reference)( 139 memory_object_t mem_obj); 140 void (*memory_object_deallocate)( 141 memory_object_t mem_obj); 142 kern_return_t (*memory_object_init)( 143 memory_object_t mem_obj, 144 memory_object_control_t mem_control, 145 memory_object_cluster_size_t size); 146 kern_return_t (*memory_object_terminate)( 147 memory_object_t mem_obj); 148 kern_return_t (*memory_object_data_request)( 149 memory_object_t mem_obj, 150 memory_object_offset_t offset, 151 memory_object_cluster_size_t length, 152 vm_prot_t desired_access, 153 memory_object_fault_info_t fault_info); 154 kern_return_t (*memory_object_data_return)( 155 memory_object_t mem_obj, 156 memory_object_offset_t offset, 157 memory_object_cluster_size_t size, 158 memory_object_offset_t *resid_offset, 159 int *io_error, 160 boolean_t dirty, 161 boolean_t kernel_copy, 162 int upl_flags); 163 kern_return_t (*memory_object_data_initialize)( 164 memory_object_t mem_obj, 165 memory_object_offset_t offset, 166 memory_object_cluster_size_t size); 167 #if XNU_KERNEL_PRIVATE 168 void *__obsolete_memory_object_data_unlock; 169 void *__obsolete_memory_object_synchronize; 170 #else 171 kern_return_t (*memory_object_data_unlock)( 172 memory_object_t mem_obj, 173 memory_object_offset_t offset, 174 memory_object_size_t size, 175 vm_prot_t desired_access); /* obsolete */ 176 kern_return_t (*memory_object_synchronize)( 177 memory_object_t mem_obj, 178 memory_object_offset_t offset, 179 memory_object_size_t size, 180 vm_sync_t sync_flags); /* obsolete */ 181 #endif /* !XNU_KERNEL_PRIVATE */ 182 kern_return_t (*memory_object_map)( 183 memory_object_t mem_obj, 184 vm_prot_t prot); 185 kern_return_t (*memory_object_last_unmap)( 186 memory_object_t mem_obj); 187 #if XNU_KERNEL_PRIVATE 188 void *__obsolete_memory_object_data_reclaim; 189 #else 190 kern_return_t (*memory_object_data_reclaim)( 191 memory_object_t mem_obj, 192 boolean_t reclaim_backing_store); /* obsolete */ 193 #endif /* !XNU_KERNEL_PRIVATE */ 194 boolean_t (*memory_object_backing_object)( 195 memory_object_t mem_obj, 196 memory_object_offset_t mem_obj_offset, 197 vm_object_t *backing_object, 198 vm_object_offset_t *backing_offset); 199 const char *memory_object_pager_name; 200 } * memory_object_pager_ops_t; 201 202 #else /* KERNEL_PRIVATE */ 203 204 typedef mach_port_t memory_object_t; 205 /* 206 * vestigial, maintained for source compatibility, 207 * no MIG interface will accept or return non NULL 208 * objects for those. 209 */ 210 typedef mach_port_t memory_object_control_t; 211 212 #endif /* KERNEL_PRIVATE */ 213 214 typedef memory_object_t *memory_object_array_t; 215 /* A memory object ... */ 216 /* Used by the kernel to retrieve */ 217 /* or store data */ 218 219 typedef mach_port_t memory_object_name_t; 220 /* Used to describe the memory ... */ 221 /* object in vm_regions() calls */ 222 223 typedef mach_port_t memory_object_default_t; 224 /* Registered with the host ... */ 225 /* for creating new internal objects */ 226 227 #define MEMORY_OBJECT_NULL ((memory_object_t) 0) 228 #define MEMORY_OBJECT_CONTROL_NULL ((memory_object_control_t) 0) 229 #define MEMORY_OBJECT_NAME_NULL ((memory_object_name_t) 0) 230 #define MEMORY_OBJECT_DEFAULT_NULL ((memory_object_default_t) 0) 231 232 233 typedef int memory_object_copy_strategy_t; 234 /* How memory manager handles copy: */ 235 #define MEMORY_OBJECT_COPY_NONE 0 236 /* ... No special support */ 237 #define MEMORY_OBJECT_COPY_CALL 1 238 /* ... Make call on memory manager */ 239 #define MEMORY_OBJECT_COPY_DELAY 2 240 /* ... Memory manager doesn't 241 * change data externally. 242 */ 243 #define MEMORY_OBJECT_COPY_TEMPORARY 3 244 /* ... Memory manager doesn't 245 * change data externally, and 246 * doesn't need to see changes. 247 */ 248 #define MEMORY_OBJECT_COPY_SYMMETRIC 4 249 /* ... Memory manager doesn't 250 * change data externally, 251 * doesn't need to see changes, 252 * and object will not be 253 * multiply mapped. 254 * 255 * XXX 256 * Not yet safe for non-kernel use. 257 */ 258 259 #define MEMORY_OBJECT_COPY_INVALID 5 260 /* ... An invalid copy strategy, 261 * for external objects which 262 * have not been initialized. 263 * Allows copy_strategy to be 264 * examined without also 265 * examining pager_ready and 266 * internal. 267 */ 268 269 #define MEMORY_OBJECT_COPY_DELAY_FORK 6 270 /* 271 * ... Like MEMORY_OBJECT_COPY_DELAY for vm_map_fork() but like 272 * MEMORY_OBJECT_COPY_NONE otherwise. 273 */ 274 275 typedef int memory_object_return_t; 276 /* Which pages to return to manager 277 * this time (lock_request) */ 278 #define MEMORY_OBJECT_RETURN_NONE 0 279 /* ... don't return any. */ 280 #define MEMORY_OBJECT_RETURN_DIRTY 1 281 /* ... only dirty pages. */ 282 #define MEMORY_OBJECT_RETURN_ALL 2 283 /* ... dirty and precious pages. */ 284 #define MEMORY_OBJECT_RETURN_ANYTHING 3 285 /* ... any resident page. */ 286 287 /* 288 * Data lock request flags 289 */ 290 291 #define MEMORY_OBJECT_DATA_FLUSH 0x1 292 #define MEMORY_OBJECT_DATA_NO_CHANGE 0x2 293 #define MEMORY_OBJECT_DATA_PURGE 0x4 294 #define MEMORY_OBJECT_COPY_SYNC 0x8 295 #define MEMORY_OBJECT_DATA_SYNC 0x10 296 #define MEMORY_OBJECT_IO_SYNC 0x20 297 #define MEMORY_OBJECT_DATA_FLUSH_ALL 0x40 298 299 /* 300 * Types for the memory object flavor interfaces 301 */ 302 303 #define MEMORY_OBJECT_INFO_MAX (1024) 304 typedef int *memory_object_info_t; 305 typedef int memory_object_flavor_t; 306 typedef int memory_object_info_data_t[MEMORY_OBJECT_INFO_MAX]; 307 308 309 #define MEMORY_OBJECT_PERFORMANCE_INFO 11 310 #define MEMORY_OBJECT_ATTRIBUTE_INFO 14 311 #define MEMORY_OBJECT_BEHAVIOR_INFO 15 312 313 #ifdef PRIVATE 314 315 #define OLD_MEMORY_OBJECT_BEHAVIOR_INFO 10 316 #define OLD_MEMORY_OBJECT_ATTRIBUTE_INFO 12 317 318 struct old_memory_object_behave_info { 319 memory_object_copy_strategy_t copy_strategy; 320 boolean_t temporary; 321 boolean_t invalidate; 322 }; 323 324 struct old_memory_object_attr_info { /* old attr list */ 325 boolean_t object_ready; 326 boolean_t may_cache; 327 memory_object_copy_strategy_t copy_strategy; 328 }; 329 330 typedef struct old_memory_object_behave_info *old_memory_object_behave_info_t; 331 typedef struct old_memory_object_behave_info old_memory_object_behave_info_data_t; 332 typedef struct old_memory_object_attr_info *old_memory_object_attr_info_t; 333 typedef struct old_memory_object_attr_info old_memory_object_attr_info_data_t; 334 335 #define OLD_MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ 336 (sizeof(old_memory_object_behave_info_data_t)/sizeof(int))) 337 #define OLD_MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ 338 (sizeof(old_memory_object_attr_info_data_t)/sizeof(int))) 339 340 #ifdef KERNEL 341 342 __BEGIN_DECLS 343 extern void memory_object_reference(memory_object_t object); 344 extern void memory_object_deallocate(memory_object_t object); 345 extern boolean_t memory_object_backing_object( 346 memory_object_t mem_obj, 347 memory_object_offset_t offset, 348 vm_object_t *backing_object, 349 vm_object_offset_t *backing_offset); 350 351 extern void memory_object_control_reference(memory_object_control_t control); 352 extern void memory_object_control_deallocate(memory_object_control_t control); 353 extern int memory_object_control_uiomove(memory_object_control_t, memory_object_offset_t, void *, int, int, int, int); 354 __END_DECLS 355 356 #endif /* KERNEL */ 357 358 #endif /* PRIVATE */ 359 360 struct memory_object_perf_info { 361 memory_object_cluster_size_t cluster_size; 362 boolean_t may_cache; 363 }; 364 365 struct memory_object_attr_info { 366 memory_object_copy_strategy_t copy_strategy; 367 memory_object_cluster_size_t cluster_size; 368 boolean_t may_cache_object; 369 boolean_t temporary; 370 }; 371 372 struct memory_object_behave_info { 373 memory_object_copy_strategy_t copy_strategy; 374 boolean_t temporary; 375 boolean_t invalidate; 376 boolean_t silent_overwrite; 377 boolean_t advisory_pageout; 378 }; 379 380 381 typedef struct memory_object_behave_info *memory_object_behave_info_t; 382 typedef struct memory_object_behave_info memory_object_behave_info_data_t; 383 384 typedef struct memory_object_perf_info *memory_object_perf_info_t; 385 typedef struct memory_object_perf_info memory_object_perf_info_data_t; 386 387 typedef struct memory_object_attr_info *memory_object_attr_info_t; 388 typedef struct memory_object_attr_info memory_object_attr_info_data_t; 389 390 #define MEMORY_OBJECT_BEHAVE_INFO_COUNT ((mach_msg_type_number_t) \ 391 (sizeof(memory_object_behave_info_data_t)/sizeof(int))) 392 #define MEMORY_OBJECT_PERF_INFO_COUNT ((mach_msg_type_number_t) \ 393 (sizeof(memory_object_perf_info_data_t)/sizeof(int))) 394 #define MEMORY_OBJECT_ATTR_INFO_COUNT ((mach_msg_type_number_t) \ 395 (sizeof(memory_object_attr_info_data_t)/sizeof(int))) 396 397 #define invalid_memory_object_flavor(f) \ 398 (f != MEMORY_OBJECT_ATTRIBUTE_INFO && \ 399 f != MEMORY_OBJECT_PERFORMANCE_INFO && \ 400 f != OLD_MEMORY_OBJECT_BEHAVIOR_INFO && \ 401 f != MEMORY_OBJECT_BEHAVIOR_INFO && \ 402 f != OLD_MEMORY_OBJECT_ATTRIBUTE_INFO) 403 404 405 /* 406 * Used to support options on memory_object_release_name call 407 */ 408 #define MEMORY_OBJECT_TERMINATE_IDLE 0x1 409 #define MEMORY_OBJECT_RESPECT_CACHE 0x2 410 #define MEMORY_OBJECT_RELEASE_NO_OP 0x4 411 412 413 /* named entry processor mapping options */ 414 /* enumerated */ 415 #define MAP_MEM_NOOP 0 416 #define MAP_MEM_COPYBACK 1 417 #define MAP_MEM_IO 2 418 #define MAP_MEM_WTHRU 3 419 #define MAP_MEM_WCOMB 4 /* Write combining mode */ 420 /* aka store gather */ 421 #define MAP_MEM_INNERWBACK 5 422 #define MAP_MEM_POSTED 6 423 #define MAP_MEM_RT 7 424 #define MAP_MEM_POSTED_REORDERED 8 425 #define MAP_MEM_POSTED_COMBINED_REORDERED 9 426 427 #define GET_MAP_MEM(flags) \ 428 ((((unsigned int)(flags)) >> 24) & 0xFF) 429 430 #define SET_MAP_MEM(caching, flags) \ 431 ((flags) = ((((unsigned int)(caching)) << 24) \ 432 & 0xFF000000) | ((flags) & 0xFFFFFF)); 433 434 /* leave room for vm_prot bits (0xFF ?) */ 435 #define MAP_MEM_PROT_MASK 0xFF 436 #define MAP_MEM_LEDGER_TAGGED 0x002000 /* object owned by a specific task and ledger */ 437 #define MAP_MEM_PURGABLE_KERNEL_ONLY 0x004000 /* volatility controlled by kernel */ 438 #define MAP_MEM_GRAB_SECLUDED 0x008000 /* can grab secluded pages */ 439 #define MAP_MEM_ONLY 0x010000 /* change processor caching */ 440 #define MAP_MEM_NAMED_CREATE 0x020000 /* create extant object */ 441 #define MAP_MEM_PURGABLE 0x040000 /* create a purgable VM object */ 442 #define MAP_MEM_NAMED_REUSE 0x080000 /* reuse provided entry if identical */ 443 #define MAP_MEM_USE_DATA_ADDR 0x100000 /* preserve address of data, rather than base of page */ 444 #define MAP_MEM_VM_COPY 0x200000 /* make a copy of a VM range */ 445 #define MAP_MEM_VM_SHARE 0x400000 /* extract a VM range for remap */ 446 #define MAP_MEM_4K_DATA_ADDR 0x800000 /* preserve 4K aligned address of data */ 447 448 #define MAP_MEM_FLAGS_MASK 0x00FFFF00 449 #define MAP_MEM_FLAGS_USER ( \ 450 MAP_MEM_PURGABLE_KERNEL_ONLY | \ 451 MAP_MEM_GRAB_SECLUDED | \ 452 MAP_MEM_ONLY | \ 453 MAP_MEM_NAMED_CREATE | \ 454 MAP_MEM_PURGABLE | \ 455 MAP_MEM_NAMED_REUSE | \ 456 MAP_MEM_USE_DATA_ADDR | \ 457 MAP_MEM_VM_COPY | \ 458 MAP_MEM_VM_SHARE | \ 459 MAP_MEM_LEDGER_TAGGED | \ 460 MAP_MEM_4K_DATA_ADDR) 461 #define MAP_MEM_FLAGS_ALL ( \ 462 MAP_MEM_FLAGS_USER) 463 464 #ifdef KERNEL 465 466 /* 467 * Universal Page List data structures 468 * 469 * A UPL describes a bounded set of physical pages 470 * associated with some range of an object or map 471 * and a snapshot of the attributes associated with 472 * each of those pages. 473 */ 474 #ifdef PRIVATE 475 #define MAX_UPL_TRANSFER_BYTES (1024 * 1024) 476 #define MAX_UPL_SIZE_BYTES (1024 * 1024 * 64) 477 478 #define MAX_UPL_SIZE (MAX_UPL_SIZE_BYTES / PAGE_SIZE) 479 #define MAX_UPL_TRANSFER (MAX_UPL_TRANSFER_BYTES / PAGE_SIZE) 480 481 struct upl_page_info { 482 ppnum_t phys_addr; /* physical page index number */ 483 unsigned int 484 #ifdef XNU_KERNEL_PRIVATE 485 free_when_done:1, /* page is to be freed on commit */ 486 absent:1, /* No valid data in this page */ 487 dirty:1, /* Page must be cleaned (O) */ 488 precious:1, /* must be cleaned, we have only copy */ 489 device:1, /* no page data, mapped dev memory */ 490 speculative:1, /* page is valid, but not yet accessed */ 491 #define VMP_CS_BITS 4 492 #define VMP_CS_ALL_FALSE 0x0 493 #define VMP_CS_ALL_TRUE 0xF 494 cs_validated:VMP_CS_BITS, /* CODE SIGNING: page was validated */ 495 cs_tainted:VMP_CS_BITS, /* CODE SIGNING: page is tainted */ 496 cs_nx:VMP_CS_BITS, /* CODE SIGNING: page is NX */ 497 498 needed:1, /* page should be left in cache on abort */ 499 mark:1, /* a mark flag for the creator to use as they wish */ 500 :0; /* force to long boundary */ 501 #else 502 opaque; /* use upl_page_xxx() accessor funcs */ 503 #endif /* XNU_KERNEL_PRIVATE */ 504 }; 505 506 #else 507 508 struct upl_page_info { 509 unsigned int opaque[2]; /* use upl_page_xxx() accessor funcs */ 510 }; 511 512 #endif /* PRIVATE */ 513 514 typedef struct upl_page_info upl_page_info_t; 515 typedef upl_page_info_t *upl_page_info_array_t; 516 typedef upl_page_info_array_t upl_page_list_ptr_t; 517 518 typedef uint32_t upl_offset_t; /* page-aligned byte offset */ 519 typedef uint32_t upl_size_t; /* page-aligned byte size */ 520 #define UPL_SIZE_MAX (UINT32_MAX & ~PAGE_MASK) 521 522 /* upl invocation flags */ 523 /* top nibble is used by super upl */ 524 525 typedef uint64_t upl_control_flags_t; 526 527 #define UPL_FLAGS_NONE 0x00000000ULL 528 #define UPL_COPYOUT_FROM 0x00000001ULL 529 #define UPL_PRECIOUS 0x00000002ULL 530 #define UPL_NO_SYNC 0x00000004ULL 531 #define UPL_CLEAN_IN_PLACE 0x00000008ULL 532 #define UPL_NOBLOCK 0x00000010ULL 533 #define UPL_RET_ONLY_DIRTY 0x00000020ULL 534 #define UPL_SET_INTERNAL 0x00000040ULL 535 #define UPL_QUERY_OBJECT_TYPE 0x00000080ULL 536 #define UPL_RET_ONLY_ABSENT 0x00000100ULL /* used only for COPY_FROM = FALSE */ 537 #define UPL_FILE_IO 0x00000200ULL 538 #define UPL_SET_LITE 0x00000400ULL 539 #define UPL_SET_INTERRUPTIBLE 0x00000800ULL 540 #define UPL_SET_IO_WIRE 0x00001000ULL 541 #define UPL_FOR_PAGEOUT 0x00002000ULL 542 #define UPL_WILL_BE_DUMPED 0x00004000ULL 543 #define UPL_FORCE_DATA_SYNC 0x00008000ULL 544 /* continued after the ticket bits... */ 545 546 #define UPL_PAGE_TICKET_MASK 0x000F0000ULL 547 #define UPL_PAGE_TICKET_SHIFT 16 548 549 /* ... flags resume here */ 550 #define UPL_BLOCK_ACCESS 0x00100000ULL 551 #define UPL_ENCRYPT 0x00200000ULL 552 #define UPL_NOZEROFILL 0x00400000ULL 553 #define UPL_WILL_MODIFY 0x00800000ULL /* caller will modify the pages */ 554 555 #define UPL_NEED_32BIT_ADDR 0x01000000ULL 556 #define UPL_UBC_MSYNC 0x02000000ULL 557 #define UPL_UBC_PAGEOUT 0x04000000ULL 558 #define UPL_UBC_PAGEIN 0x08000000ULL 559 #define UPL_REQUEST_SET_DIRTY 0x10000000ULL 560 #define UPL_REQUEST_NO_FAULT 0x20000000ULL /* fail if pages not all resident */ 561 #define UPL_NOZEROFILLIO 0x40000000ULL /* allow non zerofill pages present */ 562 #define UPL_REQUEST_FORCE_COHERENCY 0x80000000ULL 563 564 /* UPL flags known by this kernel */ 565 #define UPL_VALID_FLAGS 0xFFFFFFFFFFULL 566 567 568 /* upl abort error flags */ 569 #define UPL_ABORT_RESTART 0x1 570 #define UPL_ABORT_UNAVAILABLE 0x2 571 #define UPL_ABORT_ERROR 0x4 572 #define UPL_ABORT_FREE_ON_EMPTY 0x8 /* only implemented in wrappers */ 573 #define UPL_ABORT_DUMP_PAGES 0x10 574 #define UPL_ABORT_NOTIFY_EMPTY 0x20 575 /* deprecated: #define UPL_ABORT_ALLOW_ACCESS 0x40 */ 576 #define UPL_ABORT_REFERENCE 0x80 577 578 /* upl pages check flags */ 579 #define UPL_CHECK_DIRTY 0x1 580 581 582 /* 583 * upl pagein/pageout flags 584 * 585 * 586 * when I/O is issued from this UPL it should be done synchronously 587 */ 588 #define UPL_IOSYNC 0x1 589 590 /* 591 * the passed in UPL should not have either a commit or abort 592 * applied to it by the underlying layers... the site that 593 * created the UPL is responsible for cleaning it up. 594 */ 595 #define UPL_NOCOMMIT 0x2 596 597 /* 598 * turn off any speculative read-ahead applied at the I/O layer 599 */ 600 #define UPL_NORDAHEAD 0x4 601 602 /* 603 * pageout request is targeting a real file 604 * as opposed to a swap file. 605 */ 606 607 #define UPL_VNODE_PAGER 0x8 608 /* 609 * this pageout is being originated as part of an explicit 610 * memory synchronization operation... no speculative clustering 611 * should be applied, only the range specified should be pushed. 612 */ 613 #define UPL_MSYNC 0x10 614 615 /* 616 * 617 */ 618 #define UPL_PAGING_ENCRYPTED 0x20 619 620 /* 621 * this pageout is being originated as part of an explicit 622 * memory synchronization operation that is checking for I/O 623 * errors and taking it's own action... if an error occurs, 624 * just abort the pages back into the cache unchanged 625 */ 626 #define UPL_KEEPCACHED 0x40 627 628 /* 629 * this pageout originated from within cluster_io to deal 630 * with a dirty page that hasn't yet been seen by the FS 631 * that backs it... tag it so that the FS can take the 632 * appropriate action w/r to its locking model since the 633 * pageout will reenter the FS for the same file currently 634 * being handled in this context. 635 */ 636 #define UPL_NESTED_PAGEOUT 0x80 637 638 /* 639 * we've detected a sequential access pattern and 640 * we are speculatively and aggressively pulling 641 * pages in... do not count these as real PAGEINs 642 * w/r to our hard throttle maintenance 643 */ 644 #define UPL_IOSTREAMING 0x100 645 646 /* 647 * Currently, it's only used for the swap pagein path. 648 * Since the swap + compressed pager layer manage their 649 * pages, these pages are not marked "absent" i.e. these 650 * are "valid" pages. The pagein path will _not_ issue an 651 * I/O (correctly) for valid pages. So, this flag is used 652 * to override that logic in the vnode I/O path. 653 */ 654 #define UPL_IGNORE_VALID_PAGE_CHECK 0x200 655 656 657 658 /* upl commit flags */ 659 #define UPL_COMMIT_FREE_ON_EMPTY 0x1 /* only implemented in wrappers */ 660 #define UPL_COMMIT_CLEAR_DIRTY 0x2 661 #define UPL_COMMIT_SET_DIRTY 0x4 662 #define UPL_COMMIT_INACTIVATE 0x8 663 #define UPL_COMMIT_NOTIFY_EMPTY 0x10 664 /* deprecated: #define UPL_COMMIT_ALLOW_ACCESS 0x20 */ 665 #define UPL_COMMIT_CS_VALIDATED 0x40 666 #define UPL_COMMIT_CLEAR_PRECIOUS 0x80 667 #define UPL_COMMIT_SPECULATE 0x100 668 #define UPL_COMMIT_FREE_ABSENT 0x200 669 #define UPL_COMMIT_WRITTEN_BY_KERNEL 0x400 670 671 #define UPL_COMMIT_KERNEL_ONLY_FLAGS (UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_FREE_ABSENT) 672 673 /* flags for return of state from vm_map_get_upl, vm_upl address space */ 674 /* based call */ 675 #define UPL_DEV_MEMORY 0x1 676 #define UPL_PHYS_CONTIG 0x2 677 678 679 /* 680 * Flags for the UPL page ops routine. This routine is not exported 681 * out of the kernel at the moment and so the defs live here. 682 */ 683 #define UPL_POP_DIRTY 0x1 684 #define UPL_POP_PAGEOUT 0x2 685 #define UPL_POP_PRECIOUS 0x4 686 #define UPL_POP_ABSENT 0x8 687 #define UPL_POP_BUSY 0x10 688 689 #define UPL_POP_PHYSICAL 0x10000000 690 #define UPL_POP_DUMP 0x20000000 691 #define UPL_POP_SET 0x40000000 692 #define UPL_POP_CLR 0x80000000 693 694 /* 695 * Flags for the UPL range op routine. This routine is not exported 696 * out of the kernel at the moemet and so the defs live here. 697 */ 698 /* 699 * UPL_ROP_ABSENT: Returns the extent of the range presented which 700 * is absent, starting with the start address presented 701 */ 702 #define UPL_ROP_ABSENT 0x01 703 /* 704 * UPL_ROP_PRESENT: Returns the extent of the range presented which 705 * is present (i.e. resident), starting with the start address presented 706 */ 707 #define UPL_ROP_PRESENT 0x02 708 /* 709 * UPL_ROP_DUMP: Dump the pages which are found in the target object 710 * for the target range. 711 */ 712 #define UPL_ROP_DUMP 0x04 713 714 #ifdef PRIVATE 715 716 #define UPL_REPRIO_INFO_MASK (0xFFFFFFFF) 717 #define UPL_REPRIO_INFO_SHIFT 32 718 719 /* access macros for upl_t */ 720 721 #define UPL_DEVICE_PAGE(upl) \ 722 (((upl)[0].phys_addr != 0) ? ((upl)[0].device) : FALSE) 723 724 #define UPL_PAGE_PRESENT(upl, index) \ 725 ((upl)[(index)].phys_addr != 0) 726 727 #define UPL_PHYS_PAGE(upl, index) \ 728 ((upl)[(index)].phys_addr) 729 730 #define UPL_SPECULATIVE_PAGE(upl, index) \ 731 (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].speculative) : FALSE) 732 733 #define UPL_DIRTY_PAGE(upl, index) \ 734 (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].dirty) : FALSE) 735 736 #define UPL_PRECIOUS_PAGE(upl, index) \ 737 (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].precious) : FALSE) 738 739 #define UPL_VALID_PAGE(upl, index) \ 740 (((upl)[(index)].phys_addr != 0) ? (!((upl)[(index)].absent)) : FALSE) 741 742 #define UPL_PAGEOUT_PAGE(upl, index) \ 743 (((upl)[(index)].phys_addr != 0) ? ((upl)[(index)].free_when_done) : FALSE) 744 745 #define UPL_SET_PAGE_FREE_ON_COMMIT(upl, index) \ 746 (((upl)[(index)].phys_addr != 0) ? \ 747 ((upl)[(index)].free_when_done = TRUE) : FALSE) 748 749 #define UPL_CLR_PAGE_FREE_ON_COMMIT(upl, index) \ 750 (((upl)[(index)].phys_addr != 0) ? \ 751 ((upl)[(index)].free_when_done = FALSE) : FALSE) 752 753 #define UPL_REPRIO_INFO_BLKNO(upl, index) \ 754 (((upl)->upl_reprio_info[(index)]) & UPL_REPRIO_INFO_MASK) 755 756 #define UPL_REPRIO_INFO_LEN(upl, index) \ 757 ((((upl)->upl_reprio_info[(index)]) >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK) 758 759 /* modifier macros for upl_t */ 760 761 #define UPL_SET_CS_VALIDATED(upl, index, value) \ 762 ((upl)[(index)].cs_validated = (value)) 763 764 #define UPL_SET_CS_TAINTED(upl, index, value) \ 765 ((upl)[(index)].cs_tainted = (value)) 766 767 #define UPL_SET_CS_NX(upl, index, value) \ 768 ((upl)[(index)].cs_nx = (value)) 769 770 #define UPL_SET_REPRIO_INFO(upl, index, blkno, len) \ 771 ((upl)->upl_reprio_info[(index)]) = (((uint64_t)(blkno) & UPL_REPRIO_INFO_MASK) | \ 772 (((uint64_t)(len) & UPL_REPRIO_INFO_MASK) << UPL_REPRIO_INFO_SHIFT)) 773 774 /* UPL_GET_INTERNAL_PAGE_LIST is only valid on internal objects where the */ 775 /* list request was made with the UPL_INTERNAL flag */ 776 777 #define UPL_GET_INTERNAL_PAGE_LIST(upl) upl_get_internal_page_list(upl) 778 779 __BEGIN_DECLS 780 781 extern void *upl_get_internal_vectorupl(upl_t); 782 extern upl_page_info_t *upl_get_internal_vectorupl_pagelist(upl_t); 783 extern upl_page_info_t *upl_get_internal_page_list(upl_t upl); 784 extern ppnum_t upl_phys_page(upl_page_info_t *upl, int index); 785 extern boolean_t upl_device_page(upl_page_info_t *upl); 786 extern boolean_t upl_speculative_page(upl_page_info_t *upl, int index); 787 extern void upl_clear_dirty(upl_t upl, boolean_t value); 788 extern void upl_set_referenced(upl_t upl, boolean_t value); 789 extern void upl_range_needed(upl_t upl, int index, int count); 790 #if CONFIG_IOSCHED 791 extern int64_t upl_blkno(upl_page_info_t *upl, int index); 792 extern void upl_set_blkno(upl_t upl, vm_offset_t upl_offset, int size, int64_t blkno); 793 #endif 794 795 __END_DECLS 796 797 #endif /* PRIVATE */ 798 799 __BEGIN_DECLS 800 801 extern boolean_t upl_page_present(upl_page_info_t *upl, int index); 802 extern boolean_t upl_dirty_page(upl_page_info_t *upl, int index); 803 extern boolean_t upl_valid_page(upl_page_info_t *upl, int index); 804 extern void upl_deallocate(upl_t upl); 805 extern void upl_mark_decmp(upl_t upl); 806 extern void upl_unmark_decmp(upl_t upl); 807 808 #ifdef KERNEL_PRIVATE 809 810 void upl_page_set_mark(upl_page_info_t *upl, int index, boolean_t v); 811 boolean_t upl_page_get_mark(upl_page_info_t *upl, int index); 812 813 #endif // KERNEL_PRIVATE 814 815 __END_DECLS 816 817 #endif /* KERNEL */ 818 819 #endif /* _MACH_MEMORY_OBJECT_TYPES_H_ */ 820