1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: zalloc.h
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1985
62 *
63 */
64
65 #ifdef KERNEL_PRIVATE
66
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74 #include <os/alloc_util.h>
75 #include <os/atomic.h>
76
77 #ifdef XNU_KERNEL_PRIVATE
78 #include <kern/startup.h>
79 #endif /* XNU_KERNEL_PRIVATE */
80
81 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
82 #define __zalloc_deprecated(msg) __deprecated_msg(msg)
83 #else
84 #define __zalloc_deprecated(msg)
85 #endif
86
87 /*
88 * Enable this macro to force type safe zalloc/zalloc_ro/...
89 */
90 #ifndef ZALLOC_TYPE_SAFE
91 #if __has_ptrcheck
92 #define ZALLOC_TYPE_SAFE 1
93 #else
94 #define ZALLOC_TYPE_SAFE 0
95 #endif
96 #endif /* !ZALLOC_TYPE_SAFE */
97
98 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
99
100 /*!
101 * @macro __zpercpu
102 *
103 * @abstract
 * Annotation denoting a per-cpu pointer that must be accessed
 * through the @c zpercpu_*() interfaces.
106 */
107 #define __zpercpu __unsafe_indexable
108
109 /*!
110 * @typedef zone_id_t
111 *
112 * @abstract
113 * The type for a zone ID.
114 */
115 typedef uint16_t zone_id_t;
116
117 /**
118 * @enum zone_create_flags_t
119 *
120 * @abstract
121 * Set of flags to pass to zone_create().
122 *
123 * @discussion
124 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* flags win over such policies.
126 */
127 __options_decl(zone_create_flags_t, uint64_t, {
128 /** The default value to pass to zone_create() */
129 ZC_NONE = 0x00000000,
130
131 /** (obsolete) */
132 ZC_SEQUESTER = 0x00000001,
133 /** (obsolete) */
134 ZC_NOSEQUESTER = 0x00000002,
135
136 /** Enable per-CPU zone caching for this zone */
137 ZC_CACHING = 0x00000010,
138 /** Disable per-CPU zone caching for this zone */
139 ZC_NOCACHING = 0x00000020,
140
	/** Allocate zone pages as read-only */
142 ZC_READONLY = 0x00800000,
143
144 /** Mark zone as a per-cpu zone */
145 ZC_PERCPU = 0x01000000,
146
147 /** Force the created zone to clear every allocation on free */
148 ZC_ZFREE_CLEARMEM = 0x02000000,
149
	/** Mark zone as non-collectable by zone_gc() */
151 ZC_NOGC = 0x04000000,
152
153 /** Do not encrypt this zone during hibernation */
154 ZC_NOENCRYPT = 0x08000000,
155
156 /** Type requires alignment to be preserved */
157 ZC_ALIGNMENT_REQUIRED = 0x10000000,
158
159 /** Obsolete */
160 ZC_NOGZALLOC = 0x20000000,
161
162 /** Don't asynchronously replenish the zone via callouts */
163 ZC_NOCALLOUT = 0x40000000,
164
	/** Zone can be zdestroy()ed; unlike with zinit(), this is not the default */
166 ZC_DESTRUCTIBLE = 0x80000000,
167
168 #ifdef XNU_KERNEL_PRIVATE
169 /** This zone is a built object cache */
170 ZC_OBJ_CACHE = 0x0080000000000000,
171
172 /** Use guard pages in PGZ mode */
173 ZC_PGZ_USE_GUARDS = 0x0100000000000000,
174
175 /** Zone doesn't support TBI tagging */
176 ZC_NO_TBI_TAG = 0x0200000000000000,
177
178 /** This zone will back a kalloc type */
179 ZC_KALLOC_TYPE = 0x0400000000000000,
180
181 /** Disable PGZ for this zone */
182 ZC_NOPGZ = 0x0800000000000000,
183
184 /** This zone contains pure data */
185 ZC_DATA = 0x1000000000000000,
186
187 /** This zone belongs to the VM submap */
188 ZC_VM = 0x2000000000000000,
189
190 /** Disable kasan quarantine for this zone */
191 ZC_KASAN_NOQUARANTINE = 0x4000000000000000,
192
193 /** Disable kasan redzones for this zone */
194 ZC_KASAN_NOREDZONE = 0x8000000000000000,
195 #endif /* XNU_KERNEL_PRIVATE */
196 });
197
198 /*!
199 * @union zone_or_view
200 *
201 * @abstract
 * A type used for calls that accept either a zone or a zone view.
203 *
204 * @discussion
205 * @c zalloc() and @c zfree() and their variants can act on both
206 * zones and zone views.
207 */
208 union zone_or_view {
209 struct kalloc_type_view *zov_kt_heap;
210 struct zone_view *zov_view;
211 struct zone *zov_zone;
212 #ifdef __cplusplus
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
214 }
	inline zone_or_view(struct zone *z) : zov_zone(z) {
216 }
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
218 }
219 #endif
220 };
221 #ifdef __cplusplus
222 typedef union zone_or_view zone_or_view_t;
223 #else
224 typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
225 #endif
226
227 /*!
228 * @enum zone_create_ro_id_t
229 *
230 * @abstract
231 * Zone creation IDs for external read only zones
232 *
233 * @discussion
234 * Kexts that desire to use the RO allocator should:
235 * 1. Add a zone creation id below
236 * 2. Add a corresponding ID to @c zone_reserved_id_t
237 * 3. Use @c zone_create_ro with ID from #1 to create a RO zone.
238 * 4. Save the zone ID returned from #3 in a SECURITY_READ_ONLY_LATE variable.
239 * 5. Use the saved ID for zalloc_ro/zfree_ro, etc.
240 */
241 __enum_decl(zone_create_ro_id_t, zone_id_t, {
242 ZC_RO_ID_SANDBOX,
243 ZC_RO_ID_PROFILE,
244 ZC_RO_ID_PROTOBOX,
245 ZC_RO_ID_SB_FILTER,
246 ZC_RO_ID_AMFI_OSENTITLEMENTS,
247 ZC_RO_ID__LAST = ZC_RO_ID_AMFI_OSENTITLEMENTS,
248 });
249
250 /*!
251 * @function zone_create
252 *
253 * @abstract
254 * Creates a zone with the specified parameters.
255 *
256 * @discussion
257 * A Zone is a slab allocator that returns objects of a given size very quickly.
258 *
259 * @param name the name for the new zone.
260 * @param size the size of the elements returned by this zone.
261 * @param flags a set of @c zone_create_flags_t flags.
262 *
263 * @returns the created zone, this call never fails.
264 */
265 extern zone_t zone_create(
266 const char *name __unsafe_indexable,
267 vm_size_t size,
268 zone_create_flags_t flags);
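
/*
 * Example (illustrative sketch): a basic zone_create()/zalloc()/zfree()
 * round trip. The "widget" type, zone variable and zone name below are
 * hypothetical.
 *
 * <code>
 *	static SECURITY_READ_ONLY_LATE(zone_t) widget_zone;
 *
 *	static void
 *	widget_zone_init(void)
 *	{
 *		widget_zone = zone_create("hypothetical.widget",
 *		    sizeof(struct widget), ZC_ZFREE_CLEARMEM);
 *	}
 *
 *	static struct widget *
 *	widget_alloc(void)
 *	{
 *		return zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *	}
 *
 *	static void
 *	widget_free(struct widget *w)
 *	{
 *		zfree(widget_zone, w);
 *	}
 * </code>
 */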
269
270 /*!
271 *
272 * @function zone_get_elem_size
273 *
274 * @abstract
275 * Get the intrinsic size of one element allocated by the given zone.
276 *
277 * @discussion
278 * All zones are created to allocate elements of a fixed size, but the size is
279 * not always a compile-time constant. @c zone_get_elem_size can be used to
280 * retrieve the size of elements allocated by this zone at runtime.
281 *
282 * @param zone the zone to inspect
283 *
284 * @returns the size of elements allocated by this zone
285 */
286 extern vm_size_t zone_get_elem_size(zone_t zone);
287
288 /*!
289 * @function zone_create_ro
290 *
291 * @abstract
 * Creates a read-only zone with the specified parameters, for use by kexts.
293 *
294 * @discussion
 * See the notes under @c zone_create_ro_id_t regarding creation and use of
 * RO zones in kexts. Do not use this API to create read-only zones in xnu.
297 *
298 * @param name the name for the new zone.
299 * @param size the size of the elements returned by this zone.
300 * @param flags a set of @c zone_create_flags_t flags.
301 * @param zc_ro_id an ID declared in @c zone_create_ro_id_t
302 *
303 * @returns the zone ID of the created zone, this call never fails.
304 */
305 extern zone_id_t zone_create_ro(
306 const char *name __unsafe_indexable,
307 vm_size_t size,
308 zone_create_flags_t flags,
309 zone_create_ro_id_t zc_ro_id);
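
/*
 * Example (illustrative sketch): how a kext might follow the steps listed
 * under @c zone_create_ro_id_t. The "profile" structure and variable names
 * are placeholders; @c ZC_RO_ID_PROFILE is used purely for illustration.
 *
 * <code>
 *	static SECURITY_READ_ONLY_LATE(zone_id_t) profile_ro_zid;
 *
 *	static void
 *	profile_zone_init(void)
 *	{
 *		profile_ro_zid = zone_create_ro("hypothetical.profile.ro",
 *		    sizeof(struct profile_ro), ZC_NONE, ZC_RO_ID_PROFILE);
 *	}
 *
 *	static struct profile_ro *
 *	profile_alloc(const struct profile_ro *tmpl)
 *	{
 *		struct profile_ro *p;
 *
 *		p = zalloc_ro(profile_ro_zid, Z_WAITOK | Z_ZERO | Z_NOFAIL);
 *		zalloc_ro_update_elem(profile_ro_zid, p, tmpl);
 *		return p;
 *	}
 *
 *	static void
 *	profile_free(struct profile_ro *p)
 *	{
 *		zfree_ro(profile_ro_zid, p);
 *	}
 * </code>
 */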
310
311 /*!
312 * @function zdestroy
313 *
314 * @abstract
315 * Destroys a zone previously made with zone_create.
316 *
317 * @discussion
318 * Zones must have been made destructible for @c zdestroy() to be allowed,
319 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
320 *
321 * @param zone the zone to destroy.
322 */
323 extern void zdestroy(
324 zone_t zone);
325
326 /*!
327 * @function zone_require
328 *
329 * @abstract
 * Requires a given pointer to belong to the specified zone.
331 *
332 * @discussion
333 * The function panics if the check fails as it indicates that the kernel
334 * internals have been compromised.
335 *
336 * @param zone the zone the address needs to belong to.
337 * @param addr the element address to check.
338 */
339 extern void zone_require(
340 zone_t zone,
341 void *addr __unsafe_indexable);
342
343 /*!
344 * @function zone_require_ro
345 *
346 * @abstract
 * Version of @c zone_require() intended for zones created with @c ZC_READONLY.
348 *
349 * @discussion
350 * This check is not sufficient to fully trust the element.
351 *
 * Another check of its content must be performed to prove
 * that the element is "the right one". A typical technique,
 * when the RO data structure is 1:1 with a mutable one,
 * is a simple circularity check with a very strict lifetime
 * (both the mutable and read-only data structures are created
 * and destroyed as close together as possible).
358 *
359 * @param zone_id the zone id the address needs to belong to.
360 * @param elem_size the element size for this zone.
361 * @param addr the element address to check.
362 */
363 extern void zone_require_ro(
364 zone_id_t zone_id,
365 vm_size_t elem_size,
366 void *addr __unsafe_indexable);
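
/*
 * Example (illustrative sketch of the circularity check described above):
 * a hypothetical mutable "thing" paired 1:1 with a read-only "thing_ro".
 * The structure names and ZONE_ID_THING_RO are placeholders.
 *
 * <code>
 *	struct thing_ro {
 *		struct thing    *tro_owner;  // back pointer to the mutable struct
 *		// ... security sensitive fields ...
 *	};
 *
 *	struct thing {
 *		struct thing_ro *t_ro;
 *	};
 *
 *	static struct thing_ro *
 *	thing_get_ro(struct thing *t)
 *	{
 *		struct thing_ro *tro = t->t_ro;
 *
 *		zone_require_ro(ZONE_ID_THING_RO, sizeof(struct thing_ro), tro);
 *		if (tro->tro_owner != t) {
 *			panic("thing_ro %p doesn't belong to thing %p", tro, t);
 *		}
 *		return tro;
 *	}
 * </code>
 */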
367
368 /*!
369 * @enum zalloc_flags_t
370 *
371 * @brief
372 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
373 *
374 * @discussion
375 * It is encouraged that any callsite passing flags uses exactly one of:
376 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
377 * if nothing else was specified.
378 *
379 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
380 * then @c Z_WAITOK is ignored.
381 *
382 * @const Z_WAITOK
383 * Passing this flag means that zalloc() will be allowed to sleep
384 * for memory to become available for this allocation. If the zone
385 * isn't exhaustible, zalloc(Z_WAITOK) never fails.
386 *
387 * If the zone is exhaustible, zalloc() might still fail if the zone
388 * is at its maximum allowed memory usage, unless Z_NOFAIL is passed,
389 * in which case zalloc() will block until an element is freed.
390 *
391 * @const Z_NOWAIT
392 * Passing this flag means that zalloc is not allowed to ever block.
393 *
394 * @const Z_NOPAGEWAIT
395 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait on the VM for pages when the system
 * is under memory pressure.
398 *
399 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
401 *
402 * @const Z_NOFAIL
403 * Passing this flag means that the caller expects the allocation to always
404 * succeed. This will result in a panic if this assumption isn't correct.
405 *
406 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT.
 * For exhaustible zones, it forces the caller to wait until a zfree() happens
 * if the zone has reached its maximum number of allowed elements.
409 *
410 * @const Z_REALLOCF
411 * For the realloc family of functions,
412 * free the incoming memory on failure cases.
413 *
414 #if XNU_KERNEL_PRIVATE
415 * @const Z_SET_NOTSHARED
 * Using this flag from external allocation APIs (kalloc_type/zalloc)
 * allows the callsite to skip the shared zone for that sizeclass and
 * directly allocate from the requested zone.
419 * Using this flag from internal APIs (zalloc_ext) will skip the shared
420 * zone only when a given threshold is exceeded. It will also set a flag
421 * to indicate that future allocations to the zone should directly go to
422 * the zone instead of the shared zone.
423 *
424 * @const Z_SPRAYQTN
425 * This flag tells the VM to allocate from the "spray quarantine" range when
426 * it services the allocation. For more details on what allocations qualify
427 * to use this flag see @c KMEM_RANGE_ID_SPRAYQTN.
428 *
429 * @const Z_KALLOC_ARRAY
 * Instead of returning a standard pointer, return a pointer that encodes
 * its size-class into the pointer itself (only for kalloc; this might limit
 * the range of allocations that can be done).
433 *
434 * @const Z_FULLSIZE
435 * Used to indicate that the caller will use all available space in excess
 * of the requested allocation size.
437 *
438 * @const Z_SKIP_KASAN
439 * Tell zalloc() not to do any kasan adjustments.
440 *
441 * @const Z_MAY_COPYINMAP
442 * This data allocation might be used with vm_map_copyin().
443 * This allows for those allocations to be associated with a proper VM object.
444 *
445 * @const Z_VM_TAG_BT_BIT
446 * Used to blame allocation accounting on the first kext
447 * found in the backtrace of the allocation.
448 *
449 * @const Z_NOZZC
450 * Used internally to mark allocations that will skip zero validation.
451 *
452 * @const Z_PCPU
453 * Used internally for the percpu paths.
454 *
455 * @const Z_VM_TAG_MASK
456 * Represents bits in which a vm_tag_t for the allocation can be passed.
457 * (used by kalloc for the zone tagging debugging feature).
458 #endif
459 */
460 __options_decl(zalloc_flags_t, uint32_t, {
461 // values smaller than 0xff are shared with the M_* flags from BSD MALLOC
462 Z_WAITOK = 0x0000,
463 Z_NOWAIT = 0x0001,
464 Z_NOPAGEWAIT = 0x0002,
465 Z_ZERO = 0x0004,
466 Z_REALLOCF = 0x0008,
467
468 #if XNU_KERNEL_PRIVATE
469 Z_SET_NOTSHARED = 0x0040,
470 Z_SPRAYQTN = 0x0080,
471 Z_KALLOC_ARRAY = 0x0100,
472 #if KASAN_CLASSIC
473 Z_FULLSIZE = 0x0000,
474 #else
475 Z_FULLSIZE = 0x0200,
476 #endif
477 #if KASAN_CLASSIC
478 Z_SKIP_KASAN = 0x0400,
479 #else
480 Z_SKIP_KASAN = 0x0000,
481 #endif
482 Z_MAY_COPYINMAP = 0x0800,
483 Z_VM_TAG_BT_BIT = 0x1000,
484 Z_PCPU = 0x2000,
485 Z_NOZZC = 0x4000,
486 #endif /* XNU_KERNEL_PRIVATE */
487 Z_NOFAIL = 0x8000,
488
489 /* convenient c++ spellings */
490 Z_NOWAIT_ZERO = Z_NOWAIT | Z_ZERO,
491 Z_WAITOK_ZERO = Z_WAITOK | Z_ZERO,
492 Z_WAITOK_ZERO_NOFAIL = Z_WAITOK | Z_ZERO | Z_NOFAIL,
493 #if XNU_KERNEL_PRIVATE
494 Z_WAITOK_ZERO_SPRAYQTN = Z_WAITOK | Z_ZERO | Z_SPRAYQTN,
495 #endif
496
497 Z_KPI_MASK = Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT | Z_ZERO,
498 #if XNU_KERNEL_PRIVATE
499 Z_ZERO_VM_TAG_BT_BIT = Z_ZERO | Z_VM_TAG_BT_BIT,
500 /** used by kalloc to propagate vm tags for -zt */
501 Z_VM_TAG_MASK = 0xffff0000,
502
503 #define Z_VM_TAG_SHIFT 16
504 #define Z_VM_TAG(fl, tag) ((zalloc_flags_t)((fl) | ((tag) << Z_VM_TAG_SHIFT)))
505 #define Z_VM_TAG_BT(fl, tag) ((zalloc_flags_t)(Z_VM_TAG(fl, tag) | Z_VM_TAG_BT_BIT))
506 #endif
507 });
508
509 /*
510 * This type is used so that kalloc_internal has good calling conventions
 * for callers who want to cheaply know both the allocated address
 * and the actual size of the allocation.
513 */
514 struct kalloc_result {
515 void *addr __sized_by(size);
516 vm_size_t size;
517 };
518
519 /*!
520 * @typedef zone_stats_t
521 *
522 * @abstract
523 * The opaque type for per-cpu zone stats that are accumulated per zone
524 * or per zone-view.
525 */
526 typedef struct zone_stats *__zpercpu zone_stats_t;
527
528 /*!
529 * @typedef zone_view_t
530 *
531 * @abstract
532 * A view on a zone for accounting purposes.
533 *
534 * @discussion
 * A zone view uses the zone it references as the backing store for its allocations,
536 * but does the allocation accounting at the view level.
537 *
 * This accounting is surfaced by @b zprint(1) and similar tools,
 * which allows for a cheap but finer-grained understanding of allocations
 * without any fragmentation cost.
541 *
542 * Zone views are protected by the kernel lockdown and can't be initialized
543 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
544 */
545 typedef struct zone_view *zone_view_t;
546 struct zone_view {
547 zone_t zv_zone;
548 zone_stats_t zv_stats;
549 const char *zv_name __unsafe_indexable;
550 zone_view_t zv_next;
551 };
552
553 /*!
554 * @typedef kalloc_type_view_t
555 *
556 * @abstract
557 * The opaque type created at kalloc_type callsites to redirect calls to
558 * the right zone.
559 */
560 typedef struct kalloc_type_view *kalloc_type_view_t;
561
562 #if XNU_KERNEL_PRIVATE
563 /*
564 * kalloc_type/kfree_type implementation functions
565 */
566 extern void *__unsafe_indexable kalloc_type_impl_internal(
567 kalloc_type_view_t kt_view,
568 zalloc_flags_t flags);
569
570 extern void kfree_type_impl_internal(
571 kalloc_type_view_t kt_view,
572 void *ptr __unsafe_indexable);
573
574 static inline void *__unsafe_indexable
kalloc_type_impl(
576 kalloc_type_view_t kt_view,
577 zalloc_flags_t flags)
578 {
579 void *__unsafe_indexable addr = kalloc_type_impl_internal(kt_view, flags);
580 if (flags & Z_NOFAIL) {
581 __builtin_assume(addr != NULL);
582 }
583 return addr;
584 }
585
586 #define kfree_type_impl(kt_view, ptr) \
587 kfree_type_impl_internal(kt_view, (ptr))
588
589 #else /* XNU_KERNEL_PRIVATE */
590
591 extern void *__unsafe_indexable kalloc_type_impl(
592 kalloc_type_view_t kt_view,
593 zalloc_flags_t flags);
594
595 static inline void *__unsafe_indexable
__kalloc_type_impl(
597 kalloc_type_view_t kt_view,
598 zalloc_flags_t flags)
599 {
600 void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
601 if (flags & Z_NOFAIL) {
602 __builtin_assume(addr != NULL);
603 }
604 return addr;
605 }
606
607 #define kalloc_type_impl(ktv, fl) __kalloc_type_impl(ktv, fl)
608
609 extern void kfree_type_impl(
610 kalloc_type_view_t kt_view,
611 void *ptr __unsafe_indexable);
612
613 #endif /* XNU_KERNEL_PRIVATE */
614
615 /*!
616 * @function zalloc
617 *
618 * @abstract
619 * Allocates an element from a specified zone.
620 *
621 * @discussion
622 * If the zone isn't exhaustible and is expandable, this call never fails.
623 *
624 * @param zone the zone or zone view to allocate from
625 *
626 * @returns NULL or the allocated element
627 */
628 __attribute__((malloc))
629 extern void *__unsafe_indexable zalloc(
630 zone_t zone);
631
632 __attribute__((malloc))
633 __attribute__((overloadable))
634 static inline void *__unsafe_indexable
zalloc(zone_view_t view)
636 {
637 return zalloc((zone_t)view);
638 }
639
640 __attribute__((malloc))
641 __attribute__((overloadable))
642 static inline void *__unsafe_indexable
zalloc(kalloc_type_view_t kt_view)
644 {
645 return (kalloc_type_impl)(kt_view, Z_WAITOK);
646 }
647
648 /*!
649 * @function zalloc_noblock
650 *
651 * @abstract
652 * Allocates an element from a specified zone, but never blocks.
653 *
654 * @discussion
 * This call is suitable for preemptible code; however, allocation
 * isn't allowed from interrupt context.
657 *
658 * @param zone the zone or zone view to allocate from
659 *
660 * @returns NULL or the allocated element
661 */
662 __attribute__((malloc))
663 extern void *__unsafe_indexable zalloc_noblock(
664 zone_t zone);
665
666 __attribute__((malloc))
667 __attribute__((overloadable))
668 static inline void *__unsafe_indexable
zalloc_noblock(zone_view_t view)
670 {
671 return zalloc_noblock((zone_t)view);
672 }
673
674 __attribute__((malloc))
675 __attribute__((overloadable))
676 static inline void *__unsafe_indexable
zalloc_noblock(kalloc_type_view_t kt_view)
678 {
679 return (kalloc_type_impl)(kt_view, Z_NOWAIT);
680 }
681
682 /*!
683 * @function zalloc_flags()
684 *
685 * @abstract
686 * Allocates an element from a specified zone, with flags.
687 *
688 * @param zone the zone or zone view to allocate from
689 * @param flags a collection of @c zalloc_flags_t.
690 *
691 * @returns NULL or the allocated element
692 */
693 __attribute__((malloc))
694 extern void *__unsafe_indexable zalloc_flags(
695 zone_t zone,
696 zalloc_flags_t flags);
697
698 __attribute__((malloc))
699 __attribute__((overloadable))
700 static inline void *__unsafe_indexable
__zalloc_flags(
702 zone_t zone,
703 zalloc_flags_t flags)
704 {
705 void *__unsafe_indexable addr = (zalloc_flags)(zone, flags);
706 if (flags & Z_NOFAIL) {
707 __builtin_assume(addr != NULL);
708 }
709 return addr;
710 }
711
712 __attribute__((malloc))
713 __attribute__((overloadable))
714 static inline void *__unsafe_indexable
__zalloc_flags(
716 zone_view_t view,
717 zalloc_flags_t flags)
718 {
719 return __zalloc_flags((zone_t)view, flags);
720 }
721
722 __attribute__((malloc))
723 __attribute__((overloadable))
724 static inline void *__unsafe_indexable
__zalloc_flags(
726 kalloc_type_view_t kt_view,
727 zalloc_flags_t flags)
728 {
729 void *__unsafe_indexable addr = (kalloc_type_impl)(kt_view, flags);
730 if (flags & Z_NOFAIL) {
731 __builtin_assume(addr != NULL);
732 }
733 return addr;
734 }
735
736 __attribute__((malloc))
737 static inline void *__header_indexable
zalloc_flags_buf(
739 zone_t zone,
740 zalloc_flags_t flags)
741 {
742 void *__unsafe_indexable addr = __zalloc_flags(zone, flags);
743 if (flags & Z_NOFAIL) {
744 __builtin_assume(addr != NULL);
745 }
746 return __unsafe_forge_bidi_indexable(void *, addr, zone_get_elem_size(zone));
747 }
748
749 #if XNU_KERNEL_PRIVATE && ZALLOC_TYPE_SAFE
750 #define zalloc_flags(zov, fl) __zalloc_cast(zov, (__zalloc_flags)(zov, fl))
751 #else
752 #define zalloc_flags(zov, fl) __zalloc_flags(zov, fl)
753 #endif
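
/*
 * Example (illustrative sketch): common @c zalloc_flags() flag combinations.
 * The @c request_zone variable and @c struct request type are placeholders
 * for a zone created with zone_create().
 *
 * <code>
 *	struct request *req;
 *
 *	// Sleep if needed and zero the element; for a non-exhaustible zone
 *	// this cannot return NULL.
 *	req = zalloc_flags(request_zone, Z_WAITOK | Z_ZERO);
 *
 *	// From a context that must not block: the allocation may fail,
 *	// so the result has to be checked.
 *	req = zalloc_flags(request_zone, Z_NOWAIT | Z_ZERO);
 *	if (req == NULL) {
 *		return ENOMEM;
 *	}
 *
 *	// Assert that the allocation cannot fail (panics if it would).
 *	req = zalloc_flags(request_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
 * </code>
 */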
754
755 /*!
756 * @macro zalloc_id
757 *
758 * @abstract
759 * Allocates an element from a specified zone ID, with flags.
760 *
761 * @param zid The proper @c ZONE_ID_* constant.
762 * @param flags a collection of @c zalloc_flags_t.
763 *
764 * @returns NULL or the allocated element
765 */
766 __attribute__((malloc))
767 extern void *__unsafe_indexable zalloc_id(
768 zone_id_t zid,
769 zalloc_flags_t flags);
770
771 __attribute__((malloc))
772 static inline void *__unsafe_indexable
__zalloc_id(
774 zone_id_t zid,
775 zalloc_flags_t flags)
776 {
777 void *__unsafe_indexable addr = (zalloc_id)(zid, flags);
778 if (flags & Z_NOFAIL) {
779 __builtin_assume(addr != NULL);
780 }
781 return addr;
782 }
783
784 #if XNU_KERNEL_PRIVATE
785 #define zalloc_id(zid, flags) __zalloc_cast(zid, (__zalloc_id)(zid, flags))
786 #else
787 #define zalloc_id(zid, fl) __zalloc_id(zid, fl)
788 #endif
789
790 /*!
791 * @function zalloc_ro
792 *
793 * @abstract
794 * Allocates an element from a specified read-only zone.
795 *
796 * @param zone_id the zone id to allocate from
797 * @param flags a collection of @c zalloc_flags_t.
798 *
799 * @returns NULL or the allocated element
800 */
801 __attribute__((malloc))
802 extern void *__unsafe_indexable zalloc_ro(
803 zone_id_t zone_id,
804 zalloc_flags_t flags);
805
806 __attribute__((malloc))
807 static inline void *__unsafe_indexable
__zalloc_ro(
809 zone_id_t zone_id,
810 zalloc_flags_t flags)
811 {
812 void *__unsafe_indexable addr = (zalloc_ro)(zone_id, flags);
813 if (flags & Z_NOFAIL) {
814 __builtin_assume(addr != NULL);
815 }
816 return addr;
817 }
818
819 #if XNU_KERNEL_PRIVATE
820 #define zalloc_ro(zid, fl) __zalloc_cast(zid, (__zalloc_ro)(zid, fl))
821 #else
822 #define zalloc_ro(zid, fl) __zalloc_ro(zid, fl)
823 #endif
824
825 /*!
826 * @function zalloc_ro_mut
827 *
828 * @abstract
829 * Modifies an element from a specified read-only zone.
830 *
831 * @discussion
832 * Modifying compiler-assisted authenticated pointers using this function will
833 * not result in a signed pointer being written. The caller is expected to
834 * sign the value appropriately beforehand if they wish to do this.
835 *
 * @param zone_id the zone id the element was allocated from
837 * @param elem element to be modified
838 * @param offset offset from element
839 * @param new_data pointer to new data
840 * @param new_data_size size of modification
841 *
842 */
843 extern void zalloc_ro_mut(
844 zone_id_t zone_id,
845 void *elem __unsafe_indexable,
846 vm_offset_t offset,
847 const void *new_data __sized_by(new_data_size),
848 vm_size_t new_data_size);
849
850 /*!
851 * @function zalloc_ro_update_elem
852 *
853 * @abstract
854 * Update the value of an entire element allocated in the read only allocator.
855 *
 * @param zone_id the zone id the element was allocated from
857 * @param elem element to be modified
858 * @param new_data pointer to new data
859 *
860 */
861 #define zalloc_ro_update_elem(zone_id, elem, new_data) ({ \
862 const typeof(*(elem)) *__new_data = (new_data); \
863 zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data)); \
864 })
865
866 /*!
867 * @function zalloc_ro_update_field
868 *
869 * @abstract
870 * Update a single field of an element allocated in the read only allocator.
871 *
 * @param zone_id the zone id the element was allocated from
 * @param elem element to be modified
 * @param field the element field to be modified
 * @param value pointer to the new data
876 *
877 */
878 #define zalloc_ro_update_field(zone_id, elem, field, value) ({ \
879 const typeof((elem)->field) *__value = (value); \
880 zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field), \
881 __value, sizeof((elem)->field)); \
882 })
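
/*
 * Example (illustrative sketch): updating a read-only element through the
 * RO mutation helpers. The @c struct cred_ro type, its fields, and
 * @c ZONE_ID_CRED_RO are placeholders.
 *
 * <code>
 *	struct cred_ro {
 *		uint32_t cr_flags;
 *		uint32_t cr_uid;
 *	};
 *
 *	void
 *	cred_set_uid(struct cred_ro *cred, uint32_t uid)
 *	{
 *		// only the field is rewritten, the rest of the element is untouched
 *		zalloc_ro_update_field(ZONE_ID_CRED_RO, cred, cr_uid, &uid);
 *	}
 *
 *	void
 *	cred_reset(struct cred_ro *cred, const struct cred_ro *tmpl)
 *	{
 *		// replace the whole element with the contents of *tmpl
 *		zalloc_ro_update_elem(ZONE_ID_CRED_RO, cred, tmpl);
 *	}
 * </code>
 */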
883
884 #define ZRO_ATOMIC_LONG(op) ZRO_ATOMIC_##op##_64
885
886 /*!
887 * @enum zro_atomic_op_t
888 *
889 * @brief
890 * Flags that can be used with @c zalloc_ro_*_atomic to specify the desired
891 * atomic operations.
892 *
893 * @discussion
894 * This enum provides all flavors of atomic operations supported in sizes 8,
895 * 16, 32, 64 bits.
896 *
897 * @const ZRO_ATOMIC_OR_*
 * To perform an @c os_atomic_or
 *
 * @const ZRO_ATOMIC_XOR_*
 * To perform an @c os_atomic_xor
 *
 * @const ZRO_ATOMIC_AND_*
 * To perform an @c os_atomic_and
 *
 * @const ZRO_ATOMIC_ADD_*
 * To perform an @c os_atomic_add
 *
 * @const ZRO_ATOMIC_XCHG_*
 * To perform an @c os_atomic_xchg
911 *
912 */
913 __enum_decl(zro_atomic_op_t, uint32_t, {
914 ZRO_ATOMIC_OR_8 = 0x00000010 | 1,
915 ZRO_ATOMIC_OR_16 = 0x00000010 | 2,
916 ZRO_ATOMIC_OR_32 = 0x00000010 | 4,
917 ZRO_ATOMIC_OR_64 = 0x00000010 | 8,
918
919 ZRO_ATOMIC_XOR_8 = 0x00000020 | 1,
920 ZRO_ATOMIC_XOR_16 = 0x00000020 | 2,
921 ZRO_ATOMIC_XOR_32 = 0x00000020 | 4,
922 ZRO_ATOMIC_XOR_64 = 0x00000020 | 8,
923
924 ZRO_ATOMIC_AND_8 = 0x00000030 | 1,
925 ZRO_ATOMIC_AND_16 = 0x00000030 | 2,
926 ZRO_ATOMIC_AND_32 = 0x00000030 | 4,
927 ZRO_ATOMIC_AND_64 = 0x00000030 | 8,
928
929 ZRO_ATOMIC_ADD_8 = 0x00000040 | 1,
930 ZRO_ATOMIC_ADD_16 = 0x00000040 | 2,
931 ZRO_ATOMIC_ADD_32 = 0x00000040 | 4,
932 ZRO_ATOMIC_ADD_64 = 0x00000040 | 8,
933
934 ZRO_ATOMIC_XCHG_8 = 0x00000050 | 1,
935 ZRO_ATOMIC_XCHG_16 = 0x00000050 | 2,
936 ZRO_ATOMIC_XCHG_32 = 0x00000050 | 4,
937 ZRO_ATOMIC_XCHG_64 = 0x00000050 | 8,
938
	/* convenient spellings */
940 ZRO_ATOMIC_OR_LONG = ZRO_ATOMIC_LONG(OR),
941 ZRO_ATOMIC_XOR_LONG = ZRO_ATOMIC_LONG(XOR),
942 ZRO_ATOMIC_AND_LONG = ZRO_ATOMIC_LONG(AND),
943 ZRO_ATOMIC_ADD_LONG = ZRO_ATOMIC_LONG(ADD),
944 ZRO_ATOMIC_XCHG_LONG = ZRO_ATOMIC_LONG(XCHG),
945 });
946
947 /*!
948 * @function zalloc_ro_mut_atomic
949 *
950 * @abstract
951 * Atomically update an offset in an element allocated in the read only
952 * allocator. Do not use directly. Use via @c zalloc_ro_update_field_atomic.
953 *
 * @param zone_id the zone id the element was allocated from
955 * @param elem element to be modified
956 * @param offset offset in the element to be modified
957 * @param op atomic operation to perform (see @c zro_atomic_op_t)
958 * @param value value for the atomic operation
959 *
960 */
961 extern uint64_t zalloc_ro_mut_atomic(
962 zone_id_t zone_id,
963 void *elem __unsafe_indexable,
964 vm_offset_t offset,
965 zro_atomic_op_t op,
966 uint64_t value);
967
968 /*!
969 * @macro zalloc_ro_update_field_atomic
970 *
971 * @abstract
972 * Atomically update a single field of an element allocated in the read only
973 * allocator.
974 *
 * @param zone_id the zone id the element was allocated from
976 * @param elem element to be modified
977 * @param field the element field to be modified
978 * @param op atomic operation to perform (see @c zro_atomic_op_t)
979 * @param value value for the atomic operation
980 *
981 */
982 #define zalloc_ro_update_field_atomic(zone_id, elem, field, op, value) ({ \
983 const typeof((elem)->field) __value = (value); \
984 static_assert(sizeof(__value) == (op & 0xf)); \
985 (os_atomic_basetypeof(&(elem)->field))zalloc_ro_mut_atomic(zone_id, \
986 elem, offsetof(typeof(*(elem)), field), op, (uint64_t)__value); \
987 })
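
/*
 * Example (illustrative sketch): atomically setting a flag bit in a field of
 * a read-only element. The names, including the @c CRED_F_FROZEN bit, are
 * placeholders; note that the atomic op width must match the field size.
 *
 * <code>
 *	uint32_t prev;
 *
 *	prev = zalloc_ro_update_field_atomic(ZONE_ID_CRED_RO, cred,
 *	    cr_flags, ZRO_ATOMIC_OR_32, CRED_F_FROZEN);
 * </code>
 */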
988
989 /*!
990 * @function zalloc_ro_clear
991 *
992 * @abstract
993 * Zeroes an element from a specified read-only zone.
994 *
 * @param zone_id the zone id the element was allocated from
996 * @param elem element to be modified
997 * @param offset offset from element
998 * @param size size of modification
999 */
1000 extern void zalloc_ro_clear(
1001 zone_id_t zone_id,
1002 void *elem __unsafe_indexable,
1003 vm_offset_t offset,
1004 vm_size_t size);
1005
1006 /*!
1007 * @function zalloc_ro_clear_field
1008 *
1009 * @abstract
1010 * Zeroes the specified field of an element from a specified read-only zone.
1011 *
 * @param zone_id the zone id the element was allocated from
 * @param elem element to be modified
 * @param field the element field to be cleared
1015 */
1016 #define zalloc_ro_clear_field(zone_id, elem, field) \
1017 zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
1018 sizeof((elem)->field))
1019
1020 /*!
1021 * @function zfree_id()
1022 *
1023 * @abstract
1024 * Frees an element previously allocated with @c zalloc_id().
1025 *
1026 * @param zone_id the zone id to free the element to.
1027 * @param addr the address to free
1028 */
1029 extern void zfree_id(
1030 zone_id_t zone_id,
1031 void *addr __unsafe_indexable);
1032 #define zfree_id(zid, elem) ({ \
1033 zone_id_t __zfree_zid = (zid); \
1034 (zfree_id)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
1035 })
1036
1037
1038 /*!
1039 * @function zfree_ro()
1040 *
1041 * @abstract
1042 * Frees an element previously allocated with @c zalloc_ro().
1043 *
1044 * @param zone_id the zone id to free the element to.
1045 * @param addr the address to free
1046 */
1047 extern void zfree_ro(
1048 zone_id_t zone_id,
1049 void *addr __unsafe_indexable);
1050 #define zfree_ro(zid, elem) ({ \
1051 zone_id_t __zfree_zid = (zid); \
1052 (zfree_ro)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
1053 })
1054
1055
1056 /*!
1057 * @function zfree
1058 *
1059 * @abstract
1060 * Frees an element allocated with @c zalloc*.
1061 *
1062 * @discussion
1063 * If the element being freed doesn't belong to the specified zone,
1064 * then this call will panic.
1065 *
1066 * @param zone the zone or zone view to free the element to.
1067 * @param elem the element to free
1068 */
1069 extern void zfree(
1070 zone_t zone,
1071 void *elem __unsafe_indexable);
1072
1073 __attribute__((overloadable))
1074 static inline void
zfree(
1076 zone_view_t view,
1077 void *elem __unsafe_indexable)
1078 {
1079 zfree((zone_t)view, elem);
1080 }
1081
1082 __attribute__((overloadable))
1083 static inline void
zfree(
1085 kalloc_type_view_t kt_view,
1086 void *elem __unsafe_indexable)
1087 {
1088 return kfree_type_impl(kt_view, elem);
1089 }
1090
1091 #define zfree(zone, elem) ({ \
1092 __auto_type __zfree_zone = (zone); \
1093 (zfree)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
1094 })
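
/*
 * Example (illustrative sketch): the @c zfree() macro loads and erases the
 * pointer it is given, so the caller's variable is cleared as a side effect
 * of the free. The @c widget_zone names are placeholders.
 *
 * <code>
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *
 *	// ... use w ...
 *
 *	zfree(widget_zone, w);
 *	// w is now NULL: the macro erased the variable while freeing it.
 * </code>
 */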
1095
1096
1097 /* deprecated KPIS */
1098
1099 __zalloc_deprecated("use zone_create()")
1100 extern zone_t zinit(
1101 vm_size_t size, /* the size of an element */
1102 vm_size_t maxmem, /* maximum memory to use */
1103 vm_size_t alloc, /* allocation size */
1104 const char *name __unsafe_indexable);
1105
1106 #pragma mark: implementation details
1107
1108 #define __ZONE_DECLARE_TYPE(var, type_t) __ZONE_DECLARE_TYPE2(var, type_t)
1109 #define __ZONE_DECLARE_TYPE2(var, type_t) \
1110 __attribute__((visibility("hidden"))) \
1111 extern type_t *__single __zalloc__##var##__type_name
1112
1113 #ifdef XNU_KERNEL_PRIVATE
1114 #pragma mark - XNU only interfaces
1115
1116 #include <kern/cpu_number.h>
1117
1118 #pragma GCC visibility push(hidden)
1119
1120 #pragma mark XNU only: zalloc (extended)
1121
1122 #define ZALIGN_NONE (sizeof(uint8_t) - 1)
1123 #define ZALIGN_16 (sizeof(uint16_t) - 1)
1124 #define ZALIGN_32 (sizeof(uint32_t) - 1)
1125 #define ZALIGN_PTR (sizeof(void *) - 1)
1126 #define ZALIGN_64 (sizeof(uint64_t) - 1)
1127 #define ZALIGN(t) (_Alignof(t) - 1)
1128
1129
1130 /*!
1131 * @function zalloc_permanent_tag()
1132 *
1133 * @abstract
1134 * Allocates a permanent element from the permanent zone
1135 *
1136 * @discussion
1137 * Memory returned by this function is always 0-initialized.
1138 * Note that the size of this allocation can not be determined
1139 * by zone_element_size so it should not be used for copyio.
1140 *
1141 * @param size the element size (must be smaller than PAGE_SIZE)
1142 * @param align_mask the required alignment for this allocation
1143 * @param tag the tag to use for allocations larger than a page.
1144 *
1145 * @returns the allocated element
1146 */
1147 __attribute__((malloc))
1148 extern void *__sized_by(size) zalloc_permanent_tag(
1149 vm_size_t size,
1150 vm_offset_t align_mask,
1151 vm_tag_t tag)
1152 __attribute__((__diagnose_if__((align_mask & (align_mask + 1)),
1153 "align mask looks invalid", "error")));
1154
1155 /*!
1156 * @function zalloc_permanent()
1157 *
1158 * @abstract
1159 * Allocates a permanent element from the permanent zone
1160 *
1161 * @discussion
1162 * Memory returned by this function is always 0-initialized.
1163 * Note that the size of this allocation can not be determined
1164 * by zone_element_size so it should not be used for copyio.
1165 *
1166 * @param size the element size (must be smaller than PAGE_SIZE)
1167 * @param align_mask the required alignment for this allocation
1168 *
1169 * @returns the allocated element
1170 */
1171 #define zalloc_permanent(size, align) \
1172 zalloc_permanent_tag(size, align, VM_KERN_MEMORY_KALLOC)
1173
1174 /*!
1175 * @function zalloc_permanent_type()
1176 *
1177 * @abstract
1178 * Allocates a permanent element of a given type with its natural alignment.
1179 *
1180 * @discussion
1181 * Memory returned by this function is always 0-initialized.
1182 *
1183 * @param type_t the element type
1184 *
1185 * @returns the allocated element
1186 */
1187 #define zalloc_permanent_type(type_t) \
1188 __unsafe_forge_single(type_t *, \
1189 zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
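
/*
 * Example (illustrative sketch): permanent allocations made once at startup
 * and never freed. The @c struct boot_config type and the sizes shown are
 * placeholders.
 *
 * <code>
 *	// Zero-filled, lives for the whole uptime of the kernel.
 *	struct boot_config *cfg = zalloc_permanent_type(struct boot_config);
 *
 *	// Variable-sized permanent allocations specify size and alignment
 *	// explicitly.
 *	uint64_t *bitmap = zalloc_permanent(128 * sizeof(uint64_t), ZALIGN_64);
 * </code>
 */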
1190
1191 /*!
1192 * @function zalloc_first_proc_made()
1193 *
1194 * @abstract
1195 * Declare that the "early" allocation phase is done.
1196 */
1197 extern void zalloc_first_proc_made(void);
1198 /*!
1199 * @function zalloc_iokit_lockdown()
1200 *
1201 * @abstract
1202 * Declare that iokit matching has started.
1203 */
1204 extern void zalloc_iokit_lockdown(void);
1205
1206 #pragma mark XNU only: per-cpu allocations
1207
1208 /*!
1209 * @macro zpercpu_get_cpu()
1210 *
1211 * @abstract
1212 * Get a pointer to a specific CPU slot of a given per-cpu variable.
1213 *
1214 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1215 * @param cpu the specified CPU number as returned by @c cpu_number()
1216 *
1217 * @returns the per-CPU slot for @c ptr for the specified CPU.
1218 */
1219 #define zpercpu_get_cpu(ptr, cpu) \
1220 __zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)(cpu)))
1221
1222 /*!
1223 * @macro zpercpu_get()
1224 *
1225 * @abstract
1226 * Get a pointer to the current CPU slot of a given per-cpu variable.
1227 *
1228 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1229 *
1230 * @returns the per-CPU slot for @c ptr for the current CPU.
1231 */
1232 #define zpercpu_get(ptr) \
1233 zpercpu_get_cpu(ptr, cpu_number())
1234
1235 /*!
1236 * @macro zpercpu_foreach()
1237 *
1238 * @abstract
1239 * Enumerate all per-CPU slots by address.
1240 *
1241 * @param it the name for the iterator
1242 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
1243 */
1244 #define zpercpu_foreach(it, ptr) \
1245 for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
1246 __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
1247 it < __end_##it; it = __zpcpu_next(it))
1248
1249 /*!
1250 * @macro zpercpu_foreach_cpu()
1251 *
1252 * @abstract
1253 * Enumerate all per-CPU slots by CPU slot number.
1254 *
1255 * @param cpu the name for cpu number iterator.
1256 */
1257 #define zpercpu_foreach_cpu(cpu) \
1258 for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
1259
1260 /*!
1261 * @function zalloc_percpu()
1262 *
1263 * @abstract
1264 * Allocates an element from a per-cpu zone.
1265 *
1266 * @discussion
1267 * The returned pointer cannot be used directly and must be manipulated
1268 * through the @c zpercpu_get*() interfaces.
1269 *
1270 * @param zone_or_view the zone or zone view to allocate from
1271 * @param flags a collection of @c zalloc_flags_t.
1272 *
1273 * @returns NULL or the allocated element
1274 */
1275 extern void *__zpercpu zalloc_percpu(
1276 zone_or_view_t zone_or_view,
1277 zalloc_flags_t flags);
1278
1279 static inline void *__zpercpu
__zalloc_percpu(
1281 zone_or_view_t zone_or_view,
1282 zalloc_flags_t flags)
1283 {
1284 void *__unsafe_indexable addr = (zalloc_percpu)(zone_or_view, flags);
1285 if (flags & Z_NOFAIL) {
1286 __builtin_assume(addr != NULL);
1287 }
1288 return addr;
1289 }
1290
1291 #define zalloc_percpu(zov, fl) __zalloc_percpu(zov, fl)
1292
1293 /*!
1294 * @function zfree_percpu()
1295 *
1296 * @abstract
1297 * Frees an element previously allocated with @c zalloc_percpu().
1298 *
1299 * @param zone_or_view the zone or zone view to free the element to.
1300 * @param addr the address to free
1301 */
1302 extern void zfree_percpu(
1303 zone_or_view_t zone_or_view,
1304 void *__zpercpu addr);
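
/*
 * Example (illustrative sketch): a per-CPU counter allocated from a
 * hypothetical per-cpu zone (created with @c ZC_PERCPU), updated on the
 * local CPU and summed across all CPUs. @c counter_zone is a placeholder.
 *
 * <code>
 *	uint64_t *__zpercpu counters;
 *
 *	counters = zalloc_percpu(counter_zone, Z_WAITOK | Z_ZERO);
 *
 *	// update the slot of the current CPU (with preemption disabled so
 *	// the CPU cannot change underneath us)
 *	disable_preemption();
 *	(*zpercpu_get(counters))++;
 *	enable_preemption();
 *
 *	// sum all the CPU slots
 *	uint64_t total = 0;
 *	zpercpu_foreach(it, counters) {
 *		total += *it;
 *	}
 *
 *	zfree_percpu(counter_zone, counters);
 * </code>
 */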
1305
1306 /*!
1307 * @function zalloc_percpu_permanent()
1308 *
1309 * @abstract
1310 * Allocates a permanent percpu-element from the permanent percpu zone.
1311 *
1312 * @discussion
1313 * Memory returned by this function is always 0-initialized.
1314 *
1315 * @param size the element size (must be smaller than PAGE_SIZE)
1316 * @param align_mask the required alignment for this allocation
1317 *
1318 * @returns the allocated element
1319 */
1320 extern void *__zpercpu zalloc_percpu_permanent(
1321 vm_size_t size,
1322 vm_offset_t align_mask);
1323
1324 /*!
1325 * @function zalloc_percpu_permanent_type()
1326 *
1327 * @abstract
1328 * Allocates a permanent percpu-element from the permanent percpu zone of a given
1329 * type with its natural alignment.
1330 *
1331 * @discussion
1332 * Memory returned by this function is always 0-initialized.
1333 *
1334 * @param type_t the element type
1335 *
1336 * @returns the allocated element
1337 */
1338 #define zalloc_percpu_permanent_type(type_t) \
1339 ((type_t *__zpercpu)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
1340
1341
1342 #pragma mark XNU only: SMR support for zones
1343
1344 struct smr;
1345
1346 /*!
1347 * @typedef zone_smr_free_cb_t
1348 *
1349 * @brief
1350 * Type for the delayed free callback for SMR zones.
1351 *
1352 * @description
1353 * This function is called before an element is reused,
1354 * or when memory is returned to the system.
1355 *
1356 * This function MUST zero the element, and if no special
1357 * action is to be taken on free, then @c bzero() is a fine
1358 * callback to use.
1359 *
1360 * This function also must be preemption-disabled safe,
1361 * as it runs with preemption disabled.
1362 *
1363 *
1364 * Note that this function should only clean the fields
1365 * that must be preserved for stale SMR readers to see.
1366 * Any field that is accessed after element validation
1367 * such as a try-retain or acquiring a lock on it must
1368 * be cleaned up much earlier as they might hold onto
1369 * expensive resources.
1370 *
1371 * The suggested pattern for an SMR type using this facility,
1372 * is to have 2 functions:
1373 *
1374 * - one "retire" stage that tries to clean up as much from
1375 * the element as possible, with great care to leave no dangling
1376 * pointers around, as elements in this stage might linger
1377 * in the allocator for a long time, and this could possibly
1378 * be abused during UaF exploitation.
1379 *
1380 * - one "smr_free" function which cleans up whatever was left,
1381 * and zeroes the rest of the element.
1382 *
1383 * <code>
1384 * void
1385 * type_retire(type_t elem)
1386 * {
1387 * // invalidating the element makes most fields
1388 * // inaccessible to readers.
1389 * type_mark_invalid(elem);
1390 *
1391 * // do cleanups for things requiring a validity check
1392 * kfree_type(some_type_t, elem->expensive_thing);
1393 * type_remove_from_global_list(&elem->linkage);
1394 *
1395 * zfree_smr(type_zone, elem);
1396 * }
1397 *
1398 * void
1399 * type_smr_free(void *_elem)
1400 * {
 *	type_t elem = _elem;
1402 *
1403 * // cleanup fields that are used to "find" this element
1404 * // and that SMR readers may access hazardously.
1405 * lck_ticket_destroy(&elem->lock);
1406 * kfree_data(elem->key, elem->keylen);
1407 *
1408 * // compulsory: element must be zeroed fully
1409 * bzero(elem, sizeof(*elem));
1410 * }
1411 * </code>
1412 */
1413 typedef void (*zone_smr_free_cb_t)(void *, size_t);
1414
1415 /*!
1416 * @function zone_enable_smr()
1417 *
1418 * @abstract
1419 * Enable SMR for a zone.
1420 *
1421 * @discussion
1422 * This can only be done once, and must be done before
1423 * the first allocation is made with this zone.
1424 *
1425 * @param zone the zone to enable SMR for
1426 * @param smr the smr domain to use
1427 * @param free_cb the free callback to use
1428 */
1429 extern void zone_enable_smr(
1430 zone_t zone,
1431 struct smr *smr,
1432 zone_smr_free_cb_t free_cb);
1433
1434 /*!
1435 * @function zone_id_enable_smr()
1436 *
1437 * @abstract
1438 * Enable SMR for a zone ID.
1439 *
1440 * @discussion
1441 * This can only be done once, and must be done before
1442 * the first allocation is made with this zone.
1443 *
1444 * @param zone_id the zone to enable SMR for
1445 * @param smr the smr domain to use
1446 * @param free_cb the free callback to use
1447 */
1448 #define zone_id_enable_smr(zone_id, smr, free_cb) ({ \
1449 void (*__cb)(typeof(__zalloc__##zone_id##__type_name), vm_size_t); \
1450 \
1451 __cb = (free_cb); \
1452 zone_enable_smr(zone_by_id(zone_id), smr, (zone_smr_free_cb_t)__cb); \
1453 })
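
/*
 * Example (illustrative sketch): enabling SMR on a zone before its first
 * allocation. The zone, SMR domain and callback names are placeholders;
 * the callback must follow the @c zone_smr_free_cb_t contract described
 * above.
 *
 * <code>
 *	static void type_smr_free(void *elem, size_t size);
 *	static SECURITY_READ_ONLY_LATE(zone_t) type_zone;
 *
 *	static void
 *	type_zone_init(void)
 *	{
 *		type_zone = zone_create("hypothetical.type",
 *		    sizeof(struct type), ZC_NONE);
 *		zone_enable_smr(type_zone, &type_smr, type_smr_free);
 *	}
 * </code>
 */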
1454
1455 /*!
1456 * @macro zalloc_smr()
1457 *
1458 * @abstract
1459 * Allocates an element from an SMR enabled zone
1460 *
1461 * @discussion
1462 * The SMR domain for this zone MUST NOT be entered when calling zalloc_smr().
1463 *
1464 * @param zone the zone to allocate from
1465 * @param flags a collection of @c zalloc_flags_t.
1466 *
1467 * @returns NULL or the allocated element
1468 */
1469 #define zalloc_smr(zone, flags) \
1470 zalloc_flags(zone, flags)
1471
1472 /*!
1473 * @macro zalloc_id_smr()
1474 *
1475 * @abstract
1476 * Allocates an element from a specified zone ID with SMR enabled.
1477 *
1478 * @param zid The proper @c ZONE_ID_* constant.
1479 * @param flags a collection of @c zalloc_flags_t.
1480 *
1481 * @returns NULL or the allocated element
1482 */
1483 #define zalloc_id_smr(zid, flags) \
1484 zalloc_id(zid, flags)
1485
1486 /*!
1487 * @macro zfree_smr()
1488 *
1489 * @abstract
1490 * Frees an element previously allocated with @c zalloc_smr().
1491 *
1492 * @discussion
1493 * When zfree_smr() is called, then the element is not immediately zeroed,
1494 * and the "free" callback that has been registered with the zone will
1495 * run later (@see zone_smr_free_cb_t).
1496 *
1497 * The SMR domain for this zone MUST NOT be entered when calling zfree_smr().
1498 *
1499 *
1500 * It is guaranteed that the SMR timestamp associated with an element
1501 * will always be equal or greater than the stamp associated with
1502 * elements freed before it on the same thread.
1503 *
 * This means that when freeing multiple elements in a sequence, they
 * must be freed in topological order (parents before children).
1506 *
1507 * It is worth noting that calling zfree_smr() on several elements
1508 * in a given order doesn't necessarily mean they will be effectively
1509 * reused or cleaned up in that same order, only that their SMR clocks
1510 * will expire in that order.
1511 *
1512 *
1513 * @param zone the zone to free the element to.
1514 * @param elem the address to free
1515 */
1516 extern void zfree_smr(
1517 zone_t zone,
1518 void *elem __unsafe_indexable);
1519 #define zfree_smr(zone, elem) ({ \
1520 __auto_type __zfree_zone = (zone); \
1521 (zfree_smr)(__zfree_zone, (void *)os_ptr_load_and_erase(elem)); \
1522 })
1523
1524
1525 /*!
1526 * @function zfree_id_smr()
1527 *
1528 * @abstract
1529 * Frees an element previously allocated with @c zalloc_id_smr().
1530 *
1531 * @param zone_id the zone id to free the element to.
1532 * @param addr the address to free
1533 */
1534 extern void zfree_id_smr(
1535 zone_id_t zone_id,
1536 void *addr __unsafe_indexable);
1537 #define zfree_id_smr(zid, elem) ({ \
1538 zone_id_t __zfree_zid = (zid); \
1539 (zfree_id_smr)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
1540 })
1541
1542 /*!
1543 * @macro zfree_smr_noclear()
1544 *
1545 * @abstract
1546 * Frees an element previously allocated with @c zalloc_smr().
1547 *
1548 * @discussion
1549 * This variant doesn't clear the pointer passed as an argument,
1550 * as it is often required for SMR algorithms to function correctly
1551 * to leave pointers "dangling" to an extent.
1552 *
1553 * However it expects the field in question to be an SMR_POINTER()
1554 * struct.
1555 *
1556 * @param zone the zone to free the element to.
1557 * @param elem the address to free
1558 */
1559 #define zfree_smr_noclear(zone, elem) \
1560 (zfree_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1561
1562 /*!
1563 * @macro zfree_id_smr_noclear()
1564 *
1565 * @abstract
1566 * Frees an element previously allocated with @c zalloc_id_smr().
1567 *
1568 * @discussion
1569 * This variant doesn't clear the pointer passed as an argument,
1570 * as it is often required for SMR algorithms to function correctly
1571 * to leave pointers "dangling" to an extent.
1572 *
1573 * However it expects the field in question to be an SMR_POINTER()
1574 * struct.
1575 *
1576 * @param zone the zone to free the element to.
1577 * @param elem the address to free
1578 */
1579 #define zfree_id_smr_noclear(zone, elem) \
1580 (zfree_id_smr)(zone, (void *)smr_unsafe_load(&(elem)))
1581
1582
1583 #pragma mark XNU only: zone creation (extended)
1584
1585 /*!
1586 * @enum zone_reserved_id_t
1587 *
1588 * @abstract
1589 * Well known pre-registered zones, allowing use of zone_id_require()
1590 *
1591 * @discussion
1592 * @c ZONE_ID__* aren't real zone IDs.
1593 *
1594 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
1595 * easy a value to produce (by malice or accident).
1596 *
1597 * @c ZONE_ID__FIRST_RO_EXT is the first external read only zone ID that corresponds
1598 * to the first @c zone_create_ro_id_t. There is a 1:1 mapping between zone IDs
1599 * belonging to [ZONE_ID__FIRST_RO_EXT - ZONE_ID__LAST_RO_EXT] and zone creations IDs
1600 * listed in @c zone_create_ro_id_t.
1601 *
1602 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
1603 * @c zone_create().
1604 */
1605 __enum_decl(zone_reserved_id_t, zone_id_t, {
1606 ZONE_ID__ZERO,
1607
1608 ZONE_ID_PERMANENT,
1609 ZONE_ID_PERCPU_PERMANENT,
1610
1611 ZONE_ID_THREAD_RO,
1612 ZONE_ID_MAC_LABEL,
1613 ZONE_ID_PROC_RO,
1614 ZONE_ID_PROC_SIGACTS_RO,
1615 ZONE_ID_KAUTH_CRED,
1616 ZONE_ID_CS_BLOB,
1617
1618 ZONE_ID_SANDBOX_RO,
1619 ZONE_ID_PROFILE_RO,
1620 ZONE_ID_PROTOBOX,
1621 ZONE_ID_SB_FILTER,
1622 ZONE_ID_AMFI_OSENTITLEMENTS,
1623
1624 ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
1625 ZONE_ID__FIRST_RO_EXT = ZONE_ID_SANDBOX_RO,
1626 ZONE_ID__LAST_RO_EXT = ZONE_ID_AMFI_OSENTITLEMENTS,
1627 ZONE_ID__LAST_RO = ZONE_ID__LAST_RO_EXT,
1628
1629 ZONE_ID_PMAP,
1630 ZONE_ID_VM_MAP,
1631 ZONE_ID_VM_MAP_ENTRY,
1632 ZONE_ID_VM_MAP_HOLES,
1633 ZONE_ID_VM_MAP_COPY,
1634 ZONE_ID_VM_PAGES,
1635 ZONE_ID_IPC_PORT,
1636 ZONE_ID_IPC_PORT_SET,
1637 ZONE_ID_IPC_KMSG,
1638 ZONE_ID_IPC_VOUCHERS,
1639 ZONE_ID_PROC_TASK,
1640 ZONE_ID_THREAD,
1641 ZONE_ID_TURNSTILE,
1642 ZONE_ID_SEMAPHORE,
1643 ZONE_ID_SELECT_SET,
1644 ZONE_ID_FILEPROC,
1645
1646 #if !CONFIG_MBUF_MCACHE
1647 ZONE_ID_MBUF_REF,
1648 ZONE_ID_MBUF,
1649 ZONE_ID_CLUSTER_2K,
1650 ZONE_ID_CLUSTER_4K,
1651 ZONE_ID_CLUSTER_16K,
1652 ZONE_ID_MBUF_CLUSTER_2K,
1653 ZONE_ID_MBUF_CLUSTER_4K,
1654 ZONE_ID_MBUF_CLUSTER_16K,
1655 #endif /* !CONFIG_MBUF_MCACHE */
1656
1657 ZONE_ID__FIRST_DYNAMIC,
1658 });
1659
1660 /*!
1661 * @const ZONE_ID_ANY
1662 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
1663 * Zone ID.
1664 */
1665 #define ZONE_ID_ANY ((zone_id_t)-1)
1666
1667 /*!
1668 * @const ZONE_ID_INVALID
1669 * An invalid zone_id_t that corresponds to nothing.
1670 */
1671 #define ZONE_ID_INVALID ((zone_id_t)-2)
1672
1673 /**!
1674 * @function zone_by_id
1675 *
1676 * @param zid the specified zone ID.
1677 * @returns the zone with that ID.
1678 */
1679 zone_t zone_by_id(
1680 size_t zid) __pure2;
1681
1682 /**!
1683 * @function zone_name
1684 *
1685 * @param zone the specified zone
1686 * @returns the name of the specified zone.
1687 */
1688 const char *__unsafe_indexable zone_name(
1689 zone_t zone);
1690
1691 /**!
1692 * @function zone_heap_name
1693 *
1694 * @param zone the specified zone
1695 * @returns the name of the heap this zone is part of, or "".
1696 */
1697 const char *__unsafe_indexable zone_heap_name(
1698 zone_t zone);
1699
1700 /*!
1701 * @function zone_create_ext
1702 *
1703 * @abstract
1704 * Creates a zone with the specified parameters.
1705 *
1706 * @discussion
1707 * This is an extended version of @c zone_create().
1708 *
1709 * @param name the name for the new zone.
1710 * @param size the size of the elements returned by this zone.
1711 * @param flags a set of @c zone_create_flags_t flags.
1712 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1713 *
1714 * @param extra_setup a block that can perform non trivial initialization
1715 * on the zone before it is marked valid.
1716 * This block can call advanced setups like:
1717 * - zone_set_exhaustible()
1718 *
1719 * @returns the created zone, this call never fails.
1720 */
1721 extern zone_t zone_create_ext(
1722 const char *name __unsafe_indexable,
1723 vm_size_t size,
1724 zone_create_flags_t flags,
1725 zone_id_t desired_zid,
1726 void (^extra_setup)(zone_t));
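
/*
 * Example (illustrative sketch): creating a zone with an extra setup block.
 * The zone name, element type and limit are placeholders;
 * zone_set_exhaustible() is the kind of advanced setup the block is meant
 * for.
 *
 * <code>
 *	static SECURITY_READ_ONLY_LATE(zone_t) request_zone;
 *
 *	static void
 *	request_zone_init(void)
 *	{
 *		request_zone = zone_create_ext("hypothetical.request",
 *		    sizeof(struct request), ZC_NOGC, ZONE_ID_ANY,
 *		    ^(zone_t z) {
 *			zone_set_exhaustible(z, 4096, false);
 *		});
 *	}
 * </code>
 */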
1727
1728 /*!
1729 * @macro ZONE_DECLARE
1730 *
1731 * @abstract
1732 * Declares a zone variable and its associated type.
1733 *
1734 * @param var the name of the variable to declare.
1735 * @param type_t the type of elements in the zone.
1736 */
1737 #define ZONE_DECLARE(var, type_t) \
1738 extern zone_t var; \
1739 __ZONE_DECLARE_TYPE(var, type_t)
1740
1741 /*!
1742 * @macro ZONE_DECLARE_ID
1743 *
1744 * @abstract
1745 * Declares the type associated with a zone ID.
1746 *
1747 * @param id the name of zone ID to associate a type with.
1748 * @param type_t the type of elements in the zone.
1749 */
1750 #define ZONE_DECLARE_ID(id, type_t) \
1751 __ZONE_DECLARE_TYPE(id, type_t)
1752
1753 /*!
1754 * @macro ZONE_DEFINE
1755 *
1756 * @abstract
1757 * Declares a zone variable to automatically initialize with the specified
1758 * parameters.
1759 *
1760 * @discussion
1761 * Using ZONE_DEFINE_TYPE is preferred, but not always possible.
1762 *
1763 * @param var the name of the variable to declare.
1764 * @param name the name for the zone
1765 * @param size the size of the elements returned by this zone.
1766 * @param flags a set of @c zone_create_flags_t flags.
1767 */
1768 #define ZONE_DEFINE(var, name, size, flags) \
1769 SECURITY_READ_ONLY_LATE(zone_t) var; \
1770 static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
1771 static __startup_data struct zone_create_startup_spec \
1772 __startup_zone_spec_ ## var = { &var, name, size, flags, \
1773 ZONE_ID_ANY, NULL }; \
1774 STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
1775 &__startup_zone_spec_ ## var)
1776
1777 /*!
1778 * @macro ZONE_DEFINE_TYPE
1779 *
1780 * @abstract
1781 * Defines a zone variable to automatically initialize with the specified
1782 * parameters, associated with a particular type.
1783 *
1784 * @param var the name of the variable to declare.
1785 * @param name the name for the zone
1786 * @param type_t the type of elements in the zone.
1787 * @param flags a set of @c zone_create_flags_t flags.
1788 */
1789 #define ZONE_DEFINE_TYPE(var, name, type_t, flags) \
1790 ZONE_DEFINE(var, name, sizeof(type_t), flags); \
1791 __ZONE_DECLARE_TYPE(var, type_t)
1792
1793 /*!
1794 * @macro ZONE_DEFINE_ID
1795 *
1796 * @abstract
1797 * Initializes a given zone automatically during startup with the specified
1798 * parameters.
1799 *
1800 * @param zid a @c zone_reserved_id_t value.
1801 * @param name the name for the zone
1802 * @param type_t the type of elements in the zone.
1803 * @param flags a set of @c zone_create_flags_t flags.
1804 */
1805 #define ZONE_DEFINE_ID(zid, name, type_t, flags) \
1806 ZONE_DECLARE_ID(zid, type_t); \
1807 ZONE_INIT(NULL, name, sizeof(type_t), flags, zid, NULL)
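
/*
 * Example (illustrative sketch): statically defining zones at compile time.
 * The names, element types and flags are placeholders; @c ZONE_ID_REQUEST
 * stands for a value that would have to be added to @c zone_reserved_id_t
 * above.
 *
 * <code>
 *	// a zone reachable through a zone_t variable, associated with
 *	// struct request for type-safe allocations
 *	ZONE_DEFINE_TYPE(request_zone, "hypothetical.request",
 *	    struct request, ZC_ZFREE_CLEARMEM);
 *
 *	// a zone reachable through a reserved zone ID
 *	ZONE_DEFINE_ID(ZONE_ID_REQUEST, "hypothetical.request.id",
 *	    struct request, ZC_NONE);
 * </code>
 */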
1808
1809 /*!
1810 * @macro ZONE_INIT
1811 *
1812 * @abstract
1813 * Initializes a given zone automatically during startup with the specified
1814 * parameters.
1815 *
1816 * @param var the name of the variable to initialize.
1817 * @param name the name for the zone
1818 * @param size the size of the elements returned by this zone.
1819 * @param flags a set of @c zone_create_flags_t flags.
1820 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
1821 * @param extra_setup a block that can perform non trivial initialization
1822 * (@see @c zone_create_ext()).
1823 */
1824 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
1825 __ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
1826
1827 /*!
1828 * @function zone_id_require
1829 *
1830 * @abstract
 * Requires a given pointer to belong to the specified zone, by ID and size.
1832 *
1833 * @discussion
1834 * The function panics if the check fails as it indicates that the kernel
1835 * internals have been compromised.
1836 *
1837 * This is a variant of @c zone_require() which:
1838 * - isn't sensitive to @c zone_t::elem_size being compromised,
1839 * - is slightly faster as it saves one load and a multiplication.
1840 *
1841 * @param zone_id the zone ID the address needs to belong to.
1842 * @param elem_size the size of elements for this zone.
1843 * @param addr the element address to check.
1844 */
1845 extern void zone_id_require(
1846 zone_id_t zone_id,
1847 vm_size_t elem_size,
1848 void *addr __unsafe_indexable);
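/*
 * Example (editor's sketch): re-checking the provenance of a pointer
 * received from less trusted code before operating on it.
 * `ZONE_ID_EXAMPLE` and `struct kexample` are hypothetical names.
 *
 *	static void
 *	kexample_validate(struct kexample *ke)
 *	{
 *		zone_id_require(ZONE_ID_EXAMPLE, sizeof(struct kexample), ke);
 *	}
 */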
1849
1850 /*!
1851 * @function zone_id_require_aligned
1852 *
1853 * @abstract
1854 * Requires that a given pointer belong to the specified zone, by ID and size.
1855 *
1856 * @discussion
1857 * Similar to @c zone_id_require() but does more checks such as whether the
1858 * element is properly aligned.
1859 *
1860 * @param zone_id the zone ID the address needs to belong to.
1861 * @param addr the element address to check.
1862 */
1863 extern void zone_id_require_aligned(
1864 zone_id_t zone_id,
1865 void *addr __unsafe_indexable);
1866
1867 /* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
1868 extern void zone_set_exhaustible(
1869 zone_t zone,
1870 vm_size_t max_elements,
1871 bool exhausts_by_design);
1872
1873 /*!
1874 * @function zone_raise_reserve()
1875 *
1876 * @brief
1877 * Used to raise the reserve on a zone.
1878 *
1879 * @discussion
1880 * Can be called from any context (zone_create_ext() setup hook or after).
1881 */
1882 extern void zone_raise_reserve(
1883 zone_or_view_t zone_or_view,
1884 uint16_t min_elements);
1885
1886 /*!
1887 * @function zone_fill_initially
1888 *
1889 * @brief
1890 * Initially fills a non-collectable zone so that it holds the specified number
1891 * of elements.
1892 *
1893 * @discussion
1894 * This function must be called on a non-collectable permanent zone before the
1895 * zone has been used.
1896 *
1897 * @param zone The zone to fill.
1898 * @param nelems The number of elements the zone must be able to hold.
1899 */
1900 extern void zone_fill_initially(
1901 zone_t zone,
1902 vm_size_t nelems);
1903
1904 /*!
1905 * @function zone_drain()
1906 *
1907 * @abstract
1908 * Forces a zone to be drained (have all its data structures freed
1909 * back to its data store, and empty pages returned to the system).
1910 *
1911 * @param zone the zone to drain.
1912 */
1913 extern void zone_drain(
1914 zone_t zone);
1915
1916 /*!
1917 * @struct zone_basic_stats
1918 *
1919 * @abstract
1920 * Used to report basic statistics about a zone.
1921 *
1922 * @field zbs_avail the number of elements in a zone.
1923 * @field zbs_alloc the number of allocated elements in a zone.
1924 * @field zbs_free the number of free elements in a zone.
1925 * @field zbs_cached the number of free elements in the per-CPU caches
1926 * (included in zbs_free).
1927 * @field zbs_alloc_fail
1928 * the number of allocation failures.
1929 */
1930 struct zone_basic_stats {
1931 uint64_t zbs_avail;
1932 uint64_t zbs_alloc;
1933 uint64_t zbs_free;
1934 uint64_t zbs_cached;
1935 uint64_t zbs_alloc_fail;
1936 };
1937
1938 /*!
1939 * @function zone_get_stats
1940 *
1941 * @abstract
1942 * Retrieves statistics about a zone, including its per-CPU caches.
1943 *
1944 * @param zone the zone to collect stats from.
1945 * @param stats the statistics to fill.
1946 */
1947 extern void zone_get_stats(
1948 zone_t zone,
1949 struct zone_basic_stats *stats);
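/*
 * Example (editor's sketch): deriving simple numbers from the stats for a
 * hypothetical `kexample_zone`. Since @c zbs_cached is included in
 * @c zbs_free, the number of free elements sitting outside the per-CPU
 * caches is their difference.
 *
 *	struct zone_basic_stats stats;
 *
 *	zone_get_stats(kexample_zone, &stats);
 *	uint64_t in_use     = stats.zbs_alloc;
 *	uint64_t idle_depot = stats.zbs_free - stats.zbs_cached;
 */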
1950
1951
1952 /*!
1953 * @typedef zone_exhausted_cb_t
1954 *
1955 * @brief
1956 * The callback type for the ZONE_EXHAUSTED event.
1957 */
1958 typedef void (zone_exhausted_cb_t)(zone_id_t zid, zone_t zone, bool exhausted);
1959
1960 /*!
1961 * @brief
1962 * The @c ZONE_EXHAUSTED event, which is emitted when an exhaustible zone hits its
1963 * wiring limit.
1964 *
1965 * @discussion
1966 * The @c ZONE_EXHAUSTED event is emitted from a thread that is currently
1967 * performing zone expansion and no significant amount of work can be performed
1968 * from this context.
1969 *
1970 * In particular, these callbacks cannot allocate any memory; they are expected
1971 * to filter for the zones they are interested in, and to wake up another thread
1972 * to perform the actual work (for example via a thread call).
1973 */
1974 EVENT_DECLARE(ZONE_EXHAUSTED, zone_exhausted_cb_t);
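/*
 * Example (editor's sketch): a minimal ZONE_EXHAUSTED handler. Because it
 * runs from the expanding thread and may not allocate, it only filters for
 * the zone of interest and defers the real work to a previously allocated
 * thread call. `ZONE_ID_EXAMPLE` and `kexample_exhausted_tcall` are
 * hypothetical; the handler is assumed to be registered through whatever
 * EVENT registration facility pairs with EVENT_DECLARE().
 *
 *	static thread_call_t kexample_exhausted_tcall;
 *
 *	static void
 *	kexample_zone_exhausted(zone_id_t zid, zone_t z __unused, bool exhausted)
 *	{
 *		if (zid == ZONE_ID_EXAMPLE && exhausted) {
 *			thread_call_enter(kexample_exhausted_tcall);
 *		}
 *	}
 */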
1975
1976
1977 #pragma mark XNU only: zone views
1978
1979 /*!
1980 * @enum zone_kheap_id_t
1981 *
1982 * @brief
1983 * Enumerate a particular kalloc heap.
1984 *
1985 * @discussion
1986 * More documentation about heaps is available in @c <kern/kalloc.h>.
1987 *
1988 * @const KHEAP_ID_NONE
1989 * This value denotes regular zones, not used by kalloc.
1990 *
1991 * @const KHEAP_ID_SHARED
1992 * Indicates zones part of the KHEAP_SHARED heap.
1993 *
1994 * @const KHEAP_ID_DATA_BUFFERS
1995 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
1996 *
1997 * @const KHEAP_ID_KT_VAR
1998 * Indicates zones part of the KHEAP_KT_VAR heap.
1999 */
2000 __enum_decl(zone_kheap_id_t, uint8_t, {
2001 KHEAP_ID_NONE,
2002 KHEAP_ID_SHARED,
2003 KHEAP_ID_DATA_BUFFERS,
2004 KHEAP_ID_KT_VAR,
2005
2006 #define KHEAP_ID_COUNT (KHEAP_ID_KT_VAR + 1)
2007 });
2008
2009 /*!
2010 * @macro ZONE_VIEW_DECLARE
2011 *
2012 * @abstract
2013 * (optionally) declares a zone view (in a header).
2014 *
2015 * @param var the name for the zone view.
2016 */
2017 #define ZONE_VIEW_DECLARE(var) \
2018 extern struct zone_view var[1]
2019
2020 /*!
2021 * @macro ZONE_VIEW_DEFINE
2022 *
2023 * @abstract
2024 * Defines a given zone view and what it points to.
2025 *
2026 * @discussion
2027 * Zone views can either share a pre-existing zone,
2028 * or perform a lookup into a kalloc heap for the zone
2029 * backing the bucket of the proper size.
2030 *
2031 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
2032 * as the last rank. If a view is created on a zone, that zone must have been
2033 * created before this stage.
2034 *
2035 * This macro should not be used to create zone views from the default
2036 * kalloc heap; KALLOC_TYPE_DEFINE should be used instead.
2037 *
2038 * @param var the name for the zone view.
2039 * @param name a string describing the zone view.
2040 * @param heap_or_zone a @c KHEAP_ID_* constant or a pointer to a zone.
2041 * @param size the element size to be allocated from this view.
2042 */
2043 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
2044 SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
2045 .zv_name = (name), \
2046 } }; \
2047 static __startup_data struct zone_view_startup_spec \
2048 __startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
2049 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_view_startup_init, \
2050 &__startup_zone_view_spec_ ## var)
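/*
 * Example (editor's sketch): carving a named view out of the
 * KHEAP_DATA_BUFFERS heap for 512-byte buffers so that they get their own
 * accounting. `kexample_buf_view` and the chosen size are made-up.
 *
 *	ZONE_VIEW_DEFINE(kexample_buf_view, "kexample.buffers",
 *	    KHEAP_ID_DATA_BUFFERS, 512);
 *
 *	void *buf = zalloc_flags(kexample_buf_view, Z_WAITOK);
 *	...
 *	zfree(kexample_buf_view, buf);
 */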
2051
2052
2053 #pragma mark XNU only: batched allocations
2054
2055 /*!
2056 * @typedef zstack_t
2057 *
2058 * @brief
2059 * A stack of allocated elements chained with delta encoding.
2060 *
2061 * @discussion
2062 * Some batch allocation interfaces interact with the data heap
2063 * where leaking kernel pointers is not acceptable. This is why
2064 * element offsets are used instead.
2065 */
2066 typedef struct zstack {
2067 vm_offset_t z_head;
2068 uint32_t z_count;
2069 } zstack_t;
2070
2071 /*!
2072 * @function zstack_push
2073 *
2074 * @brief
2075 * Push a given element onto a zstack.
2076 */
2077 extern void zstack_push(
2078 zstack_t *stack,
2079 void *elem);
2080
2081 /*!
2082 * @function zstack_pop
2083 *
2084 * @brief
2085 * Pops an element from a zstack; the caller must first check that it is not empty.
2086 */
2087 void *zstack_pop(
2088 zstack_t *stack);
2089
2090 /*!
2091 * @function zstack_count
2092 *
2093 * @brief
2094 * Returns the number of elements in a zstack.
2095 */
2096 static inline uint32_t
2097 zstack_count(zstack_t stack)
2098 {
2099 return stack.z_count;
2100 }
2101
2102 /*!
2103 * @function zstack_empty
2104 *
2105 * @brief
2106 * Returns whether a stack is empty.
2107 */
2108 static inline bool
2109 zstack_empty(zstack_t stack)
2110 {
2111 return zstack_count(stack) == 0;
2112 }
2113
2114 static inline zstack_t
2115 zstack_load_and_erase(zstack_t *stackp)
2116 {
2117 zstack_t stack = *stackp;
2118
2119 *stackp = (zstack_t){ };
2120 return stack;
2121 }
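/*
 * Example (editor's sketch): building up a local stack of zone-allocated
 * elements and draining it; zstack_pop() may only be called on a non-empty
 * stack. `elem_a`, `elem_b` and `consume()` stand in for caller code.
 *
 *	zstack_t stack = { };
 *
 *	zstack_push(&stack, elem_a);
 *	zstack_push(&stack, elem_b);
 *
 *	while (!zstack_empty(stack)) {
 *		consume(zstack_pop(&stack));
 *	}
 */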
2122
2123 /*!
2124 * @function zfree_nozero
2125 *
2126 * @abstract
2127 * Frees an element allocated with @c zalloc*, without zeroing it.
2128 *
2129 * @discussion
2130 * This is for the sake of networking only; no one else should use this.
2131 *
2132 * @param zone_id the zone id to free the element to.
2133 * @param elem the element to free
2134 */
2135 extern void zfree_nozero(
2136 zone_id_t zone_id,
2137 void *elem __unsafe_indexable);
2138 #define zfree_nozero(zone_id, elem) ({ \
2139 zone_id_t __zfree_zid = (zone_id); \
2140 (zfree_nozero)(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
2141 })
2142
2143 /*!
2144 * @function zalloc_n
2145 *
2146 * @abstract
2147 * Allocates a batch of elements from the specified zone.
2148 *
2149 * @discussion
2150 * This is for the sake of networking only; no one else should use this.
2151 *
2152 * @param zone_id the zone id to allocate the element from.
2153 * @param count how many elements to allocate (fewer might be returned)
2154 * @param flags a set of @c zalloc_flags_t flags.
2155 */
2156 extern zstack_t zalloc_n(
2157 zone_id_t zone_id,
2158 uint32_t count,
2159 zalloc_flags_t flags);
2160
2161 /*!
2162 * @function zfree_n
2163 *
2164 * @abstract
2165 * Batched variant of zfree(): frees a stack of elements.
2166 *
2167 * @param zone_id the zone id to free the element to.
2168 * @param stack a stack of elements to free.
2169 */
2170 extern void zfree_n(
2171 zone_id_t zone_id,
2172 zstack_t stack);
2173 #define zfree_n(zone_id, stack) ({ \
2174 zone_id_t __zfree_zid = (zone_id); \
2175 (zfree_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2176 })
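/*
 * Example (editor's sketch, networking-style usage): allocating a batch
 * from a hypothetical `ZONE_ID_EXAMPLE` zone, consuming part of it, and
 * returning the remainder in a single call. `want_more()` and `consume()`
 * stand in for caller code.
 *
 *	zstack_t batch = zalloc_n(ZONE_ID_EXAMPLE, 32, Z_NOWAIT);
 *
 *	while (!zstack_empty(batch) && want_more()) {
 *		consume(zstack_pop(&batch));
 *	}
 *	if (!zstack_empty(batch)) {
 *		zfree_n(ZONE_ID_EXAMPLE, batch);
 *	}
 */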
2177
2178 /*!
2179 * @function zfree_nozero_n
2180 *
2181 * @abstract
2182 * Batched variant of zfree_nozero(): frees a stack of elements without zeroing
2183 * them.
2184 *
2185 * @discussion
2186 * This is for the sake of networking only; no one else should use this.
2187 *
2188 * @param zone_id the zone id to free the element to.
2189 * @param stack a stack of elements to free.
2190 */
2191 extern void zfree_nozero_n(
2192 zone_id_t zone_id,
2193 zstack_t stack);
2194 #define zfree_nozero_n(zone_id, stack) ({ \
2195 zone_id_t __zfree_zid = (zone_id); \
2196 (zfree_nozero_n)(__zfree_zid, zstack_load_and_erase(&(stack))); \
2197 })
2198
2199 #pragma mark XNU only: cached objects
2200
2201 /*!
2202 * @typedef zone_cache_ops_t
2203 *
2204 * @brief
2205 * A set of callbacks used for a zcache (cache of composite objects).
2206 *
2207 * @field zc_op_alloc
2208 * The callback to "allocate" a cached object from scratch.
2209 *
2210 * @field zc_op_mark_valid
2211 * The callback that is called when a cached object is being reused,
2212 * will typically call @c zcache_mark_valid() on the various
2213 * sub-pieces of the composite cached object.
2214 *
2215 * @field zc_op_mark_invalid
2216 * The callback that is called when a composite object is being freed
2217 * to the cache. This will typically call @c zcache_mark_invalid()
2218 * on the various sub-pieces of the composite object.
2219 *
2220 * @field zc_op_free
2221 * The callback to "free" a composite object completely.
2222 */
2223 typedef const struct zone_cache_ops {
2224 void *(*zc_op_alloc)(zone_id_t, zalloc_flags_t);
2225 void *(*zc_op_mark_valid)(zone_id_t, void *);
2226 void *(*zc_op_mark_invalid)(zone_id_t, void *);
2227 void (*zc_op_free)(zone_id_t, void *);
2228 } *zone_cache_ops_t;
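/*
 * Example (editor's sketch): the shape of the four callbacks for a
 * hypothetical cache of objects backed by `kexample_zone`. The
 * mark_valid/mark_invalid hooks route the element through
 * zcache_mark_valid_single()/zcache_mark_invalid_single() (declared below)
 * so that KASAN/PGZ observe the reuse. All `kexample_*` names are made-up.
 *
 *	static void *
 *	kexample_zc_alloc(zone_id_t zid __unused, zalloc_flags_t flags)
 *	{
 *		return zalloc_flags(kexample_zone, flags);
 *	}
 *
 *	static void *
 *	kexample_zc_mark_valid(zone_id_t zid __unused, void *obj)
 *	{
 *		return zcache_mark_valid_single(kexample_zone, obj);
 *	}
 *
 *	static void *
 *	kexample_zc_mark_invalid(zone_id_t zid __unused, void *obj)
 *	{
 *		return zcache_mark_invalid_single(kexample_zone, obj);
 *	}
 *
 *	static void
 *	kexample_zc_free(zone_id_t zid __unused, void *obj)
 *	{
 *		zfree(kexample_zone, obj);
 *	}
 */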
2229
2230 #if __has_ptrcheck
2231 static inline char *__bidi_indexable
2232 zcache_transpose_bounds(
2233 char *__bidi_indexable pointer_with_bounds,
2234 char *__unsafe_indexable unsafe_pointer)
2235 {
2236 vm_offset_t offset_from_start = pointer_with_bounds - __ptr_lower_bound(pointer_with_bounds);
2237 vm_offset_t offset_to_end = __ptr_upper_bound(pointer_with_bounds) - pointer_with_bounds;
2238 vm_offset_t size = offset_from_start + offset_to_end;
2239 return __unsafe_forge_bidi_indexable(char *, unsafe_pointer - offset_from_start, size)
2240 + offset_from_start;
2241 }
2242 #else
2243 static inline char *__header_indexable
2244 zcache_transpose_bounds(
2245 char *__header_indexable pointer_with_bounds __unused,
2246 char *__unsafe_indexable unsafe_pointer)
2247 {
2248 return unsafe_pointer;
2249 }
2250 #endif // __has_ptrcheck
2251
2252 /*!
2253 * @function zcache_mark_valid()
2254 *
2255 * @brief
2256 * Mark an element as "valid".
2257 *
2258 * @description
2259 * This function is used to be able to integrate with KASAN or PGZ
2260 * for a cache of composite objects. It typically is a function
2261 * called in their @c zc_op_mark_valid() callback.
2262 *
2263 * If PGZ or KASAN isn't in use, then this callback is a no-op.
2264 * Otherwise the @c elem address might be updated.
2265 *
2266 * @param zone the zone the element belongs to.
2267 * @param elem the address of the element
2268 * @returns the new address to correctly access @c elem.
2269 */
2270 extern void *__unsafe_indexable zcache_mark_valid(
2271 zone_t zone,
2272 void *elem __unsafe_indexable);
2273
2274 static inline void *
2275 zcache_mark_valid_single(
2276 zone_t zone,
2277 void *elem)
2278 {
2279 return __unsafe_forge_single(void *, zcache_mark_valid(zone, elem));
2280 }
2281
2282 static inline void *__header_bidi_indexable
2283 zcache_mark_valid_indexable(
2284 zone_t zone,
2285 void *elem __header_bidi_indexable)
2286 {
2287 return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_valid(zone, elem));
2288 }
2289
2290 /*!
2291 * @function zcache_mark_invalid()
2292 *
2293 * @brief
2294 * Mark an element as "invalid".
2295 *
2296 * @description
2297 * This function is used to be able to integrate with KASAN or PGZ
2298 * for a cache of composite objects. It typically is a function
2299 * called in their @c zc_op_mark_invalid() callback.
2300 *
2301 * This function performs validation that @c elem belongs
2302 * to the right zone and is properly "aligned", and should
2303 * never be elided under any configuration.
2304 *
2305 * @param zone the zone the element belongs to.
2306 * @param elem the address of the element
2307 * @returns the new address to correctly access @c elem.
2308 */
2309 extern void *__unsafe_indexable zcache_mark_invalid(
2310 zone_t zone,
2311 void *elem __unsafe_indexable);
2312
2313 static inline void *
2314 zcache_mark_invalid_single(
2315 zone_t zone,
2316 void *elem)
2317 {
2318 return __unsafe_forge_single(void *, zcache_mark_invalid(zone, elem));
2319 }
2320
2321 static inline void *__header_bidi_indexable
2322 zcache_mark_invalid_indexable(
2323 zone_t zone,
2324 void *elem __header_bidi_indexable)
2325 {
2326 return zcache_transpose_bounds((char *)elem, (char *)zcache_mark_invalid(zone, elem));
2327 }
2328
2329 /*!
2330 * @macro zcache_alloc()
2331 *
2332 * @abstract
2333 * Allocates a composite object from a cache.
2334 *
2335 * @param zone_id The proper @c ZONE_ID_* constant.
2336 * @param flags a collection of @c zalloc_flags_t.
2337 *
2338 * @returns NULL or the allocated element
2339 */
2340 #define zcache_alloc(zone_id, fl) \
2341 __zalloc_cast(zone_id, zcache_alloc_n(zone_id, 1, fl).z_head)
2342
2343 /*!
2344 * @function zcache_alloc_n()
2345 *
2346 * @abstract
2347 * Allocates a stack of composite objects from a cache.
2348 *
2349 * @param zone_id The proper @c ZONE_ID_* constant.
2350 * @param count how many elements to allocate (fewer might be returned)
2351 * @param flags a set of @c zalloc_flags_t flags.
2352 *
2353 * @returns an empty stack or a stack of allocated composite objects
2354 */
2355 extern zstack_t zcache_alloc_n(
2356 zone_id_t zone_id,
2357 uint32_t count,
2358 zalloc_flags_t flags,
2359 zone_cache_ops_t ops);
2360 #define zcache_alloc_n(zone_id, count, flags) \
2361 (zcache_alloc_n)(zone_id, count, flags, __zcache_##zone_id##_ops)
2362
2363
2364
2365 /*!
2366 * @function zcache_free()
2367 *
2368 * @abstract
2369 * Frees a composite object previously allocated
2370 * with @c zcache_alloc() or @c zcache_alloc_n().
2371 *
2372 * @param zone_id the zcache id to free the object to.
2373 * @param addr the address to free
2374 * @param ops the pointer to the zcache ops for this zcache.
2375 */
2376 extern void zcache_free(
2377 zone_id_t zone_id,
2378 void *addr __unsafe_indexable,
2379 zone_cache_ops_t ops);
2380 #define zcache_free(zone_id, elem) \
2381 (zcache_free)(zone_id, (void *)os_ptr_load_and_erase(elem), \
2382 __zcache_##zone_id##_ops)
2383
2384 /*!
2385 * @function zcache_free_n()
2386 *
2387 * @abstract
2388 * Frees a stack of composite objects previously allocated
2389 * with @c zcache_alloc() or @c zcache_alloc_n().
2390 *
2391 * @param zone_id the zcache id to free the objects to.
2392 * @param stack a stack of composite objects
2393 * @param ops the pointer to the zcache ops for this zcache.
2394 */
2395 extern void zcache_free_n(
2396 zone_id_t zone_id,
2397 zstack_t stack,
2398 zone_cache_ops_t ops);
2399 #define zcache_free_n(zone_id, stack) \
2400 (zcache_free_n)(zone_id, zstack_load_and_erase(&(stack)), \
2401 __zcache_##zone_id##_ops)
2402
2403
2404 /*!
2405 * @function zcache_drain()
2406 *
2407 * @abstract
2408 * Forces a zcache to be drained (have all its data structures freed
2409 * back to the original zones).
2410 *
2411 * @param zone_id the zcache id to free the objects to.
2412 */
2413 extern void zcache_drain(
2414 zone_id_t zone_id);
2415
2416
2417 /*!
2418 * @macro ZCACHE_DECLARE
2419 *
2420 * @abstract
2421 * Declares the type associated with a zone cache ID.
2422 *
2423 * @param id the name of the zone ID to associate a type with.
2424 * @param type_t the type of elements in the zone.
2425 */
2426 #define ZCACHE_DECLARE(id, type_t) \
2427 __ZONE_DECLARE_TYPE(id, type_t); \
2428 __attribute__((visibility("hidden"))) \
2429 extern const zone_cache_ops_t __zcache_##id##_ops
2430
2431
2432 /*!
2433 * @macro ZCACHE_DEFINE
2434 *
2435 * @abstract
2436 * Defines a zone cache for a given ID and type.
2437 *
2438 * @param zid the zone ID to associate a type with.
2439 * @param name the name for the zone
2440 * @param type_t the type of elements in the zone.
2441 * @param size the size of elements in the cache
2442 * @param ops the ops for this zcache.
2443 */
2444 #define ZCACHE_DEFINE(zid, name, type_t, size, ops) \
2445 ZCACHE_DECLARE(zid, type_t); \
2446 ZONE_DECLARE_ID(zid, type_t); \
2447 const zone_cache_ops_t __zcache_##zid##_ops = (ops); \
2448 ZONE_INIT(NULL, name, size, ZC_OBJ_CACHE, zid, ^(zone_t z __unused) { \
2449 zcache_ops[zid] = (ops); \
2450 })
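/*
 * Example (editor's sketch): wiring callbacks like the `kexample_zc_*`
 * sketch shown earlier into a cache and using it. `ZONE_ID_EXAMPLE`,
 * `struct kexample` and the ops structure are all hypothetical.
 *
 *	static const struct zone_cache_ops kexample_cache_ops = {
 *		.zc_op_alloc        = kexample_zc_alloc,
 *		.zc_op_mark_valid   = kexample_zc_mark_valid,
 *		.zc_op_mark_invalid = kexample_zc_mark_invalid,
 *		.zc_op_free         = kexample_zc_free,
 *	};
 *
 *	ZCACHE_DEFINE(ZONE_ID_EXAMPLE, "kexample.cache", struct kexample,
 *	    sizeof(struct kexample), &kexample_cache_ops);
 *
 *	struct kexample *ke = zcache_alloc(ZONE_ID_EXAMPLE, Z_WAITOK);
 *	...
 *	zcache_free(ZONE_ID_EXAMPLE, ke);
 */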
2451
2452 extern zone_cache_ops_t zcache_ops[ZONE_ID__FIRST_DYNAMIC];
2453
2454 #pragma mark XNU only: PGZ support
2455
2456 /*!
2457 * @function pgz_owned()
2458 *
2459 * @brief
2460 * Returns whether an address is PGZ owned.
2461 *
2462 * @param addr The address to translate.
2463 * @returns Whether it is PGZ owned
2464 */
2465 #if CONFIG_PROB_GZALLOC
2466 extern bool pgz_owned(mach_vm_address_t addr) __pure2;
2467 #else
2468 #define pgz_owned(addr) false
2469 #endif
2470
2471 /*!
2472 * @function pgz_decode()
2473 *
2474 * @brief
2475 * Translates a PGZ protected virtual address to its unprotected
2476 * backing store.
2477 *
2478 * @discussion
2479 * This is exposed so that the VM can look up the vm_page_t for PGZ protected
2480 * elements, since the PGZ protected virtual addresses are maintained by PGZ
2481 * at the pmap level without VM involvement.
2482 *
2483 * "allow_invalid" schemes relying on sequestering also need this
2484 * to perform the locking attempts on the unprotected address.
2485 *
2486 * @param addr The address to translate.
2487 * @param size The object size.
2488 * @returns The unprotected address or @c addr.
2489 */
2490 #if CONFIG_PROB_GZALLOC
2491 #define pgz_decode(addr, size) \
2492 ((typeof(addr))__pgz_decode((mach_vm_address_t)(addr), size))
2493 #else
2494 #define pgz_decode(addr, size) (addr)
2495 #endif
2496
2497 /*!
2498 * @function pgz_decode_allow_invalid()
2499 *
2500 * @brief
2501 * Translates a PGZ protected virtual address to its unprotected
2502 * backing store, but doesn't assert it is still allocated/valid.
2503 *
2504 * @discussion
2505 * "allow_invalid" schemes relying on sequestering also need this
2506 * to perform the locking attempts on the unprotected address.
2507 *
2508 * @param addr The address to translate.
2509 * @param want_zid The expected zone ID for the element.
2510 * @returns The unprotected address or @c addr.
2511 */
2512 #if CONFIG_PROB_GZALLOC
2513 #define pgz_decode_allow_invalid(addr, want_zid) \
2514 ((typeof(addr))__pgz_decode_allow_invalid((vm_offset_t)(addr), want_zid))
2515 #else
2516 #define pgz_decode_allow_invalid(addr, zid) (addr)
2517 #endif
2518
2519 #pragma mark XNU only: misc & implementation details
2520
2521 struct zone_create_startup_spec {
2522 zone_t *z_var;
2523 const char *z_name __unsafe_indexable;
2524 vm_size_t z_size;
2525 zone_create_flags_t z_flags;
2526 zone_id_t z_zid;
2527 void (^z_setup)(zone_t);
2528 };
2529
2530 extern void zone_create_startup(
2531 struct zone_create_startup_spec *spec);
2532
2533 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2534 static __startup_data struct zone_create_startup_spec \
2535 __startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
2536 STARTUP_ARG(ZALLOC, STARTUP_RANK_FOURTH, zone_create_startup, \
2537 &__startup_zone_spec_ ## ns)
2538
2539 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
2540 __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
2541
2542 #define __zalloc_cast(namespace, expr) \
2543 ((typeof(__zalloc__##namespace##__type_name))__unsafe_forge_single(void *, expr))
2544
2545 #if ZALLOC_TYPE_SAFE
2546 #define zalloc(zov) __zalloc_cast(zov, (zalloc)(zov))
2547 #define zalloc_noblock(zov) __zalloc_cast(zov, (zalloc_noblock)(zov))
2548 #endif /* ZALLOC_TYPE_SAFE */
2549
2550 struct zone_view_startup_spec {
2551 zone_view_t zv_view;
2552 union {
2553 zone_kheap_id_t zv_heapid;
2554 zone_t *zv_zone;
2555 };
2556 vm_size_t zv_size;
2557 };
2558
2559 extern void zone_view_startup_init(
2560 struct zone_view_startup_spec *spec);
2561
2562 extern void zone_userspace_reboot_checks(void);
2563
2564 #if VM_TAG_SIZECLASSES
2565 extern void __zone_site_register(
2566 vm_allocation_site_t *site);
2567
2568 #define VM_ALLOC_SITE_TAG() ({ \
2569 __PLACE_IN_SECTION("__DATA, __data") \
2570 static vm_allocation_site_t site = { .refcount = 2, }; \
2571 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, __zone_site_register, &site); \
2572 site.tag; \
2573 })
2574 #else /* VM_TAG_SIZECLASSES */
2575 #define VM_ALLOC_SITE_TAG() VM_KERN_MEMORY_NONE
2576 #endif /* !VM_TAG_SIZECLASSES */
2577
2578 static inline zalloc_flags_t
2579 __zone_flags_mix_tag(zalloc_flags_t flags, vm_tag_t tag)
2580 {
2581 return (flags & Z_VM_TAG_MASK) ? flags : Z_VM_TAG(flags, (uint32_t)tag);
2582 }
2583
2584 #if DEBUG || DEVELOPMENT
2585 # define ZPCPU_MANGLE_MASK 0xc0c0000000000000ul
2586 #else /* !(DEBUG || DEVELOPMENT) */
2587 # define ZPCPU_MANGLE_MASK 0ul
2588 #endif /* !(DEBUG || DEVELOPMENT) */
2589
2590 #define __zpcpu_mangle(ptr) (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_MASK)
2591 #define __zpcpu_demangle(ptr) (__zpcpu_addr(ptr) | ZPCPU_MANGLE_MASK)
2592 #define __zpcpu_addr(e) ((vm_address_t)(e))
2593 #define __zpcpu_cast(ptr, e) __unsafe_forge_single(typeof(ptr), e)
2594 #define __zpcpu_next(ptr) __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
2595
2596 /**
2597 * @macro __zpcpu_mangle_for_boot()
2598 *
2599 * @discussion
2600 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
2601 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
2602 * storage marked @c __startup_data and replace it with the proper allocation
2603 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
2604 *
2605 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
2606 * provides the proper mangling of the storage into a "fake" percpu pointer so
2607 * that accesses through @c zpercpu_get() work properly.
2608 *
2609 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
2610 */
2611 #define __zpcpu_mangle_for_boot(ptr) ({ \
2612 assert(startup_phase < STARTUP_SUB_ZALLOC); \
2613 __zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
2614 })
2615
2616 extern unsigned zpercpu_count(void) __pure2;
2617
2618 #if CONFIG_PROB_GZALLOC
2619
2620 extern vm_offset_t __pgz_decode(
2621 mach_vm_address_t addr,
2622 mach_vm_size_t size);
2623
2624 extern vm_offset_t __pgz_decode_allow_invalid(
2625 vm_offset_t offs,
2626 zone_id_t zid);
2627
2628 #endif
2629 #if DEBUG || DEVELOPMENT
2630 /* zone_max_zones is here (but not zalloc_internal.h) for the BSD kernel */
2631 extern unsigned int zone_max_zones(void);
2632
2633 extern size_t zone_pages_wired;
2634 extern size_t zone_guard_pages;
2635 #endif /* DEBUG || DEVELOPMENT */
2636 #if CONFIG_ZLEAKS
2637 extern uint32_t zleak_active;
2638 extern vm_size_t zleak_max_zonemap_size;
2639 extern vm_size_t zleak_per_zone_tracking_threshold;
2640
2641 extern kern_return_t zleak_update_threshold(
2642 vm_size_t *arg,
2643 uint64_t value);
2644 #endif /* CONFIG_ZLEAKS */
2645
2646 extern uint32_t zone_map_jetsam_limit;
2647
2648 extern kern_return_t zone_map_jetsam_set_limit(uint32_t value);
2649
2650 extern zone_t percpu_u64_zone;
2651
2652 /*!
2653 * @function mach_memory_info_sample
2654 *
2655 * @abstract
2656 * Helper function for mach_memory_info() (MACH) and memorystatus_collect_jetsam_snapshot_zprint() (BSD)
2657 * to collect wired memory information.
2658 *
2659 * @param names array with `*zonesCnt` elements.
2660 * @param info array with `*zonesCnt` elements.
2661 * @param coalesce array with `*zonesCnt` elements, must be set if `redact_info` is true.
2662 * @param zonesCnt set to the allocated count of the above, and on return will be the actual count.
2663 * @param memoryInfo optional, if set must have at least `vm_page_diagnose_estimate()` elements.
2664 * @param memoryInfoCnt optional; if set, must be the count of memoryInfo; if set to 0, on return it will be `vm_page_diagnose_estimate()`.
2665 * @param redact_info if true sensitive information about zone allocations will be removed.
2666 */
2667 extern kern_return_t
2668 mach_memory_info_sample(
2669 mach_zone_name_t *names,
2670 mach_zone_info_t *info,
2671 int *coalesce,
2672 unsigned int *zonesCnt,
2673 mach_memory_info_t *memoryInfo,
2674 unsigned int memoryInfoCnt,
2675 bool redact_info);
2676
2677 extern void zone_gc_trim(void);
2678 extern void zone_gc_drain(void);
2679
2680 #pragma GCC visibility pop
2681 #endif /* XNU_KERNEL_PRIVATE */
2682
2683 /*
2684 * This macro is currently used by AppleImage4 (rdar://83924635)
2685 */
2686 #define __zalloc_ptr_load_and_erase(elem) \
2687 os_ptr_load_and_erase(elem)
2688
2689 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
2690
2691 #endif /* _KERN_ZALLOC_H_ */
2692
2693 #endif /* KERNEL_PRIVATE */
2694