1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 
59 #ifndef _KERN_ZALLOC_INTERNAL_H_
60 #define _KERN_ZALLOC_INTERNAL_H_
61 
62 #include <kern/zalloc.h>
63 #include <kern/locks.h>
64 #include <kern/simple_lock.h>
65 
66 #include <os/atomic_private.h>
67 #include <sys/queue.h>
68 #include <vm/vm_map_internal.h>
69 
70 #if KASAN
71 #include <san/kasan.h>
72 #include <kern/spl.h>
73 #endif /* KASAN */
74 
75 /*
76  * Disable zalloc zero validation under KASAN, as it duplicates
77  * checks that KASAN already performs.
78  */
79 #if KASAN
80 #define ZALLOC_ENABLE_ZERO_CHECK        0
81 #else
82 #define ZALLOC_ENABLE_ZERO_CHECK        1
83 #endif
84 
85 #if KASAN
86 #define ZALLOC_ENABLE_LOGGING           0
87 #elif DEBUG || DEVELOPMENT
88 #define ZALLOC_ENABLE_LOGGING           1
89 #else
90 #define ZALLOC_ENABLE_LOGGING           0
91 #endif
92 
93 /*!
94  * @file <kern/zalloc_internal.h>
95  *
96  * @abstract
97  * Exposes some guts of zalloc to interact with the VM, debugging, copyio and
98  * kalloc subsystems.
99  */
100 
101 __BEGIN_DECLS
102 
103 #pragma GCC visibility push(hidden)
104 
105 /*
106  *	A zone is a collection of fixed size blocks for which there
107  *	is fast allocation/deallocation access.  Kernel routines can
108  *	use zones to manage data structures dynamically, creating a zone
109  *	for each type of data structure to be managed.
110  *
111  */
112 
113 /*!
114  * @typedef zone_pva_t
115  *
116  * @brief
117  * Type used to point to a page virtual address in the zone allocator.
118  *
119  * @description
120  * - Valid pages have the top bit set.
121  * - 0 represents the "NULL" page
122  * - non 0 values with the top bit cleared represent queue heads,
123  *   indexed from the beginning of the __DATA section of the kernel.
124  *   (see zone_pageq_base).
125  */
126 typedef struct zone_packed_virtual_address {
127 	uint32_t packed_address;
128 } zone_pva_t;
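/*
 * Illustrative sketch (not part of the original header; the helper names
 * below are hypothetical, the real accessors live in the zalloc
 * implementation): classifying a zone_pva_t per the encoding above.
 *
 *     static inline bool
 *     zone_pva_is_null_example(zone_pva_t pva)
 *     {
 *             return pva.packed_address == 0;
 *     }
 *
 *     static inline bool
 *     zone_pva_is_queue_head_example(zone_pva_t pva)
 *     {
 *             // non-zero with the top bit clear: an offset from zone_pageq_base
 *             return pva.packed_address != 0 &&
 *                    (pva.packed_address & 0x80000000u) == 0;
 *     }
 */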
129 
130 /*!
131  * @struct zone_stats
132  *
133  * @abstract
134  * Per-cpu structure used for basic zone stats.
135  *
136  * @discussion
137  * The values aren't scaled for per-cpu zones.
138  */
139 struct zone_stats {
140 	uint64_t            zs_mem_allocated;
141 	uint64_t            zs_mem_freed;
142 	uint64_t            zs_alloc_fail;
143 	uint32_t            zs_alloc_rr;     /* allocation rr bias */
144 	uint32_t _Atomic    zs_alloc_not_shared;
145 };
146 
147 typedef struct zone_magazine *zone_magazine_t;
148 
149 /*!
150  * @struct zone_depot
151  *
152  * @abstract
153  * Holds a list of full and empty magazines.
154  *
155  * @discussion
156  * The data structure is a "STAILQ" and an "SLIST" combined with counters
157  * to know their lengths in O(1). Here is a graphical example:
158  *
159  *      zd_full = 3
160  *      zd_empty = 1
161  * ╭─── zd_head
162  * │ ╭─ zd_tail
163  * │ ╰────────────────────────────────────╮
164  * │    ╭───────╮   ╭───────╮   ╭───────╮ v ╭───────╮
165  * ╰───>│███████┼──>│███████┼──>│███████┼──>│       ┼─> X
166  *      ╰───────╯   ╰───────╯   ╰───────╯   ╰───────╯
167  */
168 struct zone_depot {
169 	uint32_t            zd_full;
170 	uint32_t            zd_empty;
171 	zone_magazine_t     zd_head;
172 	zone_magazine_t    *zd_tail;
173 };
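/*
 * Illustrative sketch (assumptions: struct zone_magazine is opaque in this
 * header, and the "zm_next" linkage field name is hypothetical): because
 * zd_tail points at the "next" slot of the last full magazine, appending a
 * full magazine to the STAILQ portion of the depot is O(1):
 *
 *     mag->zm_next = *zd->zd_tail;     // splice in front of the empty SLIST
 *     *zd->zd_tail = mag;
 *     zd->zd_tail  = &mag->zm_next;
 *     zd->zd_full++;
 */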
174 
175 /* see https://lemire.me/blog/2019/02/20/more-fun-with-fast-remainders-when-the-divisor-is-a-constant/ */
176 #define Z_MAGIC_QUO(s)      (((1ull << 32) - 1) / (uint64_t)(s) + 1)
177 #define Z_MAGIC_ALIGNED(s)  (~0u / (uint32_t)(s) + 1)
178 
179 /*
180  * Returns (offs / size) if offs is small enough
181  * and magic = Z_MAGIC_QUO(size)
182  */
183 static inline uint32_t
184 Z_FAST_QUO(uint64_t offs, uint64_t magic)
185 {
186 	return (offs * magic) >> 32;
187 }
188 
189 /*
190  * Returns (offs % size) if offs is small enough
191  * and magic = Z_MAGIC_QUO(size)
192  */
193 static inline uint32_t
194 Z_FAST_MOD(uint64_t offs, uint64_t magic, uint64_t size)
195 {
196 	uint32_t lowbits = (uint32_t)(offs * magic);
197 
198 	return (lowbits * size) >> 32;
199 }
200 
201 /*
202  * Returns whether (offs % size) == 0 if offs is small enough
203  * and magic = Z_MAGIC_ALIGNED(size)
204  */
205 static inline bool
206 Z_FAST_ALIGNED(uint64_t offs, uint32_t magic)
207 {
208 	return (uint32_t)(offs * magic) < magic;
209 }
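/*
 * Worked example (illustrative, not part of the original header): for a zone
 * whose element size is 48 bytes, the magic constants replace runtime
 * division on small offsets into a chunk:
 *
 *     uint64_t quo_magic   = Z_MAGIC_QUO(48);        // computed at zone setup
 *     uint32_t align_magic = Z_MAGIC_ALIGNED(48);
 *
 *     Z_FAST_QUO(96, quo_magic);           // == 96 / 48 == 2
 *     Z_FAST_MOD(100, quo_magic, 48);      // == 100 % 48 == 4
 *     Z_FAST_ALIGNED(96, align_magic);     // true,  96 % 48 == 0
 *     Z_FAST_ALIGNED(100, align_magic);    // false, 100 % 48 != 0
 *
 * The "small enough" restriction is satisfied by zone offsets, which never
 * exceed the size of a chunk.
 */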
210 
211 struct zone_size_params {
212 	uint32_t            z_align_magic;  /* magic to use with Z_FAST_ALIGNED()  */
213 	uint32_t            z_elem_size;    /* size of an element                  */
214 };
215 
216 struct zone_expand {
217 	struct zone_expand *ze_next;
218 	thread_t            ze_thread;
219 	bool                ze_pg_wait;
220 	bool                ze_vm_priv;
221 	bool                ze_clear_priv;
222 };
223 
224 #define Z_WMA_UNIT (1u << 8)
225 #define Z_WMA_MIX(base, e)  ((3 * (base) + (e) * Z_WMA_UNIT) / 4)
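/*
 * Worked example (illustrative): Z_WMA_MIX computes a fixed-point exponential
 * moving average in which the previous average keeps a weight of 3/4 and the
 * new sample gets 1/4.  With a current average of 2.0 (512 in Z_WMA_UNIT
 * units) and a new sample of 6 events:
 *
 *     Z_WMA_MIX(512, 6) == (3 * 512 + 6 * 256) / 4 == 768    // i.e. 3.0
 */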
226 
227 struct zone {
228 	/*
229 	 * Readonly / rarely written fields
230 	 */
231 
232 	/*
233 	 * The first 4 fields match a zone_view.
234 	 *
235 	 * z_self points back to the zone when the zone is initialized,
236 	 * or is NULL otherwise.
237 	 */
238 	struct zone        *z_self;
239 	zone_stats_t        z_stats;
240 	const char         *z_name;
241 	struct zone_view   *z_views;
242 	struct zone_expand *z_expander;
243 
244 	uint64_t            z_quo_magic;
245 	uint32_t            z_align_magic;
246 	uint16_t            z_elem_size;
247 	uint16_t            z_elem_offs;
248 	uint16_t            z_chunk_pages;
249 	uint16_t            z_chunk_elems;
250 
251 	uint32_t /* 32 bits */
252 	/*
253 	 * Lifecycle state (Mutable after creation)
254 	 */
255 	    z_destroyed        :1,  /* zone is (being) destroyed */
256 	    z_async_refilling  :1,  /* asynchronous allocation pending? */
257 	    z_depot_cleanup    :1,  /* per cpu depots need cleaning */
258 	    z_expanding_wait   :1,  /* is thread waiting for expansion? */
259 	    z_exhausted_wait   :1,  /* are threads waiting for exhaustion end */
260 	    z_exhausts         :1,  /* whether the zone exhausts by design */
261 
262 	/*
263 	 * Behavior configuration bits
264 	 */
265 	    z_percpu           :1,  /* the zone is percpu */
266 	    z_smr              :1,  /* the zone uses SMR */
267 	    z_permanent        :1,  /* the zone allocations are permanent */
268 	    z_nocaching        :1,  /* disallow zone caching for this zone */
269 	    collectable        :1,  /* garbage collect empty pages */
270 	    no_callout         :1,
271 	    z_destructible     :1,  /* zone can be zdestroy()ed  */
272 
273 	    _reserved          :6,
274 
275 	/*
276 	 * Debugging features
277 	 */
278 	    z_pgz_tracked      :1,  /* this zone is tracked by pgzalloc */
279 	    z_pgz_use_guards   :1,  /* this zone uses guards with PGZ */
280 	    z_kasan_fakestacks :1,
281 	    z_kasan_quarantine :1,  /* whether to use the kasan quarantine */
282 	    z_tags_sizeclass   :6,  /* idx into zone_tags_sizeclasses to associate
283 	                             * sizeclass for a particular kalloc tag */
284 	    z_uses_tags        :1,
285 	    z_log_on           :1,  /* zone logging was enabled by boot-arg */
286 	    z_tbi_tag          :1;  /* Zone supports tbi tagging */
287 
288 	uint8_t             z_cacheline1[0] __attribute__((aligned(64)));
289 
290 	/*
291 	 * Zone caching / recirculation cacheline
292 	 *
293 	 * z_recirc* fields are protected by the recirculation lock.
294 	 *
295 	 * z_recirc_cont_wma:
296 	 *   weighted moving average of the number of contentions per second,
297 	 *   in Z_WMA_UNIT units (fixed point decimal).
298 	 *
299 	 * z_recirc_cont_cur:
300 	 *   count of recorded contentions that will be fused
301 	 *   in z_recirc_cont_wma at the next period.
302 	 *
303 	 *   Note: if caching is disabled,
304 	 *   this field is used under the zone lock.
305 	 *
306 	 * z_elems_free_{min,wma} (overloaded on z_recirc_empty*):
307 	 *   tracks the history of the minimum values of z_elems_free over time
308 	 *   with "min" being the minimum it hit for the current period,
309 	 *   and "wma" the weighted moving average of those values.
310 	 *
311 	 *   This field is used if z_pcpu_cache is NULL,
312 	 *   otherwise it aliases with z_recirc_empty_{min,wma}
313 	 *
314 	 * z_recirc_{full,empty}_{min,wma}:
315 	 *   tracks the history of the minimum number of full/empty
316 	 *   magazines in the depot over time, with "min" being the minimum
317 	 *   it hit for the current period, and "wma" the weighted moving
318 	 *   average of those values.
319 	 */
320 	struct zone_cache  *__zpercpu z_pcpu_cache;
321 	struct zone_depot   z_recirc;
322 
323 	hw_lck_ticket_t     z_recirc_lock;
324 	uint32_t            z_recirc_full_min;
325 	uint32_t            z_recirc_full_wma;
326 	union {
327 		uint32_t    z_recirc_empty_min;
328 		uint32_t    z_elems_free_min;
329 	};
330 	union {
331 		uint32_t    z_recirc_empty_wma;
332 		uint32_t    z_elems_free_wma;
333 	};
334 	uint32_t            z_recirc_cont_cur;
335 	uint32_t            z_recirc_cont_wma;
336 
337 	uint16_t            z_depot_size;
338 	uint16_t            z_depot_limit;
339 
340 	uint8_t             z_cacheline2[0] __attribute__((aligned(64)));
341 
342 	/*
343 	 * often mutated fields
344 	 */
345 
346 	hw_lck_ticket_t     z_lock;
347 
348 	/*
349 	 * Page accounting (wired / VA)
350 	 *
351 	 * Those numbers are unscaled for z_percpu zones
352 	 * (zone_scale_for_percpu() needs to be used to find the true value).
353 	 */
354 	uint32_t            z_wired_max;    /* how large can this zone grow        */
355 	uint32_t            z_wired_hwm;    /* z_wired_cur high watermark          */
356 	uint32_t            z_wired_cur;    /* number of pages used by this zone   */
357 	uint32_t            z_wired_empty;  /* pages collectable by GC             */
358 	uint32_t            z_va_cur;       /* amount of VA used by this zone      */
359 
360 	/*
361 	 * list of metadata structs, which maintain per-page free element lists
362 	 */
363 	zone_pva_t          z_pageq_empty;  /* populated, completely empty pages   */
364 	zone_pva_t          z_pageq_partial;/* populated, partially filled pages   */
365 	zone_pva_t          z_pageq_full;   /* populated, completely full pages    */
366 	zone_pva_t          z_pageq_va;     /* non-populated VA pages              */
367 
368 	/*
369 	 * Zone statistics
370 	 *
371 	 * z_elems_avail:
372 	 *   number of elements in the zone (at all).
373 	 */
374 	uint32_t            z_elems_free;   /* Number of free elements             */
375 	uint32_t            z_elems_avail;  /* Number of elements available        */
376 	uint32_t            z_elems_rsv;
377 	uint32_t            z_array_size_class;
378 
379 	struct zone        *z_kt_next;
380 
381 	uint8_t             z_cacheline3[0] __attribute__((aligned(64)));
382 
383 #if KASAN_CLASSIC
384 	uint16_t            z_kasan_redzone;
385 	spl_t               z_kasan_spl;
386 #endif
387 
388 #if ZONE_ENABLE_LOGGING || CONFIG_ZLEAKS || KASAN_TBI
389 	/*
390 	 * the allocation logs are used when:
391 	 *
392 	 * - zlog<n>= boot-args are used (and then z_log_on is set)
393 	 *
394 	 * - the leak detection was triggered for the zone.
395 	 *   In that case, the log can't ever be freed,
396 	 *   but it can be enabled/disabled dynamically.
397 	 */
398 	struct btlog       *z_btlog;
399 	struct btlog       *z_btlog_disabled;
400 #endif
401 } __attribute__((aligned((64))));
402 
403 /*!
404  * @typedef zone_security_flags_t
405  *
406  * @brief
407  * Type used to store the immutable security properties of a zone.
408  *
409  * @description
410  * These properties influence the security nature of a zone and can't be
411  * modified after lockdown.
412  */
413 typedef struct zone_security_flags {
414 	uint16_t
415 	/*
416 	 * Security sensitive configuration bits
417 	 */
418 	    z_submap_idx       :8,  /* a Z_SUBMAP_IDX_* value */
419 	    z_kheap_id         :2,  /* zone_kheap_id_t when part of a kalloc heap */
420 	    z_kalloc_type      :1,  /* zones that do type-based segregation */
421 	    z_lifo             :1,  /* depot and recirculation layer are LIFO */
422 	    z_pgz_use_guards   :1,  /* this zone uses guards with PGZ */
423 	    z_submap_from_end  :1,  /* allocate from the left or the right ? */
424 	    z_noencrypt        :1,  /* do not encrypt pages when hibernating */
425 	    z_tag              :1;  /* zone supports TBI tagging */
426 	/*
427 	 * Signature equivalence zone
428 	 */
429 	zone_id_t           z_sig_eq;
430 } zone_security_flags_t;
431 
432 
433 /*
434  * Zsecurity config to enable strict free of iokit objects to zone
435  * or heap they were allocated from.
436  *
437  * Turn ZSECURITY_CONFIG_STRICT_IOKIT_FREE off on x86 so as not
438  * to break third-party kexts that haven't yet been recompiled
439  * to use the new IOKit macros.
440  */
441 #if XNU_PLATFORM_MacOSX && __x86_64__
442 #   define ZSECURITY_CONFIG_STRICT_IOKIT_FREE           OFF
443 #else
444 #   define ZSECURITY_CONFIG_STRICT_IOKIT_FREE           ON
445 #endif
446 
447 /*
448  * Zsecurity config to enable the read-only allocator
449  */
450 #if KASAN_CLASSIC
451 #   define ZSECURITY_CONFIG_READ_ONLY                   OFF
452 #else
453 #   define ZSECURITY_CONFIG_READ_ONLY                   ON
454 #endif
455 
456 /*
457  * Zsecurity config to enable making heap feng-shui
458  * less reliable.
459  */
460 #if KASAN_CLASSIC
461 #   define ZSECURITY_CONFIG_SAD_FENG_SHUI               OFF
462 #   define ZSECURITY_CONFIG_GENERAL_SUBMAPS             1
463 #else
464 #   define ZSECURITY_CONFIG_SAD_FENG_SHUI               ON
465 #   define ZSECURITY_CONFIG_GENERAL_SUBMAPS             4
466 #endif
467 
468 /*
469  * Zsecurity config to enable adjusting of elements
470  * with PGZ-OOB to right-align them in their space.
471  */
472 #if KASAN || defined(__x86_64__) || CONFIG_KERNEL_TAGGING
473 #   define ZSECURITY_CONFIG_PGZ_OOB_ADJUST              OFF
474 #else
475 #   define ZSECURITY_CONFIG_PGZ_OOB_ADJUST              ON
476 #endif
477 
478 /*
479  * Zsecurity config to enable kalloc type segregation
480  */
481 #if XNU_TARGET_OS_WATCH || KASAN_CLASSIC
482 #   define ZSECURITY_CONFIG_KT_BUDGET                   120
483 #   define ZSECURITY_CONFIG_KT_VAR_BUDGET               6
484 #else
485 #   define ZSECURITY_CONFIG_KT_BUDGET                   260
486 #   define ZSECURITY_CONFIG_KT_VAR_BUDGET               6
487 #endif
488 
489 /*
490  * Zsecurity config to enable (KASAN) tagging of memory allocations
491  */
492 #if CONFIG_KERNEL_TAGGING
493 #   define ZSECURITY_CONFIG_ZONE_TAGGING                ON
494 #else
495 #   define ZSECURITY_CONFIG_ZONE_TAGGING                OFF
496 #endif
497 
498 
499 __options_decl(kalloc_type_options_t, uint64_t, {
500 	/*
501 	 * kalloc type option to switch default accounting to private.
502 	 */
503 	KT_OPTIONS_ACCT                         = 0x00000001,
504 	/*
505 	 * kalloc type option to print additional stats regarding zone
506 	 * budget distribution and signatures.
507 	 */
508 	KT_OPTIONS_DEBUG                        = 0x00000002,
509 	/*
510 	 * kalloc type option to allow loose freeing between heaps
511 	 */
512 	KT_OPTIONS_LOOSE_FREE                   = 0x00000004,
513 });
514 
515 __enum_decl(kt_var_heap_id_t, uint32_t, {
516 	/*
517 	 * Fake "data" heap used to link views of data-only allocations that
518 	 * have been redirected to KHEAP_DATA_BUFFERS
519 	 */
520 	KT_VAR_DATA_HEAP,
521 	/*
522 	 * Heaps for pointer arrays
523 	 */
524 	KT_VAR_PTR_HEAP0,
525 	KT_VAR_PTR_HEAP1,
526 	/*
527 	 * Indicating first additional heap added
528 	 */
529 	KT_VAR__FIRST_FLEXIBLE_HEAP,
530 });
531 
532 /*
533  * Zone submap indices
534  *
535  * Z_SUBMAP_IDX_VM
536  * this map has the special property that allocations from it
537  * can be done without ever locking the submap, and it doesn't use
538  * VM entries in the map (which limits certain VM map operations on it).
539  *
540  * On ILP32 a single zone lives here (the vm_map_entry_reserved_zone).
541  *
542  * On LP64 it is also used to restrict VM allocations to the lower part
543  * of the kernel VA space, for pointer packing purposes.
544  *
545  * Z_SUBMAP_IDX_GENERAL_{0,1,2,3}
546  * used for unrestricted allocations
547  *
548  * Z_SUBMAP_IDX_DATA
549  * used to sequester bags of bytes from all other allocations and allow VA reuse
550  * within the map
551  *
552  * Z_SUBMAP_IDX_READ_ONLY
553  * used for the read-only allocator
554  */
555 __enum_decl(zone_submap_idx_t, uint32_t, {
556 	Z_SUBMAP_IDX_VM,
557 	Z_SUBMAP_IDX_READ_ONLY,
558 	Z_SUBMAP_IDX_GENERAL_0,
559 #if ZSECURITY_CONFIG(SAD_FENG_SHUI)
560 	Z_SUBMAP_IDX_GENERAL_1,
561 	Z_SUBMAP_IDX_GENERAL_2,
562 	Z_SUBMAP_IDX_GENERAL_3,
563 #endif /* ZSECURITY_CONFIG(SAD_FENG_SHUI) */
564 	Z_SUBMAP_IDX_DATA,
565 
566 	Z_SUBMAP_IDX_COUNT,
567 });
568 
569 #define KALLOC_MINALIGN     (1 << KALLOC_LOG2_MINALIGN)
570 
571 /*
572  * Variable kalloc_type heap config
573  */
574 struct kheap_info {
575 	zone_id_t               kh_zstart;
576 	kalloc_heap_t           kh_views;
577 	kalloc_type_var_view_t  kt_views;
578 };
579 typedef union kalloc_type_views {
580 	struct kalloc_type_view     *ktv_fixed;
581 	struct kalloc_type_var_view *ktv_var;
582 } kalloc_type_views_t;
583 
584 #define KT_VAR_MAX_HEAPS 8
585 #define MAX_ZONES       690
586 extern struct kheap_info        kalloc_type_heap_array[KT_VAR_MAX_HEAPS];
587 extern zone_id_t _Atomic        num_zones;
588 extern uint32_t                 zone_view_count;
589 extern struct zone              zone_array[MAX_ZONES];
590 extern struct zone_size_params  zone_ro_size_params[ZONE_ID__LAST_RO + 1];
591 extern zone_security_flags_t    zone_security_array[];
592 extern const char * const       kalloc_heap_names[KHEAP_ID_COUNT];
593 extern mach_memory_info_t      *panic_kext_memory_info;
594 extern vm_size_t                panic_kext_memory_size;
595 extern vm_offset_t              panic_fault_address;
596 extern uint16_t                 _zc_mag_size;
597 
598 #define zone_index_foreach(i) \
599 	for (zone_id_t i = 1, num_zones_##i = os_atomic_load(&num_zones, acquire); \
600 	    i < num_zones_##i; i++)
601 
602 #define zone_foreach(z) \
603 	for (zone_t z = &zone_array[1], \
604 	    last_zone_##z = &zone_array[os_atomic_load(&num_zones, acquire)]; \
605 	    z < last_zone_##z; z++)
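/*
 * Example usage (illustrative): both iterators start at index 1, skipping
 * slot 0, and bound themselves by a single acquire load of num_zones.
 * Uninitialized slots can be recognized by a NULL z_self back-pointer.
 *
 *     zone_foreach(z) {
 *             if (z->z_self == NULL) {
 *                     continue;
 *             }
 *             printf("%s: %u free elements\n", z->z_name, z->z_elems_free);
 *     }
 */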
606 
607 __abortlike
608 extern void zone_invalid_panic(zone_t zone);
609 
610 __pure2
611 static inline zone_id_t
612 zone_index(zone_t z)
613 {
614 	unsigned long delta;
615 	uint64_t quo;
616 
617 	delta = (unsigned long)z - (unsigned long)zone_array;
618 	if (delta >= MAX_ZONES * sizeof(*z)) {
619 		zone_invalid_panic(z);
620 	}
621 	quo = Z_FAST_QUO(delta, Z_MAGIC_QUO(sizeof(*z)));
622 	__builtin_assume(quo < MAX_ZONES);
623 	return (zone_id_t)quo;
624 }
625 
626 __pure2
627 static inline bool
628 zone_is_ro(zone_t zone)
629 {
630 	return zone >= &zone_array[ZONE_ID__FIRST_RO] &&
631 	       zone <= &zone_array[ZONE_ID__LAST_RO];
632 }
633 
634 static inline bool
635 zone_addr_size_crosses_page(mach_vm_address_t addr, mach_vm_size_t size)
636 {
637 	return atop(addr ^ (addr + size - 1)) != 0;
638 }
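/*
 * Example (illustrative, assuming 4K pages): a 0x20-byte span starting at
 * 0x1FF0 ends at 0x200F and therefore crosses a page boundary, while the
 * same span starting at 0x1F00 does not:
 *
 *     zone_addr_size_crosses_page(0x1FF0, 0x20);   // true
 *     zone_addr_size_crosses_page(0x1F00, 0x20);   // false
 */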
639 
640 __pure2
641 static inline uint16_t
642 zone_elem_redzone(zone_t zone)
643 {
644 #if KASAN_CLASSIC
645 	return zone->z_kasan_redzone;
646 #else
647 	(void)zone;
648 	return 0;
649 #endif
650 }
651 
652 __pure2
653 static inline uint16_t
654 zone_elem_inner_offs(zone_t zone)
655 {
656 	return zone->z_elem_offs;
657 }
658 
659 __pure2
660 static inline uint16_t
661 zone_elem_outer_offs(zone_t zone)
662 {
663 	return zone_elem_inner_offs(zone) - zone_elem_redzone(zone);
664 }
665 
666 __pure2
667 static inline vm_offset_t
668 zone_elem_inner_size(zone_t zone)
669 {
670 	return zone->z_elem_size;
671 }
672 
673 __pure2
674 static inline vm_offset_t
675 zone_elem_outer_size(zone_t zone)
676 {
677 	return zone_elem_inner_size(zone) + zone_elem_redzone(zone);
678 }
679 
680 __pure2
681 static inline zone_security_flags_t
682 zone_security_config(zone_t z)
683 {
684 	zone_id_t zid = zone_index(z);
685 	return zone_security_array[zid];
686 }
687 
688 static inline uint32_t
689 zone_count_free(zone_t zone)
690 {
691 	return zone->z_elems_free + zone->z_recirc.zd_full * _zc_mag_size;
692 }
693 
694 static inline uint32_t
695 zone_count_allocated(zone_t zone)
696 {
697 	return zone->z_elems_avail - zone_count_free(zone);
698 }
699 
700 static inline vm_size_t
701 zone_scale_for_percpu(zone_t zone, vm_size_t size)
702 {
703 	if (zone->z_percpu) {
704 		size *= zpercpu_count();
705 	}
706 	return size;
707 }
708 
709 static inline vm_size_t
710 zone_size_wired(zone_t zone)
711 {
712 	/*
713 	 * this either requires the zone lock,
714 	 * or must be used for statistics purposes only.
715 	 */
716 	vm_size_t size = ptoa(os_atomic_load(&zone->z_wired_cur, relaxed));
717 	return zone_scale_for_percpu(zone, size);
718 }
719 
720 static inline vm_size_t
721 zone_size_free(zone_t zone)
722 {
723 	return zone_scale_for_percpu(zone,
724 	           zone_elem_inner_size(zone) * zone_count_free(zone));
725 }
726 
727 /* Under KASAN builds, this also accounts for quarantined elements. */
728 static inline vm_size_t
729 zone_size_allocated(zone_t zone)
730 {
731 	return zone_scale_for_percpu(zone,
732 	           zone_elem_inner_size(zone) * zone_count_allocated(zone));
733 }
734 
735 static inline vm_size_t
736 zone_size_wasted(zone_t zone)
737 {
738 	return zone_size_wired(zone) - zone_scale_for_percpu(zone,
739 	           zone_elem_outer_size(zone) * zone->z_elems_avail);
740 }
741 
742 __pure2
743 static inline bool
744 zone_exhaustible(zone_t zone)
745 {
746 	return zone->z_wired_max != ~0u;
747 }
748 
749 __pure2
750 static inline bool
751 zone_exhausted(zone_t zone)
752 {
753 	return zone->z_wired_cur >= zone->z_wired_max;
754 }
755 
756 /*
757  * Set and get the signature equivalence for the given zone
758  */
759 extern void zone_set_sig_eq(zone_t zone, zone_id_t sig_eq);
760 extern zone_id_t zone_get_sig_eq(zone_t zone);
761 /*
762  * Return the accumulated allocated memory on the given zone stats
763  */
764 static inline vm_size_t
765 zone_stats_get_mem_allocated(zone_stats_t stats)
766 {
767 	return stats->zs_mem_allocated;
768 }
769 
770 /*
771  * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a
772  * userspace reboot is needed. The only other way to query for this information
773  * is via mach_memory_info() which is unavailable on release kernels.
774  */
775 extern uint64_t get_zones_collectable_bytes(void);
776 
777 /*!
778  * @enum zone_gc_level_t
779  *
780  * @const ZONE_GC_TRIM
781  * Request a trimming GC: it will trim allocations in excess
782  * of the working set size estimate only.
783  *
784  * @const ZONE_GC_DRAIN
785  * Request a draining GC: this is an aggressive mode that will
786  * cause all caches to be drained and all free pages returned to the system.
787  *
788  * @const ZONE_GC_JETSAM
789  * Request to consider a jetsam, and then fallback to @c ZONE_GC_TRIM or
790  * @c ZONE_GC_DRAIN depending on the state of the zone map.
791  * To avoid deadlocks, only @c vm_pageout_garbage_collect() should ever
792  * request a @c ZONE_GC_JETSAM level.
793  */
794 __enum_closed_decl(zone_gc_level_t, uint32_t, {
795 	ZONE_GC_TRIM,
796 	ZONE_GC_DRAIN,
797 	ZONE_GC_JETSAM,
798 });
799 
800 /*!
801  * @function zone_gc
802  *
803  * @brief
804  * Reduces memory used by zones by trimming caches and freelists.
805  *
806  * @discussion
807  * @c zone_gc() is called:
808  * - by the pageout daemon when the system needs more free pages.
809  * - by the VM when contiguous page allocation requests get stuck
810  *   (see vm_page_find_contiguous()).
811  *
812  * @param level         The zone GC level requested.
813  */
814 extern void     zone_gc(zone_gc_level_t level);
815 
816 #define ZONE_WSS_UPDATE_PERIOD  15
817 /*!
818  * @function compute_zone_working_set_size
819  *
820  * @brief
821  * Recomputes the working set size for every zone
822  *
823  * @discussion
824  * This runs about every @c ZONE_WSS_UPDATE_PERIOD seconds (15),
825  * computing an exponential moving average with a weight of 75%,
826  * so that the history of the last minute is the dominating factor.
827  */
828 extern void     compute_zone_working_set_size(void *);
829 
830 /* Debug logging for zone-map-exhaustion jetsams. */
831 extern void     get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
832 extern void     get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);
833 
834 /* Bootstrap zone module (create zone zone) */
835 extern void     zone_bootstrap(void);
836 
837 /* Force-enable caching on a zone, generally unsafe to call directly */
838 extern void     zone_enable_caching(zone_t zone);
839 
840 /*!
841  * @function zone_early_mem_init
842  *
843  * @brief
844  * Steal memory from pmap (prior to initialization of zalloc)
845  * for the special vm zones that allow bootstrap memory and store
846  * the range so as to facilitate range checking in zfree.
847  *
848  * @param size              the size to steal (must be a page multiple)
849  */
850 __startup_func
851 extern vm_offset_t zone_early_mem_init(
852 	vm_size_t       size);
853 
854 /*!
855  * @function zone_get_early_alloc_size
856  *
857  * @brief
858  * Compute the correct size (large enough to hold @c min_elems elements) that is a multiple
859  * of the allocation granule for the zone with the given creation flags and
860  * element size.
861  */
862 __startup_func
863 extern vm_size_t zone_get_early_alloc_size(
864 	const char          *name __unused,
865 	vm_size_t            elem_size,
866 	zone_create_flags_t  flags,
867 	vm_size_t            min_elems);
868 
869 /*!
870  * @function zone_cram_early
871  *
872  * @brief
873  * Cram memory allocated with @c zone_early_mem_init() into a zone.
874  *
875  * @param zone          The zone to cram memory into.
876  * @param newmem        The base address for the memory to cram.
877  * @param size          The size of the memory to cram into the zone.
878  */
879 __startup_func
880 extern void     zone_cram_early(
881 	zone_t          zone,
882 	vm_offset_t     newmem,
883 	vm_size_t       size);
884 
885 extern bool     zone_maps_owned(
886 	vm_address_t    addr,
887 	vm_size_t       size);
888 
889 #if KASAN_LIGHT
890 extern bool     kasan_zone_maps_owned(
891 	vm_address_t    addr,
892 	vm_size_t       size);
893 #endif /* KASAN_LIGHT */
894 
895 extern void     zone_map_sizes(
896 	vm_map_size_t  *psize,
897 	vm_map_size_t  *pfree,
898 	vm_map_size_t  *plargest_free);
899 
900 extern bool
901 zone_map_nearing_exhaustion(void);
902 
903 static inline vm_tag_t
904 zalloc_flags_get_tag(zalloc_flags_t flags)
905 {
906 	return (vm_tag_t)((flags & Z_VM_TAG_MASK) >> Z_VM_TAG_SHIFT);
907 }
908 
909 extern struct kalloc_result zalloc_ext(
910 	zone_t          zone,
911 	zone_stats_t    zstats,
912 	zalloc_flags_t  flags);
913 
914 #if KASAN
915 #define ZFREE_PACK_SIZE(esize, usize)   (((uint64_t)(usize) << 32) | (esize))
916 #define ZFREE_ELEM_SIZE(combined)       ((uint32_t)(combined))
917 #define ZFREE_USER_SIZE(combined)       ((combined) >> 32)
918 #else
919 #define ZFREE_PACK_SIZE(esize, usize)   (esize)
920 #define ZFREE_ELEM_SIZE(combined)       (combined)
921 #endif
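/*
 * Example (illustrative): on KASAN builds the element size and the
 * user-requested size travel together through zfree_ext() as one 64-bit
 * value; on other builds the packed value is simply the element size.
 *
 *     uint64_t combined = ZFREE_PACK_SIZE(192, 160);   // elem 192, user 160
 *     ZFREE_ELEM_SIZE(combined);                       // 192
 *     ZFREE_USER_SIZE(combined);                       // 160 (KASAN only)
 */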
922 
923 extern void     zfree_ext(
924 	zone_t          zone,
925 	zone_stats_t    zstats,
926 	void           *addr,
927 	uint64_t        combined_size);
928 
929 extern zone_id_t zone_id_for_element(
930 	void           *addr,
931 	vm_size_t       esize);
932 
933 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
934 extern void *zone_element_pgz_oob_adjust(
935 	void           *addr,
936 	vm_size_t       req_size,
937 	vm_size_t       elem_size);
938 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
939 
940 extern void zone_element_bounds_check(
941 	vm_address_t    addr,
942 	vm_size_t       len);
943 
944 extern vm_size_t zone_element_size(
945 	void           *addr,
946 	zone_t         *z,
947 	bool            clear_oob,
948 	vm_offset_t    *oob_offs);
949 
950 /*!
951  * @function zone_spans_ro_va
952  *
953  * @abstract
954  * This function is used to check whether the specified address range
955  * spans through the read-only zone range.
956  *
957  * @discussion
958  * This only checks for the range specified within ZONE_ADDR_READONLY.
959  * The parameters addr_start and addr_end are stripped of PAC bits
960  * before the check is made.
961  */
962 extern bool zone_spans_ro_va(
963 	vm_offset_t     addr_start,
964 	vm_offset_t     addr_end);
965 
966 /*!
967  * @function __zalloc_ro_mut_atomic
968  *
969  * @abstract
970  * This function is called from the pmap to perform the specified atomic
971  * operation on memory from the read-only allocator.
972  *
973  * @discussion
974  * This function is for internal use only and should not be called directly.
975  */
976 static inline uint64_t
977 __zalloc_ro_mut_atomic(vm_offset_t dst, zro_atomic_op_t op, uint64_t value)
978 {
979 #define __ZALLOC_RO_MUT_OP(op, op2) \
980 	case ZRO_ATOMIC_##op##_8: \
981 	        return os_atomic_##op2((uint8_t *)dst, (uint8_t)value, seq_cst); \
982 	case ZRO_ATOMIC_##op##_16: \
983 	        return os_atomic_##op2((uint16_t *)dst, (uint16_t)value, seq_cst); \
984 	case ZRO_ATOMIC_##op##_32: \
985 	        return os_atomic_##op2((uint32_t *)dst, (uint32_t)value, seq_cst); \
986 	case ZRO_ATOMIC_##op##_64: \
987 	        return os_atomic_##op2((uint64_t *)dst, (uint64_t)value, seq_cst)
988 
989 	switch (op) {
990 		__ZALLOC_RO_MUT_OP(OR, or_orig);
991 		__ZALLOC_RO_MUT_OP(XOR, xor_orig);
992 		__ZALLOC_RO_MUT_OP(AND, and_orig);
993 		__ZALLOC_RO_MUT_OP(ADD, add_orig);
994 		__ZALLOC_RO_MUT_OP(XCHG, xchg);
995 	default:
996 		panic("%s: Invalid atomic operation: %d", __func__, op);
997 	}
998 
999 #undef __ZALLOC_RO_MUT_OP
1000 }
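/*
 * Example (illustrative): asking for an atomic add of 1 on a 32-bit field of
 * a read-only element, where dst is the destination address provided by the
 * pmap; the previously stored value is returned.
 *
 *     uint64_t old = __zalloc_ro_mut_atomic(dst, ZRO_ATOMIC_ADD_32, 1);
 */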
1001 
1002 /*!
1003  * @function zone_owns
1004  *
1005  * @abstract
1006  * This function is a soft version of zone_require that checks if a given
1007  * pointer belongs to the specified zone and should not be used outside
1008  * allocator code.
1009  *
1010  * @discussion
1011  * Note that zone_owns() can only work with:
1012  * - zones not allowing foreign memory
1013  * - zones in the general submap.
1014  *
1015  * @param zone          the zone the address needs to belong to.
1016  * @param addr          the element address to check.
1017  */
1018 extern bool     zone_owns(
1019 	zone_t          zone,
1020 	void           *addr);
1021 
1022 /*!
1023  * @function zone_submap
1024  *
1025  * @param zsflags       the security flags of a specified zone.
1026  * @returns             the zone (sub)map this zone allocates from.
1027  */
1028 __pure2
1029 extern vm_map_t zone_submap(
1030 	zone_security_flags_t   zsflags);
1031 
1032 #ifndef VM_TAG_SIZECLASSES
1033 #error MAX_TAG_ZONES
1034 #endif
1035 #if VM_TAG_SIZECLASSES
1036 
1037 extern uint16_t zone_index_from_tag_index(
1038 	uint32_t        tag_zone_index);
1039 
1040 #endif /* VM_TAG_SIZECLASSES */
1041 
1042 extern lck_grp_t zone_locks_grp;
1043 
1044 static inline void
1045 zone_lock(zone_t zone)
1046 {
1047 #if KASAN_FAKESTACK
1048 	spl_t s = 0;
1049 	if (zone->z_kasan_fakestacks) {
1050 		s = splsched();
1051 	}
1052 #endif /* KASAN_FAKESTACK */
1053 	hw_lck_ticket_lock(&zone->z_lock, &zone_locks_grp);
1054 #if KASAN_FAKESTACK
1055 	zone->z_kasan_spl = s;
1056 #endif /* KASAN_FAKESTACK */
1057 }
1058 
1059 static inline void
1060 zone_unlock(zone_t zone)
1061 {
1062 #if KASAN_FAKESTACK
1063 	spl_t s = zone->z_kasan_spl;
1064 	zone->z_kasan_spl = 0;
1065 #endif /* KASAN_FAKESTACK */
1066 	hw_lck_ticket_unlock(&zone->z_lock);
1067 #if KASAN_FAKESTACK
1068 	if (zone->z_kasan_fakestacks) {
1069 		splx(s);
1070 	}
1071 #endif /* KASAN_FAKESTACK */
1072 }
1073 
1074 #define MAX_ZONE_NAME   32      /* max length of a zone name we can take from the boot-args */
1075 
1076 int track_this_zone(const char *zonename, const char *logname);
1077 extern bool panic_include_kalloc_types;
1078 extern zone_t kalloc_type_src_zone;
1079 extern zone_t kalloc_type_dst_zone;
1080 
1081 #if DEBUG || DEVELOPMENT
1082 extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag);
1083 #endif /* DEBUG || DEVELOPMENT */
1084 
1085 #pragma GCC visibility pop
1086 
1087 __END_DECLS
1088 
1089 #endif  /* _KERN_ZALLOC_INTERNAL_H_ */
1090