xref: /xnu-11215/osfmk/kern/kalloc.c (revision d4514f0b)
1 /*
2  * Copyright (c) 2000-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	kern/kalloc.c
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1985
62  *
63  *	General kernel memory allocator.  This allocator is designed
64  *	to be used by the kernel to manage dynamic memory fast.
65  */
66 
67 #include "mach/vm_types.h"
68 #include <mach/boolean.h>
69 #include <mach/sdt.h>
70 #include <mach/machine/vm_types.h>
71 #include <mach/vm_param.h>
72 #include <kern/misc_protos.h>
73 #include <kern/counter.h>
74 #include <kern/zalloc_internal.h>
75 #include <kern/kalloc.h>
76 #include <kern/ledger.h>
77 #include <kern/backtrace.h>
78 #include <vm/vm_kern_internal.h>
79 #include <vm/vm_object_xnu.h>
80 #include <vm/vm_map.h>
81 #include <vm/vm_memtag.h>
82 #include <sys/kdebug.h>
83 
84 #include <os/hash.h>
85 #include <san/kasan.h>
86 #include <libkern/section_keywords.h>
87 #include <libkern/prelink.h>
88 
89 SCALABLE_COUNTER_DEFINE(kalloc_large_count);
90 SCALABLE_COUNTER_DEFINE(kalloc_large_total);
91 
92 #pragma mark initialization
93 
94 /*
95  * All allocations of size less than KHEAP_MAX_SIZE are rounded up to the
96  * nearest zone size.  This allocator is built on top of the zone allocator.
97  * A zone is created for each potential size that we are willing to hand out
98  * in small blocks.
99  *
100  * Allocations of size greater than KHEAP_MAX_SIZE are allocated from the VM.
101  */
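/*
 * Illustrative example (sizes assume the kt_zone_cfg table below, not a
 * particular platform): a 100-byte request would be rounded up and served
 * from a 128-byte zone, while a request larger than KHEAP_MAX_SIZE bypasses
 * the sized zones entirely and is satisfied from the VM (kalloc_large()).
 */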
102 
103 /*
104  * The kt_zone_cfg table defines the configuration of zones on various
105  * platforms for kalloc_type fixed size allocations.
106  */
107 
108 #if KASAN_CLASSIC
109 #define K_SIZE_CLASS(size)    \
110 	(((size) & PAGE_MASK) == 0 ? (size) : \
111 	((size) <= 1024 ? (size) : (size) - KASAN_GUARD_SIZE))
112 #else
113 #define K_SIZE_CLASS(size)    (size)
114 #endif
115 static_assert(K_SIZE_CLASS(KHEAP_MAX_SIZE) == KHEAP_MAX_SIZE);
116 
117 static const uint16_t kt_zone_cfg[] = {
118 	K_SIZE_CLASS(16),
119 	K_SIZE_CLASS(32),
120 	K_SIZE_CLASS(48),
121 	K_SIZE_CLASS(64),
122 	K_SIZE_CLASS(80),
123 	K_SIZE_CLASS(96),
124 	K_SIZE_CLASS(128),
125 	K_SIZE_CLASS(160),
126 	K_SIZE_CLASS(192),
127 	K_SIZE_CLASS(224),
128 	K_SIZE_CLASS(256),
129 	K_SIZE_CLASS(288),
130 	K_SIZE_CLASS(368),
131 	K_SIZE_CLASS(400),
132 	K_SIZE_CLASS(512),
133 	K_SIZE_CLASS(576),
134 	K_SIZE_CLASS(768),
135 	K_SIZE_CLASS(1024),
136 	K_SIZE_CLASS(1152),
137 	K_SIZE_CLASS(1280),
138 	K_SIZE_CLASS(1664),
139 	K_SIZE_CLASS(2048),
140 	K_SIZE_CLASS(4096),
141 	K_SIZE_CLASS(6144),
142 	K_SIZE_CLASS(8192),
143 	K_SIZE_CLASS(12288),
144 	K_SIZE_CLASS(16384),
145 #if __arm64__
146 	K_SIZE_CLASS(24576),
147 	K_SIZE_CLASS(32768),
148 #endif /* __arm64__ */
149 };
150 
151 #define MAX_K_ZONE(kzc) (uint32_t)(sizeof(kzc) / sizeof(kzc[0]))
152 
153 /*
154  * kalloc_type callsites are assigned a zone during early boot. They
155  * use the dlut[] (direct lookup table), indexed by size normalized
156  * to the minimum alignment to find the right zone index quickly.
157  */
158 #define INDEX_ZDLUT(size)       (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
159 #define KALLOC_DLUT_SIZE        (KHEAP_MAX_SIZE / KALLOC_MINALIGN)
160 #define MAX_SIZE_ZDLUT          ((KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN)
161 static __startup_data uint8_t   kalloc_type_dlut[KALLOC_DLUT_SIZE];
162 static __startup_data uint32_t  kheap_zsize[KHEAP_NUM_ZONES];
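/*
 * For example, assuming a 16-byte KALLOC_MINALIGN (illustrative value):
 * INDEX_ZDLUT(100) == (100 + 15) / 16 == 7, so a 100-byte kalloc_type
 * request is resolved through kalloc_type_dlut[7].
 */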
163 
164 #if VM_TAG_SIZECLASSES
165 static_assert(VM_TAG_SIZECLASSES >= MAX_K_ZONE(kt_zone_cfg));
166 #endif
167 
168 const char * const kalloc_heap_names[] = {
169 	[KHEAP_ID_NONE]          = "",
170 	[KHEAP_ID_SHARED]        = "shared.",
171 	[KHEAP_ID_DATA_BUFFERS]  = "data.",
172 	[KHEAP_ID_KT_VAR]        = "",
173 };
174 
175 /*
176  * Shared heap configuration
177  */
178 SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_SHARED[1] = {
179 	{
180 		.kh_name     = "shared.kalloc",
181 		.kh_heap_id  = KHEAP_ID_SHARED,
182 		.kh_tag      = VM_KERN_MEMORY_KALLOC_TYPE,
183 	}
184 };
185 
186 /*
187  * Bag of bytes heap configuration
188  */
189 SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DATA_BUFFERS[1] = {
190 	{
191 		.kh_name     = "data.kalloc",
192 		.kh_heap_id  = KHEAP_ID_DATA_BUFFERS,
193 		.kh_tag      = VM_KERN_MEMORY_KALLOC_DATA,
194 	}
195 };
196 
197 /*
198  * Configuration of variable kalloc type heaps
199  */
200 SECURITY_READ_ONLY_LATE(struct kheap_info)
201 kalloc_type_heap_array[KT_VAR_MAX_HEAPS] = {};
202 SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_KT_VAR[1] = {
203 	{
204 		.kh_name     = "kalloc.type.var",
205 		.kh_heap_id  = KHEAP_ID_KT_VAR,
206 		.kh_tag      = VM_KERN_MEMORY_KALLOC_TYPE
207 	}
208 };
209 
210 KALLOC_HEAP_DEFINE(KHEAP_DEFAULT, "KHEAP_DEFAULT", KHEAP_ID_KT_VAR);
211 
212 __startup_func
213 static void
214 kalloc_zsize_compute(void)
215 {
216 	uint32_t step = KHEAP_STEP_START;
217 	uint32_t size = KHEAP_START_SIZE;
218 
219 	/*
220 	 * Manually initialize extra initial zones
221 	 */
222 	kheap_zsize[0] = size / 2;
223 	kheap_zsize[1] = size;
224 	static_assert(KHEAP_EXTRA_ZONES == 2);
225 
226 	/*
227 	 * Compute sizes for remaining zones
228 	 */
229 	for (uint32_t i = 0; i < KHEAP_NUM_STEPS; i++) {
230 		uint32_t step_idx = (i * 2) + KHEAP_EXTRA_ZONES;
231 
232 		kheap_zsize[step_idx] = K_SIZE_CLASS(size + step);
233 		kheap_zsize[step_idx + 1] = K_SIZE_CLASS(size + 2 * step);
234 
235 		step *= 2;
236 		size += step;
237 	}
238 }
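/*
 * Sketch of the progression computed above, assuming (illustrative values
 * only) KHEAP_START_SIZE == 32 and KHEAP_STEP_START == 16:
 *
 *   kheap_zsize[] = { 16, 32, 48, 64, 96, 128, 192, 256, 384, 512, ... }
 *
 * Each step contributes a "size + step" and a "size + 2 * step" class, with
 * the step doubling on every iteration.
 */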
239 
240 static zone_t
241 kalloc_zone_for_size_with_flags(
242 	zone_id_t               zid,
243 	vm_size_t               size,
244 	zalloc_flags_t          flags)
245 {
246 	vm_size_t max_size = KHEAP_MAX_SIZE;
247 	bool forcopyin = flags & Z_MAY_COPYINMAP;
248 	zone_t zone;
249 
250 	if (flags & Z_KALLOC_ARRAY) {
251 		size = roundup(size, KALLOC_ARRAY_GRANULE);
252 	}
253 
254 	if (forcopyin) {
255 #if __x86_64__
256 		/*
257 		 * On Intel, the OSData() ABI used to allocate
258 		 * from the kernel map starting at PAGE_SIZE.
259 		 *
260 		 * If only vm_map_copyin() or a wrapper is used,
261 		 * then everything will work fine because vm_map_copy_t
262 		 * will perform an actual copy if the data is smaller
263 		 * than msg_ool_size_small (== KHEAP_MAX_SIZE).
264 		 *
265 		 * However, if anyone is trying to call mach_vm_remap(),
266 		 * then bad things (TM) happen.
267 		 *
268 		 * Avoid this by preserving the ABI and moving
269 		 * to kalloc_large() earlier.
270 		 *
271 		 * Any recent code really ought to use IOMemoryDescriptor
272 		 * for this purpose however.
273 		 */
274 		max_size = PAGE_SIZE - 1;
275 #endif
276 	}
277 
278 	if (size <= max_size) {
279 		uint32_t idx;
280 
281 		if (size <= KHEAP_START_SIZE) {
282 			zid  += (size > 16);
283 		} else {
284 			/*
285 			 * . log2down(size - 1) is log2up(size) - 1
286 			 * . (size - 1) >> (log2down(size - 1) - 1)
287 			 *   is either 0x2 or 0x3
288 			 */
289 			idx   = kalloc_log2down((uint32_t)(size - 1));
290 			zid  += KHEAP_EXTRA_ZONES +
291 			    2 * (idx - KHEAP_START_IDX) +
292 			    ((uint32_t)(size - 1) >> (idx - 1)) - 2;
293 		}
294 
295 		zone = zone_by_id(zid);
296 #if KASAN_CLASSIC
297 		/*
298 		 * Under kasan classic, certain size classes are a redzone
299 		 * away from the mathematical formula above, and we need
300 		 * to "go to the next zone".
301 		 *
302 		 * Because the KHEAP_MAX_SIZE bucket _does_ exist however,
303 		 * this will never go to an "invalid" zone that doesn't
304 		 * belong to the kheap.
305 		 */
306 		if (size > zone_elem_inner_size(zone)) {
307 			zone++;
308 		}
309 #endif
310 		return zone;
311 	}
312 
313 	return ZONE_NULL;
314 }
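/*
 * Worked example of the index math above, assuming (for illustration)
 * KHEAP_START_SIZE == 32 so that KHEAP_START_IDX == 5: for a 300-byte
 * request, kalloc_log2down(299) == 8 and ((299 >> 7) - 2) == 0, so the
 * request selects the first class of that step pair, i.e. the 384-byte zone.
 */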
315 
316 zone_t
317 kalloc_zone_for_size(zone_id_t zid, size_t size)
318 {
319 	return kalloc_zone_for_size_with_flags(zid, size, Z_WAITOK);
320 }
321 
322 static inline bool
323 kheap_size_from_zone(
324 	void                   *addr,
325 	vm_size_t               size,
326 	zalloc_flags_t          flags)
327 {
328 	vm_size_t max_size = KHEAP_MAX_SIZE;
329 	bool forcopyin = flags & Z_MAY_COPYINMAP;
330 
331 #if __x86_64__
332 	/*
333 	 * If Z_FULLSIZE is used, then due to kalloc_zone_for_size_with_flags()
334 	 * behavior, the element could have a PAGE_SIZE reported size,
335 	 * yet still be from a zone for Z_MAY_COPYINMAP.
336 	 */
337 	if (forcopyin) {
338 		if (size == PAGE_SIZE &&
339 		    zone_id_for_element(addr, size) != ZONE_ID_INVALID) {
340 			return true;
341 		}
342 
343 		max_size = PAGE_SIZE - 1;
344 	}
345 #else
346 #pragma unused(addr, forcopyin)
347 #endif
348 
349 	return size <= max_size;
350 }
351 
352 /*
353  * Data zones should not use the shared zone, so set the no-share bit
354  * right after creation.
355  */
356 __startup_func
357 static void
358 kalloc_set_no_share_for_data(
359 	zone_kheap_id_t       kheap_id,
360 	zone_stats_t          zstats)
361 {
362 	if (kheap_id == KHEAP_ID_DATA_BUFFERS) {
363 		zpercpu_foreach(zs, zstats) {
364 			os_atomic_store(&zs->zs_alloc_not_shared, 1, relaxed);
365 		}
366 	}
367 }
368 
369 __startup_func
370 static void
371 kalloc_zone_init(
372 	const char           *kheap_name,
373 	zone_kheap_id_t       kheap_id,
374 	zone_id_t            *kheap_zstart,
375 	zone_create_flags_t   zc_flags)
376 {
377 	zc_flags |= ZC_PGZ_USE_GUARDS;
378 
379 	for (uint32_t i = 0; i < KHEAP_NUM_ZONES; i++) {
380 		uint32_t size = kheap_zsize[i];
381 		char buf[MAX_ZONE_NAME], *z_name;
382 		int len;
383 
384 		len = scnprintf(buf, MAX_ZONE_NAME, "%s.%u", kheap_name, size);
385 		z_name = zalloc_permanent(len + 1, ZALIGN_NONE);
386 		strlcpy(z_name, buf, len + 1);
387 
388 		(void)zone_create_ext(z_name, size, zc_flags, ZONE_ID_ANY, ^(zone_t z){
389 #if __arm64e__ || ZSECURITY_CONFIG(ZONE_TAGGING)
390 			uint32_t scale = kalloc_log2down(size / 32);
391 
392 			if (size == 32 << scale) {
393 			        z->z_array_size_class = scale;
394 			} else {
395 			        z->z_array_size_class = scale | 0x10;
396 			}
397 #endif
398 			zone_security_array[zone_index(z)].z_kheap_id = kheap_id;
399 			if (i == 0) {
400 			        *kheap_zstart = zone_index(z);
401 			}
402 			kalloc_set_no_share_for_data(kheap_id, z->z_stats);
403 		});
404 	}
405 }
406 
407 __startup_func
408 static void
409 kalloc_heap_init(struct kalloc_heap *kheap)
410 {
411 	kalloc_zone_init("kalloc", kheap->kh_heap_id, &kheap->kh_zstart,
412 	    ZC_NONE);
413 	/*
414 	 * Count all the "raw" views for zones in the heap.
415 	 */
416 	zone_view_count += KHEAP_NUM_ZONES;
417 }
418 
419 #define KEXT_ALIGN_SHIFT           6
420 #define KEXT_ALIGN_BYTES           (1<< KEXT_ALIGN_SHIFT)
421 #define KEXT_ALIGN_MASK            (KEXT_ALIGN_BYTES-1)
422 #define kt_scratch_size            (256ul << 10)
423 #define KALLOC_TYPE_SECTION(type) \
424 	(type == KTV_FIXED? "__kalloc_type": "__kalloc_var")
425 
426 /*
427  * Enum to specify the kalloc_type variant being used.
428  */
429 __options_decl(kalloc_type_variant_t, uint16_t, {
430 	KTV_FIXED     = 0x0001,
431 	KTV_VAR       = 0x0002,
432 });
433 
434 /*
435  * Macros that generate the appropriate kalloc_type variant (i.e. fixed or
436  * variable) of the desired variable/function.
437  */
438 #define kalloc_type_var(type, var)              \
439 	((type) == KTV_FIXED?                       \
440 	(vm_offset_t) kalloc_type_##var##_fixed:    \
441 	(vm_offset_t) kalloc_type_##var##_var)
442 #define kalloc_type_func(type, func, ...)       \
443 	((type) == KTV_FIXED?                       \
444 	kalloc_type_##func##_fixed(__VA_ARGS__):    \
445 	kalloc_type_##func##_var(__VA_ARGS__))
446 
447 TUNABLE(kalloc_type_options_t, kt_options, "kt", 0);
448 TUNABLE(uint16_t, kt_var_heaps, "kt_var_heaps",
449     ZSECURITY_CONFIG_KT_VAR_BUDGET);
450 TUNABLE(uint16_t, kt_fixed_zones, "kt_fixed_zones",
451     ZSECURITY_CONFIG_KT_BUDGET);
452 TUNABLE(uint16_t, kt_var_ptr_heaps, "kt_var_ptr_heaps", 2);
453 static TUNABLE(bool, kt_shared_fixed, "-kt-shared", true);
454 
455 /*
456  * Section start/end for fixed kalloc_type views
457  */
458 extern struct kalloc_type_view kalloc_type_sec_start_fixed[]
459 __SECTION_START_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_type");
460 
461 extern struct kalloc_type_view kalloc_type_sec_end_fixed[]
462 __SECTION_END_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_type");
463 
464 /*
465  * Section start/end for variable kalloc_type views
466  */
467 extern struct kalloc_type_var_view kalloc_type_sec_start_var[]
468 __SECTION_START_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_var");
469 
470 extern struct kalloc_type_var_view kalloc_type_sec_end_var[]
471 __SECTION_END_SYM(KALLOC_TYPE_SEGMENT, "__kalloc_var");
472 
473 __startup_data
474 static kalloc_type_views_t *kt_buffer = NULL;
475 __startup_data
476 static uint64_t kt_count;
477 __startup_data
478 uint32_t kalloc_type_hash_seed;
479 
480 __startup_data
481 static uint16_t kt_freq_list[MAX_K_ZONE(kt_zone_cfg)];
482 __startup_data
483 static uint16_t kt_freq_list_total[MAX_K_ZONE(kt_zone_cfg)];
484 
485 struct nzones_with_idx {
486 	uint16_t nzones;
487 	uint16_t idx;
488 };
489 int16_t zone_carry = 0;
490 
491 _Static_assert(__builtin_popcount(KT_SUMMARY_MASK_TYPE_BITS) == (KT_GRANULE_MAX + 1),
492     "KT_SUMMARY_MASK_TYPE_BITS doesn't match KT_GRANULE_MAX");
493 
494 /*
495  * For use by lldb to iterate over kalloc types
496  */
497 SECURITY_READ_ONLY_LATE(uint64_t) num_kt_sizeclass = MAX_K_ZONE(kt_zone_cfg);
498 SECURITY_READ_ONLY_LATE(zone_t) kalloc_type_zarray[MAX_K_ZONE(kt_zone_cfg)];
499 SECURITY_READ_ONLY_LATE(zone_t) kt_singleton_array[MAX_K_ZONE(kt_zone_cfg)];
500 
501 #define KT_GET_HASH(flags) (uint16_t)((flags & KT_HASH) >> 16)
502 static_assert(KT_HASH >> 16 == (KMEM_RANGE_MASK | KMEM_HASH_SET |
503     KMEM_DIRECTION_MASK),
504     "Insufficient bits to represent range and dir for VM allocations");
505 static_assert(MAX_K_ZONE(kt_zone_cfg) < KALLOC_TYPE_IDX_MASK,
506     "validate idx mask");
507 /* qsort routines */
508 typedef int (*cmpfunc_t)(const void *a, const void *b);
509 extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
510 
511 static inline uint16_t
512 kalloc_type_get_idx(uint32_t kt_size)
513 {
514 	return (uint16_t) (kt_size >> KALLOC_TYPE_IDX_SHIFT);
515 }
516 
517 static inline uint32_t
518 kalloc_type_set_idx(uint32_t kt_size, uint16_t idx)
519 {
520 	return kt_size | ((uint32_t) idx << KALLOC_TYPE_IDX_SHIFT);
521 }
522 
523 static void
524 kalloc_type_build_dlut(void)
525 {
526 	vm_size_t size = 0;
527 	for (int i = 0; i < KALLOC_DLUT_SIZE; i++, size += KALLOC_MINALIGN) {
528 		uint8_t zindex = 0;
529 		while (kt_zone_cfg[zindex] < size) {
530 			zindex++;
531 		}
532 		kalloc_type_dlut[i] = zindex;
533 	}
534 }
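/*
 * Continuing the dlut example above (16-byte minimum alignment assumed):
 * entry 7 serves request sizes 97..112 and ends up holding the index of the
 * first kt_zone_cfg entry that is >= 112, i.e. the 128-byte sizeclass.
 */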
535 
536 static uint32_t
537 kalloc_type_idx_for_size(uint32_t size)
538 {
539 	assert(size <= KHEAP_MAX_SIZE);
540 	uint16_t idx = kalloc_type_dlut[INDEX_ZDLUT(size)];
541 	return kalloc_type_set_idx(size, idx);
542 }
543 
544 static void
545 kalloc_type_assign_zone_fixed(
546 	kalloc_type_view_t     *cur,
547 	kalloc_type_view_t     *end,
548 	zone_t                  z,
549 	zone_t                  sig_zone,
550 	zone_t                  shared_zone)
551 {
552 	/*
553 	 * Assign the zone created for every kalloc_type_view
554 	 * of the same unique signature
555 	 */
556 	bool need_raw_view = false;
557 
558 	while (cur < end) {
559 		kalloc_type_view_t kt = *cur;
560 		struct zone_view *zv = &kt->kt_zv;
561 		zv->zv_zone = z;
562 		kalloc_type_flags_t kt_flags = kt->kt_flags;
563 		zone_security_flags_t zsflags = zone_security_config(z);
564 
565 		assert(kalloc_type_get_size(kt->kt_size) <= z->z_elem_size);
566 		if (!shared_zone) {
567 			assert(zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS);
568 		}
569 
570 		if (kt_flags & KT_SLID) {
571 			kt->kt_signature -= vm_kernel_slide;
572 			kt->kt_zv.zv_name -= vm_kernel_slide;
573 		}
574 
575 		if ((kt_flags & KT_PRIV_ACCT) ||
576 		    ((kt_options & KT_OPTIONS_ACCT) && (kt_flags & KT_DEFAULT))) {
577 			zv->zv_stats = zalloc_percpu_permanent_type(
578 				struct zone_stats);
579 			need_raw_view = true;
580 			zone_view_count += 1;
581 		} else {
582 			zv->zv_stats = z->z_stats;
583 		}
584 
585 		if ((kt_flags & KT_NOSHARED) || !shared_zone) {
586 			if ((kt_flags & KT_NOSHARED) && !(kt_flags & KT_PRIV_ACCT)) {
587 				panic("KT_NOSHARED used w/o private accounting for view %s",
588 				    zv->zv_name);
589 			}
590 
591 			zpercpu_foreach(zs, zv->zv_stats) {
592 				os_atomic_store(&zs->zs_alloc_not_shared, 1, relaxed);
593 			}
594 		}
595 
596 		if (zsflags.z_kheap_id != KHEAP_ID_DATA_BUFFERS) {
597 			kt->kt_zshared = shared_zone;
598 			kt->kt_zsig = sig_zone;
599 			/*
600 			 * If we haven't yet set the signature equivalence, set it;
601 			 * otherwise validate that the zone has the same signature equivalence
602 			 * as the sig_zone provided.
603 			 */
604 			if (!zone_get_sig_eq(z)) {
605 				zone_set_sig_eq(z, zone_index(sig_zone));
606 			} else {
607 				assert(zone_get_sig_eq(z) == zone_get_sig_eq(sig_zone));
608 			}
609 		}
610 		zv->zv_next = (zone_view_t) z->z_views;
611 		zv->zv_zone->z_views = (zone_view_t) kt;
612 		cur++;
613 	}
614 	if (need_raw_view) {
615 		zone_view_count += 1;
616 	}
617 }
618 
619 __startup_func
620 static void
621 kalloc_type_assign_zone_var(kalloc_type_var_view_t *cur,
622     kalloc_type_var_view_t *end, uint32_t heap_idx)
623 {
624 	struct kheap_info *cfg = &kalloc_type_heap_array[heap_idx];
625 	while (cur < end) {
626 		kalloc_type_var_view_t kt = *cur;
627 		kt->kt_heap_start = cfg->kh_zstart;
628 		kalloc_type_flags_t kt_flags = kt->kt_flags;
629 
630 		if (kt_flags & KT_SLID) {
631 			if (kt->kt_sig_hdr) {
632 				kt->kt_sig_hdr -= vm_kernel_slide;
633 			}
634 			kt->kt_sig_type -= vm_kernel_slide;
635 			kt->kt_name -= vm_kernel_slide;
636 		}
637 
638 		if ((kt_flags & KT_PRIV_ACCT) ||
639 		    ((kt_options & KT_OPTIONS_ACCT) && (kt_flags & KT_DEFAULT))) {
640 			kt->kt_stats = zalloc_percpu_permanent_type(struct zone_stats);
641 			zone_view_count += 1;
642 		}
643 
644 		kt->kt_next = (zone_view_t) cfg->kt_views;
645 		cfg->kt_views = kt;
646 		cur++;
647 	}
648 }
649 
650 __startup_func
651 static inline void
652 kalloc_type_slide_fixed(vm_offset_t addr)
653 {
654 	kalloc_type_view_t ktv = (struct kalloc_type_view *) addr;
655 	ktv->kt_signature += vm_kernel_slide;
656 	ktv->kt_zv.zv_name += vm_kernel_slide;
657 	ktv->kt_flags |= KT_SLID;
658 }
659 
660 __startup_func
661 static inline void
662 kalloc_type_slide_var(vm_offset_t addr)
663 {
664 	kalloc_type_var_view_t ktv = (struct kalloc_type_var_view *) addr;
665 	if (ktv->kt_sig_hdr) {
666 		ktv->kt_sig_hdr += vm_kernel_slide;
667 	}
668 	ktv->kt_sig_type += vm_kernel_slide;
669 	ktv->kt_name += vm_kernel_slide;
670 	ktv->kt_flags |= KT_SLID;
671 }
672 
673 __startup_func
674 static void
675 kalloc_type_validate_flags(
676 	kalloc_type_flags_t   kt_flags,
677 	const char           *kt_name,
678 	uuid_string_t         kext_uuid)
679 {
680 	if (!(kt_flags & KT_CHANGED) || !(kt_flags & KT_CHANGED2)) {
681 		panic("kalloc_type_view(%s) from kext(%s) hasn't been rebuilt with "
682 		    "required xnu headers", kt_name, kext_uuid);
683 	}
684 }
685 
686 static kalloc_type_flags_t
687 kalloc_type_get_flags_fixed(vm_offset_t addr, uuid_string_t kext_uuid)
688 {
689 	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
690 	kalloc_type_validate_flags(ktv->kt_flags, ktv->kt_zv.zv_name, kext_uuid);
691 	return ktv->kt_flags;
692 }
693 
694 static kalloc_type_flags_t
695 kalloc_type_get_flags_var(vm_offset_t addr, uuid_string_t kext_uuid)
696 {
697 	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
698 	kalloc_type_validate_flags(ktv->kt_flags, ktv->kt_name, kext_uuid);
699 	return ktv->kt_flags;
700 }
701 
702 /*
703  * Check if signature of type is made up of only data and padding
704  */
705 static bool
706 kalloc_type_is_data(kalloc_type_flags_t kt_flags)
707 {
708 	assert(kt_flags & KT_CHANGED);
709 	return kt_flags & KT_DATA_ONLY;
710 }
711 
712 /*
713  * Check if signature of type is made up of only pointers
714  */
715 static bool
716 kalloc_type_is_ptr_array(kalloc_type_flags_t kt_flags)
717 {
718 	assert(kt_flags & KT_CHANGED2);
719 	return kt_flags & KT_PTR_ARRAY;
720 }
721 
722 static bool
723 kalloc_type_from_vm(kalloc_type_flags_t kt_flags)
724 {
725 	assert(kt_flags & KT_CHANGED);
726 	return kt_flags & KT_VM;
727 }
728 
729 __startup_func
730 static inline vm_size_t
731 kalloc_type_view_sz_fixed(void)
732 {
733 	return sizeof(struct kalloc_type_view);
734 }
735 
736 __startup_func
737 static inline vm_size_t
738 kalloc_type_view_sz_var(void)
739 {
740 	return sizeof(struct kalloc_type_var_view);
741 }
742 
743 __startup_func
744 static inline uint64_t
745 kalloc_type_view_count(kalloc_type_variant_t type, vm_offset_t start,
746     vm_offset_t end)
747 {
748 	return (end - start) / kalloc_type_func(type, view_sz);
749 }
750 
751 __startup_func
752 static inline void
753 kalloc_type_buffer_copy_fixed(kalloc_type_views_t *buffer, vm_offset_t ktv)
754 {
755 	buffer->ktv_fixed = (kalloc_type_view_t) ktv;
756 }
757 
758 __startup_func
759 static inline void
760 kalloc_type_buffer_copy_var(kalloc_type_views_t *buffer, vm_offset_t ktv)
761 {
762 	buffer->ktv_var = (kalloc_type_var_view_t) ktv;
763 }
764 
765 __startup_func
766 static void
767 kalloc_type_handle_data_view_fixed(vm_offset_t addr)
768 {
769 	kalloc_type_view_t cur_data_view = (kalloc_type_view_t) addr;
770 	zone_t z = kalloc_zone_for_size(KHEAP_DATA_BUFFERS->kh_zstart,
771 	    cur_data_view->kt_size);
772 	kalloc_type_assign_zone_fixed(&cur_data_view, &cur_data_view + 1, z, NULL,
773 	    NULL);
774 }
775 
776 __startup_func
777 static void
778 kalloc_type_handle_data_view_var(vm_offset_t addr)
779 {
780 	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
781 	kalloc_type_assign_zone_var(&ktv, &ktv + 1, KT_VAR_DATA_HEAP);
782 }
783 
784 __startup_func
785 static uint32_t
786 kalloc_type_handle_parray_var(void)
787 {
788 	uint32_t i = 0;
789 	kalloc_type_var_view_t kt = kt_buffer[0].ktv_var;
790 	const char *p_name = kt->kt_name;
791 
792 	/*
793 	 * The sorted list of variable kalloc_type_view has pointer arrays at the
794 	 * beginning. Walk through them and assign a random pointer heap to each
795 	 * type detected by typename.
796 	 */
797 	while (kalloc_type_is_ptr_array(kt->kt_flags)) {
798 		uint32_t heap_id = kmem_get_random16(1) + KT_VAR_PTR_HEAP0;
799 		const char *c_name = kt->kt_name;
800 		uint32_t p_i = i;
801 
802 		while (strcmp(c_name, p_name) == 0) {
803 			i++;
804 			kt = kt_buffer[i].ktv_var;
805 			c_name = kt->kt_name;
806 		}
807 		p_name = c_name;
808 		kalloc_type_assign_zone_var(&kt_buffer[p_i].ktv_var,
809 		    &kt_buffer[i].ktv_var, heap_id);
810 	}
811 
812 	/*
813 	 * Returns the index of the first view that isn't a pointer array
814 	 */
815 	return i;
816 }
817 
818 __startup_func
819 static uint32_t
820 kalloc_hash_adjust(uint32_t hash, uint32_t shift)
821 {
822 	/*
823 	 * Limit range_id to ptr ranges
824 	 */
825 	uint32_t range_id = kmem_adjust_range_id(hash);
826 	uint32_t direction = hash & 0x8000;
827 	return (range_id | KMEM_HASH_SET | direction) << shift;
828 }
829 
830 __startup_func
831 static void
832 kalloc_type_set_type_hash(const char *sig_ty, const char *sig_hdr,
833     kalloc_type_flags_t *kt_flags)
834 {
835 	uint32_t hash = 0;
836 
837 	assert(sig_ty != NULL);
838 	hash = os_hash_jenkins_update(sig_ty, strlen(sig_ty),
839 	    kalloc_type_hash_seed);
840 	if (sig_hdr) {
841 		hash = os_hash_jenkins_update(sig_hdr, strlen(sig_hdr), hash);
842 	}
843 	os_hash_jenkins_finish(hash);
844 	hash &= (KMEM_RANGE_MASK | KMEM_DIRECTION_MASK);
845 
846 	*kt_flags = *kt_flags | kalloc_hash_adjust(hash, 16);
847 }
848 
849 __startup_func
850 static void
851 kalloc_type_set_type_hash_fixed(vm_offset_t addr)
852 {
853 	/*
854 	 * Use backtraces on fixed as we don't have signatures for types that go
855 	 * to the VM due to rdar://85182551.
856 	 */
857 	(void) addr;
858 }
859 
860 __startup_func
861 static void
862 kalloc_type_set_type_hash_var(vm_offset_t addr)
863 {
864 	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
865 	kalloc_type_set_type_hash(ktv->kt_sig_type, ktv->kt_sig_hdr,
866 	    &ktv->kt_flags);
867 }
868 
869 __startup_func
870 static void
871 kalloc_type_mark_processed_fixed(vm_offset_t addr)
872 {
873 	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
874 	ktv->kt_flags |= KT_PROCESSED;
875 }
876 
877 __startup_func
878 static void
879 kalloc_type_mark_processed_var(vm_offset_t addr)
880 {
881 	kalloc_type_var_view_t ktv = (kalloc_type_var_view_t) addr;
882 	ktv->kt_flags |= KT_PROCESSED;
883 }
884 
885 __startup_func
886 static void
887 kalloc_type_update_view_fixed(vm_offset_t addr)
888 {
889 	kalloc_type_view_t ktv = (kalloc_type_view_t) addr;
890 	ktv->kt_size = kalloc_type_idx_for_size(ktv->kt_size);
891 }
892 
893 __startup_func
894 static void
895 kalloc_type_update_view_var(vm_offset_t addr)
896 {
897 	(void) addr;
898 }
899 
900 __startup_func
901 static void
902 kalloc_type_view_copy(
903 	const kalloc_type_variant_t   type,
904 	vm_offset_t                   start,
905 	vm_offset_t                   end,
906 	uint64_t                     *cur_count,
907 	bool                          slide,
908 	uuid_string_t                 kext_uuid)
909 {
910 	uint64_t count = kalloc_type_view_count(type, start, end);
911 	if (count + *cur_count >= kt_count) {
912 		panic("kalloc_type_view_copy: Insufficient space in scratch buffer");
913 	}
914 	vm_offset_t cur = start;
915 	while (cur < end) {
916 		if (slide) {
917 			kalloc_type_func(type, slide, cur);
918 		}
919 		kalloc_type_flags_t kt_flags = kalloc_type_func(type, get_flags, cur,
920 		    kext_uuid);
921 		kalloc_type_func(type, mark_processed, cur);
922 		/*
923 		 * Skip views that go to the VM
924 		 */
925 		if (kalloc_type_from_vm(kt_flags)) {
926 			cur += kalloc_type_func(type, view_sz);
927 			continue;
928 		}
929 
930 		/*
931 		 * If signature indicates that the entire allocation is data move it to
932 		 * KHEAP_DATA_BUFFERS. Note that KT_VAR_DATA_HEAP is a fake "data" heap,
933 		 * variable kalloc_type handles the actual redirection in the entry points
934 		 * kalloc/kfree_type_var_impl.
935 		 */
936 		if (kalloc_type_is_data(kt_flags)) {
937 			kalloc_type_func(type, handle_data_view, cur);
938 			cur += kalloc_type_func(type, view_sz);
939 			continue;
940 		}
941 
942 		/*
943 		 * Set type hash that is used by kmem_*_guard
944 		 */
945 		kalloc_type_func(type, set_type_hash, cur);
946 		kalloc_type_func(type, update_view, cur);
947 		kalloc_type_func(type, buffer_copy, &kt_buffer[*cur_count], cur);
948 		cur += kalloc_type_func(type, view_sz);
949 		*cur_count = *cur_count + 1;
950 	}
951 }
952 
953 __startup_func
954 static uint64_t
955 kalloc_type_view_parse(const kalloc_type_variant_t type)
956 {
957 	kc_format_t kc_format;
958 	uint64_t cur_count = 0;
959 
960 	if (!PE_get_primary_kc_format(&kc_format)) {
961 		panic("kalloc_type_view_parse: wasn't able to determine kc format");
962 	}
963 
964 	if (kc_format == KCFormatStatic) {
965 		/*
966 		 * If kc is static or KCGEN, __kalloc_type sections from kexts and
967 		 * xnu are coalesced.
968 		 */
969 		kalloc_type_view_copy(type,
970 		    kalloc_type_var(type, sec_start),
971 		    kalloc_type_var(type, sec_end),
972 		    &cur_count, false, NULL);
973 	} else if (kc_format == KCFormatFileset) {
974 		/*
975 		 * If kc uses filesets, traverse __kalloc_type section for each
976 		 * macho in the BootKC.
977 		 */
978 		kernel_mach_header_t *kc_mh = NULL;
979 		kernel_mach_header_t *kext_mh = NULL;
980 
981 		kc_mh = (kernel_mach_header_t *)PE_get_kc_header(KCKindPrimary);
982 		struct load_command *lc =
983 		    (struct load_command *)((vm_offset_t)kc_mh + sizeof(*kc_mh));
984 		for (uint32_t i = 0; i < kc_mh->ncmds;
985 		    i++, lc = (struct load_command *)((vm_offset_t)lc + lc->cmdsize)) {
986 			if (lc->cmd != LC_FILESET_ENTRY) {
987 				continue;
988 			}
989 			struct fileset_entry_command *fse =
990 			    (struct fileset_entry_command *)(vm_offset_t)lc;
991 			kext_mh = (kernel_mach_header_t *)fse->vmaddr;
992 			kernel_section_t *sect = (kernel_section_t *)getsectbynamefromheader(
993 				kext_mh, KALLOC_TYPE_SEGMENT, KALLOC_TYPE_SECTION(type));
994 			if (sect != NULL) {
995 				unsigned long uuidlen = 0;
996 				void *kext_uuid = getuuidfromheader(kext_mh, &uuidlen);
997 				uuid_string_t kext_uuid_str;
998 				if ((kext_uuid != NULL) && (uuidlen == sizeof(uuid_t))) {
999 					uuid_unparse_upper(*(uuid_t *)kext_uuid, kext_uuid_str);
1000 				}
1001 				kalloc_type_view_copy(type, sect->addr, sect->addr + sect->size,
1002 				    &cur_count, false, kext_uuid_str);
1003 			}
1004 		}
1005 	} else if (kc_format == KCFormatKCGEN) {
1006 		/*
1007 		 * Parse __kalloc_type section from xnu
1008 		 */
1009 		kalloc_type_view_copy(type,
1010 		    kalloc_type_var(type, sec_start),
1011 		    kalloc_type_var(type, sec_end), &cur_count, false, NULL);
1012 
1013 		/*
1014 		 * Parse __kalloc_type section for kexts
1015 		 *
1016 		 * Note: We don't process the kalloc_type_views for kexts on armv7
1017 		 * as this platform has insufficient memory for type based
1018 		 * segregation. kalloc_type_impl_external will direct callsites
1019 		 * based on their size.
1020 		 */
1021 		kernel_mach_header_t *xnu_mh = &_mh_execute_header;
1022 		vm_offset_t cur = 0;
1023 		vm_offset_t end = 0;
1024 
1025 		/*
1026 		 * Kext machos are in the __PRELINK_TEXT segment. Extract the segment
1027 		 * and traverse it.
1028 		 */
1029 		kernel_section_t *prelink_sect = getsectbynamefromheader(
1030 			xnu_mh, kPrelinkTextSegment, kPrelinkTextSection);
1031 		assert(prelink_sect);
1032 		cur = prelink_sect->addr;
1033 		end = prelink_sect->addr + prelink_sect->size;
1034 
1035 		while (cur < end) {
1036 			uint64_t kext_text_sz = 0;
1037 			kernel_mach_header_t *kext_mh = (kernel_mach_header_t *) cur;
1038 
1039 			if (kext_mh->magic == 0) {
1040 				/*
1041 				 * Assert that we have processed all kexts and all that is left
1042 				 * is padding
1043 				 */
1044 				assert(memcmp_zero_ptr_aligned((void *)kext_mh, end - cur) == 0);
1045 				break;
1046 			} else if (kext_mh->magic != MH_MAGIC_64 &&
1047 			    kext_mh->magic != MH_CIGAM_64) {
1048 				panic("kalloc_type_view_parse: couldn't find kext @ offset:%lx",
1049 				    cur);
1050 			}
1051 
1052 			/*
1053 			 * Kext macho found, iterate through its segments
1054 			 */
1055 			struct load_command *lc =
1056 			    (struct load_command *)(cur + sizeof(kernel_mach_header_t));
1057 			bool isSplitKext = false;
1058 
1059 			for (uint32_t i = 0; i < kext_mh->ncmds && (vm_offset_t)lc < end;
1060 			    i++, lc = (struct load_command *)((vm_offset_t)lc + lc->cmdsize)) {
1061 				if (lc->cmd == LC_SEGMENT_SPLIT_INFO) {
1062 					isSplitKext = true;
1063 					continue;
1064 				} else if (lc->cmd != LC_SEGMENT_64) {
1065 					continue;
1066 				}
1067 
1068 				kernel_segment_command_t *seg_cmd =
1069 				    (struct segment_command_64 *)(vm_offset_t)lc;
1070 				/*
1071 				 * Parse kalloc_type section
1072 				 */
1073 				if (strcmp(seg_cmd->segname, KALLOC_TYPE_SEGMENT) == 0) {
1074 					kernel_section_t *kt_sect = getsectbynamefromseg(seg_cmd,
1075 					    KALLOC_TYPE_SEGMENT, KALLOC_TYPE_SECTION(type));
1076 					if (kt_sect) {
1077 						kalloc_type_view_copy(type, kt_sect->addr + vm_kernel_slide,
1078 						    kt_sect->addr + kt_sect->size + vm_kernel_slide, &cur_count,
1079 						    true, NULL);
1080 					}
1081 				}
1082 				/*
1083 				 * If the kext has a __TEXT segment, that is the only thing that
1084 				 * will be in the special __PRELINK_TEXT KC segment, so the next
1085 				 * macho is right after.
1086 				 */
1087 				if (strcmp(seg_cmd->segname, "__TEXT") == 0) {
1088 					kext_text_sz = seg_cmd->filesize;
1089 				}
1090 			}
1091 			/*
1092 			 * If the kext did not have a __TEXT segment (special xnu kexts with
1093 			 * only a __LINKEDIT segment) then the next macho will be after all the
1094 			 * header commands.
1095 			 */
1096 			if (!kext_text_sz) {
1097 				kext_text_sz = kext_mh->sizeofcmds;
1098 			} else if (!isSplitKext) {
1099 				panic("kalloc_type_view_parse: No support for non-split seg KCs");
1100 				break;
1101 			}
1102 
1103 			cur += ((kext_text_sz + (KEXT_ALIGN_BYTES - 1)) & (~KEXT_ALIGN_MASK));
1104 		}
1105 	} else {
1106 		/*
1107 		 * When kc_format is KCFormatDynamic or KCFormatUnknown, we don't handle
1108 		 * parsing kalloc_type_view structs during startup.
1109 		 */
1110 		panic("kalloc_type_view_parse: couldn't parse kalloc_type_view structs"
1111 		    " for kc_format = %d\n", kc_format);
1112 	}
1113 	return cur_count;
1114 }
1115 
1116 __startup_func
1117 static int
1118 kalloc_type_cmp_fixed(const void *a, const void *b)
1119 {
1120 	const kalloc_type_view_t ktA = *(const kalloc_type_view_t *)a;
1121 	const kalloc_type_view_t ktB = *(const kalloc_type_view_t *)b;
1122 
1123 	const uint16_t idxA = kalloc_type_get_idx(ktA->kt_size);
1124 	const uint16_t idxB = kalloc_type_get_idx(ktB->kt_size);
1125 	/*
1126 	 * If the kalloc_type_views are in the same kalloc bucket, sort by
1127 	 * signature else sort by size
1128 	 */
1129 	if (idxA == idxB) {
1130 		int result = strcmp(ktA->kt_signature, ktB->kt_signature);
1131 		/*
1132 		 * If the kalloc_type_views have the same signature sort by site
1133 		 * name
1134 		 */
1135 		if (result == 0) {
1136 			return strcmp(ktA->kt_zv.zv_name, ktB->kt_zv.zv_name);
1137 		}
1138 		return result;
1139 	}
1140 	const uint32_t sizeA = kalloc_type_get_size(ktA->kt_size);
1141 	const uint32_t sizeB = kalloc_type_get_size(ktB->kt_size);
1142 	return (int)(sizeA - sizeB);
1143 }
1144 
1145 __startup_func
1146 static int
1147 kalloc_type_cmp_var(const void *a, const void *b)
1148 {
1149 	const kalloc_type_var_view_t ktA = *(const kalloc_type_var_view_t *)a;
1150 	const kalloc_type_var_view_t ktB = *(const kalloc_type_var_view_t *)b;
1151 	const char *ktA_hdr = ktA->kt_sig_hdr ?: "";
1152 	const char *ktB_hdr = ktB->kt_sig_hdr ?: "";
1153 	bool ktA_ptrArray = kalloc_type_is_ptr_array(ktA->kt_flags);
1154 	bool ktB_ptrArray = kalloc_type_is_ptr_array(ktB->kt_flags);
1155 	int result = 0;
1156 
1157 	/*
1158 	 * Switched around (B - A) because we want the pointer arrays to be at the
1159 	 * top
1160 	 */
1161 	result = ktB_ptrArray - ktA_ptrArray;
1162 	if (result == 0) {
1163 		result = strcmp(ktA_hdr, ktB_hdr);
1164 		if (result == 0) {
1165 			result = strcmp(ktA->kt_sig_type, ktB->kt_sig_type);
1166 			if (result == 0) {
1167 				result = strcmp(ktA->kt_name, ktB->kt_name);
1168 			}
1169 		}
1170 	}
1171 	return result;
1172 }
1173 
1174 __startup_func
1175 static uint16_t *
1176 kalloc_type_create_iterators_fixed(
1177 	uint16_t           *kt_skip_list_start,
1178 	uint64_t            count)
1179 {
1180 	uint16_t *kt_skip_list = kt_skip_list_start;
1181 	uint16_t p_idx = UINT16_MAX; /* previous size idx */
1182 	uint16_t c_idx = 0; /* current size idx */
1183 	uint16_t unique_sig = 0;
1184 	uint16_t total_sig = 0;
1185 	const char *p_sig = NULL;
1186 	const char *p_name = "";
1187 	const char *c_sig = NULL;
1188 	const char *c_name = NULL;
1189 
1190 	/*
1191 	 * Walk over each kalloc_type_view
1192 	 */
1193 	for (uint16_t i = 0; i < count; i++) {
1194 		kalloc_type_view_t kt = kt_buffer[i].ktv_fixed;
1195 
1196 		c_idx = kalloc_type_get_idx(kt->kt_size);
1197 		c_sig = kt->kt_signature;
1198 		c_name = kt->kt_zv.zv_name;
1199 		/*
1200 		 * When current kalloc_type_view is in a different kalloc size
1201 		 * bucket than the previous, it means we have processed all in
1202 		 * the previous size bucket, so store the accumulated values
1203 		 * and advance the indices.
1204 		 */
1205 		if (p_idx == UINT16_MAX || c_idx != p_idx) {
1206 			/*
1207 			 * Updates for frequency lists
1208 			 */
1209 			if (p_idx != UINT16_MAX) {
1210 				kt_freq_list[p_idx] = unique_sig;
1211 				kt_freq_list_total[p_idx] = total_sig - unique_sig;
1212 			}
1213 			unique_sig = 1;
1214 			total_sig = 1;
1215 
1216 			p_idx = c_idx;
1217 			p_sig = c_sig;
1218 			p_name = c_name;
1219 
1220 			/*
1221 			 * Updates to signature skip list
1222 			 */
1223 			*kt_skip_list = i;
1224 			kt_skip_list++;
1225 
1226 			continue;
1227 		}
1228 
1229 		/*
1230 		 * When current kalloc_type_views is in the kalloc size bucket as
1231 		 * When the current kalloc_type_view is in the same kalloc size bucket
1232 		 * as the previous one, analyze the signature to see if it is unique.
1233 		 * Signatures are collapsible if one is a substring of the next.
1234 		 */
1235 		if (strncmp(c_sig, p_sig, strlen(p_sig)) != 0) {
1236 			/*
1237 			 * Unique signature detected. Update counts and advance index
1238 			 */
1239 			unique_sig++;
1240 			total_sig++;
1241 
1242 			*kt_skip_list = i;
1243 			kt_skip_list++;
1244 			p_sig = c_sig;
1245 			p_name = c_name;
1246 			continue;
1247 		}
1248 		/*
1249 		 * Need this here as we do substring matching for signatures so you
1250 		 * Needed here because we do substring matching for signatures: track
1251 		 * the longer signature seen rather than the substring
1252 		p_sig = c_sig;
1253 
1254 		/*
1255 		 * Check if current kalloc_type_view corresponds to a new type
1256 		 */
1257 		if (strlen(p_name) != strlen(c_name) || strcmp(p_name, c_name) != 0) {
1258 			total_sig++;
1259 			p_name = c_name;
1260 		}
1261 	}
1262 	/*
1263 	 * Final update
1264 	 */
1265 	assert(c_idx == p_idx);
1266 	assert(kt_freq_list[c_idx] == 0);
1267 	kt_freq_list[c_idx] = unique_sig;
1268 	kt_freq_list_total[c_idx] = total_sig - unique_sig;
1269 	*kt_skip_list = (uint16_t) count;
1270 
1271 	return ++kt_skip_list;
1272 }
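/*
 * Summary of what the walk above leaves behind: for each sizeclass,
 * kt_freq_list[] holds the number of unique signatures, kt_freq_list_total[]
 * holds the number of additional (non-unique) types, and kt_skip_list[]
 * records the kt_buffer index where each unique signature starts.
 */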
1273 
1274 __startup_func
1275 static uint32_t
1276 kalloc_type_create_iterators_var(
1277 	uint32_t           *kt_skip_list_start,
1278 	uint32_t            buf_start)
1279 {
1280 	uint32_t *kt_skip_list = kt_skip_list_start;
1281 	uint32_t n = 0;
1282 
1283 	kt_skip_list[n] = buf_start;
1284 	assert(kt_count > buf_start + 1);
1285 	for (uint32_t i = buf_start + 1; i < kt_count; i++) {
1286 		kalloc_type_var_view_t ktA = kt_buffer[i - 1].ktv_var;
1287 		kalloc_type_var_view_t ktB = kt_buffer[i].ktv_var;
1288 		const char *ktA_hdr = ktA->kt_sig_hdr ?: "";
1289 		const char *ktB_hdr = ktB->kt_sig_hdr ?: "";
1290 		assert(ktA->kt_sig_type != NULL);
1291 		assert(ktB->kt_sig_type != NULL);
1292 		if (strcmp(ktA_hdr, ktB_hdr) != 0 ||
1293 		    strcmp(ktA->kt_sig_type, ktB->kt_sig_type) != 0) {
1294 			n++;
1295 			kt_skip_list[n] = i;
1296 		}
1297 	}
1298 	/*
1299 	 * Final update
1300 	 */
1301 	n++;
1302 	kt_skip_list[n] = (uint32_t) kt_count;
1303 	return n;
1304 }
1305 
1306 __startup_func
1307 static uint16_t
1308 kalloc_type_distribute_budget(
1309 	uint16_t            freq_list[MAX_K_ZONE(kt_zone_cfg)],
1310 	uint16_t            kt_zones[MAX_K_ZONE(kt_zone_cfg)],
1311 	uint16_t            zone_budget,
1312 	uint16_t            min_zones_per_size)
1313 {
1314 	uint16_t total_sig = 0;
1315 	uint16_t min_sig = 0;
1316 	uint16_t assigned_zones = 0;
1317 	uint16_t remaining_zones = zone_budget;
1318 	uint16_t modulo = 0;
1319 
1320 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
1321 		uint16_t sig_freq = freq_list[i];
1322 		uint16_t min_zones = min_zones_per_size;
1323 
1324 		if (sig_freq < min_zones_per_size) {
1325 			min_zones = sig_freq;
1326 		}
1327 		total_sig += sig_freq;
1328 		kt_zones[i] = min_zones;
1329 		min_sig += min_zones;
1330 	}
1331 	if (remaining_zones > total_sig) {
1332 		remaining_zones = total_sig;
1333 	}
1334 	assert(remaining_zones >= min_sig);
1335 	remaining_zones -= min_sig;
1336 	total_sig -= min_sig;
1337 	assigned_zones += min_sig;
1338 
1339 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
1340 		uint16_t freq = freq_list[i];
1341 
1342 		if (freq < min_zones_per_size) {
1343 			continue;
1344 		}
1345 		uint32_t numer = (freq - min_zones_per_size) * remaining_zones;
1346 		uint16_t n_zones = (uint16_t) numer / total_sig;
1347 
1348 		/*
1349 		 * Accumulate remainder and increment n_zones when it goes above
1350 		 * denominator
1351 		 */
1352 		modulo += numer % total_sig;
1353 		if (modulo >= total_sig) {
1354 			n_zones++;
1355 			modulo -= total_sig;
1356 		}
1357 
1358 		/*
1359 		 * Cap the total number of zones to the unique signatures
1360 		 */
1361 		if ((n_zones + min_zones_per_size) > freq) {
1362 			uint16_t extra_zones = n_zones + min_zones_per_size - freq;
1363 			modulo += (extra_zones * total_sig);
1364 			n_zones -= extra_zones;
1365 		}
1366 		kt_zones[i] += n_zones;
1367 		assigned_zones += n_zones;
1368 	}
1369 
1370 	if (kt_options & KT_OPTIONS_DEBUG) {
1371 		printf("kalloc_type_apply_policy: assigned %u zones wasted %u zones\n",
1372 		    assigned_zones, remaining_zones + min_sig - assigned_zones);
1373 	}
1374 	return remaining_zones + min_sig - assigned_zones;
1375 }
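/*
 * Illustrative run of the distribution above (made-up numbers): with
 * zone_budget == 10, min_zones_per_size == 2 and freq_list == { 1, 4, 8 },
 * the minimums consume 1 + 2 + 2 == 5 zones and the remaining 5 are split
 * across the last two sizeclasses in proportion to (freq - min), with the
 * modulo accumulator handing out the rounding leftovers (final result
 * { 1, 3, 6 }).
 */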
1376 
1377 __startup_func
1378 static int
1379 kalloc_type_cmp_type_zones(const void *a, const void *b)
1380 {
1381 	const struct nzones_with_idx A = *(const struct nzones_with_idx *)a;
1382 	const struct nzones_with_idx B = *(const struct nzones_with_idx *)b;
1383 
1384 	return (int)(B.nzones - A.nzones);
1385 }
1386 
1387 __startup_func
1388 static void
1389 kalloc_type_redistribute_budget(
1390 	uint16_t            freq_total_list[MAX_K_ZONE(kt_zone_cfg)],
1391 	uint16_t            kt_zones[MAX_K_ZONE(kt_zone_cfg)])
1392 {
1393 	uint16_t count = 0, cur_count = 0;
1394 	struct nzones_with_idx sorted_zones[MAX_K_ZONE(kt_zone_cfg)] = {};
1395 	uint16_t top_zone_total = 0;
1396 
1397 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
1398 		uint16_t zones = kt_zones[i];
1399 
1400 		/*
1401 		 * If a sizeclass got no zones but has types to divide, make a note
1402 		 * of it
1403 		 */
1404 		if (zones == 0 && (freq_total_list[i] != 0)) {
1405 			count++;
1406 		}
1407 
1408 		sorted_zones[i].nzones = kt_zones[i];
1409 		sorted_zones[i].idx = i;
1410 	}
1411 
1412 	qsort(&sorted_zones[0], (size_t) MAX_K_ZONE(kt_zone_cfg),
1413 	    sizeof(struct nzones_with_idx), kalloc_type_cmp_type_zones);
1414 
1415 	for (uint16_t i = 0; i < 3; i++) {
1416 		top_zone_total += sorted_zones[i].nzones;
1417 	}
1418 
1419 	/*
1420 	 * Borrow zones from the top 3 sizeclasses and redistribute to those
1421 	 * that didn't get a zone but have types to divide
1422 	 */
1423 	cur_count = count;
1424 	for (uint16_t i = 0; i < 3; i++) {
1425 		uint16_t zone_borrow = (sorted_zones[i].nzones * count) / top_zone_total;
1426 		uint16_t zone_available = kt_zones[sorted_zones[i].idx];
1427 
1428 		if (zone_borrow > (zone_available / 2)) {
1429 			zone_borrow = zone_available / 2;
1430 		}
1431 		kt_zones[sorted_zones[i].idx] -= zone_borrow;
1432 		cur_count -= zone_borrow;
1433 	}
1434 
1435 	for (uint16_t i = 0; i < 3; i++) {
1436 		if (cur_count == 0) {
1437 			break;
1438 		}
1439 		kt_zones[sorted_zones[i].idx]--;
1440 		cur_count--;
1441 	}
1442 
1443 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
1444 		if (kt_zones[i] == 0 && (freq_total_list[i] != 0) &&
1445 		    (count > cur_count)) {
1446 			kt_zones[i]++;
1447 			count--;
1448 		}
1449 	}
1450 }
1451 
1452 static uint16_t
1453 kalloc_type_apply_policy(
1454 	uint16_t            freq_list[MAX_K_ZONE(kt_zone_cfg)],
1455 	uint16_t            freq_total_list[MAX_K_ZONE(kt_zone_cfg)],
1456 	uint16_t            kt_zones_sig[MAX_K_ZONE(kt_zone_cfg)],
1457 	uint16_t            kt_zones_type[MAX_K_ZONE(kt_zone_cfg)],
1458 	uint16_t            zone_budget)
1459 {
1460 	uint16_t zbudget_sig = (uint16_t) ((7 * zone_budget) / 10);
1461 	uint16_t zbudget_type = zone_budget - zbudget_sig;
1462 	uint16_t wasted_zones = 0;
1463 
1464 #if DEBUG || DEVELOPMENT
1465 	if (startup_phase < STARTUP_SUB_LOCKDOWN) {
1466 		__assert_only uint16_t current_zones = os_atomic_load(&num_zones, relaxed);
1467 		assert(zone_budget + current_zones <= MAX_ZONES);
1468 	}
1469 #endif
1470 
1471 	wasted_zones += kalloc_type_distribute_budget(freq_list, kt_zones_sig,
1472 	    zbudget_sig, 2);
1473 	wasted_zones += kalloc_type_distribute_budget(freq_total_list,
1474 	    kt_zones_type, zbudget_type, 0);
1475 	kalloc_type_redistribute_budget(freq_total_list, kt_zones_type);
1476 
1477 	/*
1478 	 * Print stats when KT_OPTIONS_DEBUG boot-arg present
1479 	 */
1480 	if (kt_options & KT_OPTIONS_DEBUG) {
1481 		printf("Size\ttotal_sig\tunique_signatures\tzones\tzones_sig\t"
1482 		    "zones_type\n");
1483 		for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
1484 			printf("%u\t%u\t%u\t%u\t%u\t%u\n", kt_zone_cfg[i],
1485 			    freq_total_list[i] + freq_list[i], freq_list[i],
1486 			    kt_zones_sig[i] + kt_zones_type[i],
1487 			    kt_zones_sig[i], kt_zones_type[i]);
1488 		}
1489 	}
1490 
1491 	return wasted_zones;
1492 }
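/*
 * For example (illustrative budget): a kt_fixed_zones budget of 200 is split
 * by the 7/10 ratio above into 140 zones distributed by unique signature and
 * 60 zones used to further split types that share a signature.
 */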
1493 
1494 
1495 __startup_func
1496 static void
1497 kalloc_type_create_zone_for_size(
1498 	zone_t             *kt_zones_for_size,
1499 	uint16_t            kt_zones,
1500 	vm_size_t           z_size)
1501 {
1502 	zone_t p_zone = NULL;
1503 	char *z_name = NULL;
1504 	zone_t shared_z = NULL;
1505 
1506 	for (uint16_t i = 0; i < kt_zones; i++) {
1507 		z_name = zalloc_permanent(MAX_ZONE_NAME, ZALIGN_NONE);
1508 		snprintf(z_name, MAX_ZONE_NAME, "kalloc.type%u.%zu", i,
1509 		    (size_t) z_size);
1510 		zone_t z = zone_create(z_name, z_size, ZC_KALLOC_TYPE);
1511 		if (i != 0) {
1512 			p_zone->z_kt_next = z;
1513 		}
1514 		p_zone = z;
1515 		kt_zones_for_size[i] = z;
1516 	}
1517 	/*
1518 	 * Create shared zone for sizeclass if it doesn't already exist
1519 	 */
1520 	if (kt_shared_fixed) {
1521 		shared_z = kalloc_zone_for_size(KHEAP_SHARED->kh_zstart, z_size);
1522 		if (zone_elem_inner_size(shared_z) != z_size) {
1523 			z_name = zalloc_permanent(MAX_ZONE_NAME, ZALIGN_NONE);
1524 			snprintf(z_name, MAX_ZONE_NAME, "kalloc.%zu",
1525 			    (size_t) z_size);
1526 			shared_z = zone_create_ext(z_name, z_size, ZC_NONE, ZONE_ID_ANY,
1527 			    ^(zone_t zone){
1528 				zone_security_array[zone_index(zone)].z_kheap_id = KHEAP_ID_SHARED;
1529 			});
1530 		}
1531 	}
1532 	kt_zones_for_size[kt_zones] = shared_z;
1533 }
1534 
1535 __startup_func
1536 static uint16_t
1537 kalloc_type_zones_for_type(
1538 	uint16_t            zones_total_type,
1539 	uint16_t            unique_types,
1540 	uint16_t            total_types,
1541 	bool                last_sig)
1542 {
1543 	uint16_t zones_for_type = 0, n_mod = 0;
1544 
1545 	if (zones_total_type == 0) {
1546 		return 0;
1547 	}
1548 
1549 	zones_for_type = (zones_total_type * unique_types) / total_types;
1550 	n_mod = (zones_total_type * unique_types) % total_types;
1551 	zone_carry += n_mod;
1552 
1553 	/*
1554 	 * Drain carry opportunistically
1555 	 */
1556 	if (((unique_types > 3) && (zone_carry > 0)) ||
1557 	    (zone_carry >= (int) total_types) ||
1558 	    (last_sig && (zone_carry > 0))) {
1559 		zone_carry -= total_types;
1560 		zones_for_type++;
1561 	}
1562 
1563 	if (last_sig) {
1564 		assert(zone_carry == 0);
1565 	}
1566 
1567 	return zones_for_type;
1568 }
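/*
 * Illustrative behaviour of the carry (made-up numbers): with
 * zones_total_type == 5 and total_types == 8, a group of 2 unique types gets
 * floor(10 / 8) == 1 zone and leaves 2 in zone_carry; a later group with
 * more than 3 unique types, or the last group, drains the carry and picks up
 * the extra zone.
 */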
1569 
1570 __startup_func
1571 static uint16_t
1572 kalloc_type_build_skip_list(
1573 	kalloc_type_view_t     *start,
1574 	kalloc_type_view_t     *end,
1575 	uint16_t               *kt_skip_list)
1576 {
1577 	kalloc_type_view_t *cur = start;
1578 	kalloc_type_view_t prev = *start;
1579 	uint16_t i = 0, idx = 0;
1580 
1581 	kt_skip_list[idx] = i;
1582 	idx++;
1583 
1584 	while (cur < end) {
1585 		kalloc_type_view_t kt_cur = *cur;
1586 
1587 		if (strcmp(prev->kt_zv.zv_name, kt_cur->kt_zv.zv_name) != 0) {
1588 			kt_skip_list[idx] = i;
1589 
1590 			prev = kt_cur;
1591 			idx++;
1592 		}
1593 		i++;
1594 		cur++;
1595 	}
1596 
1597 	/*
1598 	 * Final update
1599 	 */
1600 	kt_skip_list[idx] = i;
1601 	return idx;
1602 }
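/*
 * Reading of the layout produced above: kt_skip_list[k] is the index of the
 * first view with the k-th distinct type name, views in
 * [kt_skip_list[k], kt_skip_list[k + 1]) share a name, and the returned idx
 * is the number of distinct names in [start, end).
 */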
1603 
1604 __startup_func
1605 static void
1606 kalloc_type_init_sig_eq(
1607 	zone_t             *zones,
1608 	uint16_t            n_zones,
1609 	zone_t              sig_zone)
1610 {
1611 	for (uint16_t i = 0; i < n_zones; i++) {
1612 		zone_t z = zones[i];
1613 
1614 		assert(!zone_get_sig_eq(z));
1615 		zone_set_sig_eq(z, zone_index(sig_zone));
1616 	}
1617 }
1618 
1619 __startup_func
1620 static uint16_t
1621 kalloc_type_distribute_zone_for_type(
1622 	kalloc_type_view_t *start,
1623 	kalloc_type_view_t *end,
1624 	bool                last_sig,
1625 	uint16_t            zones_total_type,
1626 	uint16_t            total_types,
1627 	uint16_t           *kt_skip_list,
1628 	zone_t              kt_zones_for_size[32],
1629 	uint16_t            type_zones_start,
1630 	zone_t              sig_zone,
1631 	zone_t              shared_zone)
1632 {
1633 	uint16_t count = 0, n_zones = 0;
1634 	uint16_t *shuffle_buf = NULL;
1635 	zone_t *type_zones = &kt_zones_for_size[type_zones_start];
1636 
1637 	/*
1638 	 * Assert there is space in buffer
1639 	 */
1640 	count = kalloc_type_build_skip_list(start, end, kt_skip_list);
1641 	n_zones = kalloc_type_zones_for_type(zones_total_type, count, total_types,
1642 	    last_sig);
1643 	shuffle_buf = &kt_skip_list[count + 1];
1644 
1645 	/*
1646 	 * Initialize signature equivalence zone for type zones
1647 	 */
1648 	kalloc_type_init_sig_eq(type_zones, n_zones, sig_zone);
1649 
1650 	if (n_zones == 0) {
1651 		kalloc_type_assign_zone_fixed(start, end, sig_zone, sig_zone,
1652 		    shared_zone);
1653 		return n_zones;
1654 	}
1655 
1656 	/*
1657 	 * Don't shuffle in the sig_zone if there is only 1 type in the zone
1658 	 */
1659 	if (count == 1) {
1660 		kalloc_type_assign_zone_fixed(start, end, type_zones[0], sig_zone,
1661 		    shared_zone);
1662 		return n_zones;
1663 	}
1664 
1665 	/*
1666 	 * Add the signature based zone to n_zones
1667 	 */
1668 	n_zones++;
1669 
1670 	for (uint16_t i = 0; i < count; i++) {
1671 		uint16_t zidx = i % n_zones, shuffled_zidx = 0;
1672 		uint16_t type_start = kt_skip_list[i];
1673 		kalloc_type_view_t *kt_type_start = &start[type_start];
1674 		uint16_t type_end = kt_skip_list[i + 1];
1675 		kalloc_type_view_t *kt_type_end = &start[type_end];
1676 		zone_t zone;
1677 
1678 		if (zidx == 0) {
1679 			kmem_shuffle(shuffle_buf, n_zones);
1680 		}
1681 
1682 		shuffled_zidx = shuffle_buf[zidx];
1683 		zone = shuffled_zidx == 0 ? sig_zone : type_zones[shuffled_zidx - 1];
1684 		kalloc_type_assign_zone_fixed(kt_type_start, kt_type_end, zone, sig_zone,
1685 		    shared_zone);
1686 	}
1687 
1688 	return n_zones - 1;
1689 }
1690 
1691 __startup_func
1692 static void
1693 kalloc_type_create_zones_fixed(
1694 	uint16_t           *kt_skip_list_start,
1695 	uint16_t           *kt_shuffle_buf)
1696 {
1697 	uint16_t *kt_skip_list = kt_skip_list_start;
1698 	uint16_t p_j = 0;
1699 	uint16_t kt_zones_sig[MAX_K_ZONE(kt_zone_cfg)] = {};
1700 	uint16_t kt_zones_type[MAX_K_ZONE(kt_zone_cfg)] = {};
1701 #if DEBUG || DEVELOPMENT
1702 	__assert_only uint64_t kt_shuffle_count = ((vm_address_t) kt_shuffle_buf -
1703 	    (vm_address_t) kt_buffer) / sizeof(uint16_t);
1704 #endif
1705 	/*
1706 	 * Apply policy to determine how many zones to create for each size
1707 	 * class.
1708 	 */
1709 	kalloc_type_apply_policy(kt_freq_list, kt_freq_list_total,
1710 	    kt_zones_sig, kt_zones_type, kt_fixed_zones);
1711 
1712 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
1713 		uint16_t n_unique_sig = kt_freq_list[i];
1714 		vm_size_t z_size = kt_zone_cfg[i];
1715 		uint16_t n_zones_sig = kt_zones_sig[i];
1716 		uint16_t n_zones_type = kt_zones_type[i];
1717 		uint16_t total_types = kt_freq_list_total[i];
1718 		uint16_t type_zones_used = 0;
1719 
1720 		if (n_unique_sig == 0) {
1721 			continue;
1722 		}
1723 
1724 		zone_carry = 0;
1725 		assert(n_zones_sig + n_zones_type + 1 <= 32);
1726 		zone_t kt_zones_for_size[32] = {};
1727 		kalloc_type_create_zone_for_size(kt_zones_for_size,
1728 		    n_zones_sig + n_zones_type, z_size);
1729 
1730 		kalloc_type_zarray[i] = kt_zones_for_size[0];
1731 		/*
1732 		 * Ensure that there is enough space to shuffle n_unique_sig
1733 		 * indices
1734 		 */
1735 		assert(n_unique_sig < kt_shuffle_count);
1736 
1737 		/*
1738 		 * Get a shuffled set of signature indices
1739 		 */
1740 		*kt_shuffle_buf = 0;
1741 		if (n_unique_sig > 1) {
1742 			kmem_shuffle(kt_shuffle_buf, n_unique_sig);
1743 		}
1744 
1745 		for (uint16_t j = 0; j < n_zones_sig; j++) {
1746 			zone_t *z_ptr = &kt_zones_for_size[j];
1747 
1748 			kalloc_type_init_sig_eq(z_ptr, 1, *z_ptr);
1749 		}
1750 
1751 		for (uint16_t j = 0; j < n_unique_sig; j++) {
1752 			/*
1753 			 * For every size that has unique types
1754 			 */
1755 			uint16_t shuffle_idx = kt_shuffle_buf[j];
1756 			uint16_t cur = kt_skip_list[shuffle_idx + p_j];
1757 			uint16_t end = kt_skip_list[shuffle_idx + p_j + 1];
1758 			zone_t zone = kt_zones_for_size[j % n_zones_sig];
1759 			zone_t shared_zone = kt_zones_for_size[n_zones_sig + n_zones_type];
1760 			bool last_sig;
1761 
1762 			last_sig = (j == (n_unique_sig - 1)) ? true : false;
1763 			type_zones_used += kalloc_type_distribute_zone_for_type(
1764 				&kt_buffer[cur].ktv_fixed,
1765 				&kt_buffer[end].ktv_fixed, last_sig,
1766 				n_zones_type, total_types + n_unique_sig,
1767 				&kt_shuffle_buf[n_unique_sig], kt_zones_for_size,
1768 				n_zones_sig + type_zones_used, zone, shared_zone);
1769 		}
1770 		assert(type_zones_used <= n_zones_type);
1771 		p_j += n_unique_sig;
1772 	}
1773 }
1774 
1775 __startup_func
1776 static void
1777 kalloc_type_view_init_fixed(void)
1778 {
1779 	kalloc_type_hash_seed = (uint32_t) early_random();
1780 	kalloc_type_build_dlut();
1781 	/*
1782 	 * Parse __kalloc_type sections and build array of pointers to
1783 	 * all kalloc type views in kt_buffer.
1784 	 */
1785 	kt_count = kalloc_type_view_parse(KTV_FIXED);
1786 	assert(kt_count < KALLOC_TYPE_SIZE_MASK);
1787 
1788 #if MACH_ASSERT
1789 	vm_size_t sig_slist_size = (size_t) kt_count * sizeof(uint16_t);
1790 	vm_size_t kt_buffer_size = (size_t) kt_count * sizeof(kalloc_type_view_t);
1791 	assert(kt_scratch_size >= kt_buffer_size + sig_slist_size);
1792 #endif
1793 
1794 	/*
1795 	 * Sort based on size class and signature
1796 	 */
1797 	qsort(kt_buffer, (size_t) kt_count, sizeof(kalloc_type_view_t),
1798 	    kalloc_type_cmp_fixed);
1799 
1800 	/*
1801 	 * Build a skip list that holds starts of unique signatures and a
1802 	 * frequency list of number of unique and total signatures per kalloc
1803 	 * size class
1804 	 */
1805 	uint16_t *kt_skip_list_start = (uint16_t *)(kt_buffer + kt_count);
1806 	uint16_t *kt_shuffle_buf = kalloc_type_create_iterators_fixed(
1807 		kt_skip_list_start, kt_count);
1808 
1809 	/*
1810 	 * Create zones based on signatures
1811 	 */
1812 	kalloc_type_create_zones_fixed(kt_skip_list_start, kt_shuffle_buf);
1813 }
1814 
1815 __startup_func
1816 static void
1817 kalloc_type_heap_init(void)
1818 {
1819 	assert(kt_var_heaps + 1 <= KT_VAR_MAX_HEAPS);
1820 	char kh_name[MAX_ZONE_NAME];
1821 	uint32_t last_heap = KT_VAR_PTR_HEAP0 + kt_var_heaps;
1822 
1823 	for (uint32_t i = KT_VAR_PTR_HEAP0; i < last_heap; i++) {
1824 		snprintf(&kh_name[0], MAX_ZONE_NAME, "%s%u", KHEAP_KT_VAR->kh_name, i);
1825 		kalloc_zone_init((const char *)&kh_name[0], KHEAP_ID_KT_VAR,
1826 		    &kalloc_type_heap_array[i].kh_zstart, ZC_KALLOC_TYPE);
1827 	}
1828 	/*
1829 	 * All variable kalloc type allocations are collapsed into a single
1830 	 * stat. Individual accounting can be requested via KT_PRIV_ACCT
1831 	 */
1832 	KHEAP_KT_VAR->kh_stats = zalloc_percpu_permanent_type(struct zone_stats);
1833 	zone_view_count += 1;
1834 }
1835 
1836 __startup_func
1837 static void
1838 kalloc_type_assign_heap(
1839 	uint32_t            start,
1840 	uint32_t            end,
1841 	uint32_t            heap_id)
1842 {
1843 	bool use_split = kmem_get_random16(1);
1844 
1845 	if (use_split) {
1846 		heap_id = kt_var_heaps;
1847 	}
1848 	kalloc_type_assign_zone_var(&kt_buffer[start].ktv_var,
1849 	    &kt_buffer[end].ktv_var, heap_id);
1850 }
1851 
1852 __startup_func
1853 static void
1854 kalloc_type_split_heap(
1855 	uint32_t            start,
1856 	uint32_t            end,
1857 	uint32_t            heap_id)
1858 {
1859 	uint32_t count = start;
1860 	const char *p_name = NULL;
1861 
1862 	while (count < end) {
1863 		kalloc_type_var_view_t cur = kt_buffer[count].ktv_var;
1864 		const char *c_name = cur->kt_name;
1865 
1866 		if (!p_name) {
1867 			assert(count == start);
1868 			p_name = c_name;
1869 		}
1870 		if (strcmp(c_name, p_name) != 0) {
1871 			kalloc_type_assign_heap(start, count, heap_id);
1872 			start = count;
1873 			p_name = c_name;
1874 		}
1875 		count++;
1876 	}
1877 	kalloc_type_assign_heap(start, end, heap_id);
1878 }
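/*
 * Illustrative sketch (not part of the build): kalloc_type_split_heap()
 * cuts [start, end) at every change of kt_name, so each run of
 * identically-named views is assigned as a unit.  For sorted views named
 * [A, A, B, C, C], kalloc_type_assign_heap() is called three times, for
 * [A, A], [B] and [C, C]; each call then either keeps the supplied
 * heap_id or, on a random bit, redirects the run to the dedicated split
 * heap (kt_var_heaps).
 */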
1879 
1880 __startup_func
1881 static void
1882 kalloc_type_view_init_var(void)
1883 {
1884 	uint32_t buf_start = 0, unique_sig = 0;
1885 	uint32_t *kt_skip_list_start;
1886 	uint16_t *shuffle_buf;
1887 	uint16_t fixed_heaps = KT_VAR__FIRST_FLEXIBLE_HEAP - 1;
1888 	uint16_t flex_heap_count = kt_var_heaps - fixed_heaps - 1;
1889 	/*
1890 	 * Pick a random heap to split
1891 	 */
1892 	uint16_t split_heap = kmem_get_random16(flex_heap_count - 1);
1893 
1894 	/*
1895 	 * Zones are created prior to parsing the views, as the zone budget is
1896 	 * fixed per size class, and special types identified while parsing are
1897 	 * redirected as they are discovered.
1898 	 */
1899 	kalloc_type_heap_init();
1900 
1901 	/*
1902 	 * Parse __kalloc_var sections and build array of pointers to views that
1903 	 * aren't redirected in kt_buffer.
1904 	 */
1905 	kt_count = kalloc_type_view_parse(KTV_VAR);
1906 	assert(kt_count < UINT32_MAX);
1907 
1908 #if MACH_ASSERT
1909 	vm_size_t sig_slist_size = (size_t) kt_count * sizeof(uint32_t);
1910 	vm_size_t kt_buffer_size = (size_t) kt_count * sizeof(kalloc_type_views_t);
1911 	assert(kt_scratch_size >= kt_buffer_size + sig_slist_size);
1912 #endif
1913 
1914 	/*
1915 	 * Sort based on size class and signature
1916 	 */
1917 	qsort(kt_buffer, (size_t) kt_count, sizeof(kalloc_type_var_view_t),
1918 	    kalloc_type_cmp_var);
1919 
1920 	buf_start = kalloc_type_handle_parray_var();
1921 
1922 	/*
1923 	 * Build a skip list that holds starts of unique signatures
1924 	 */
1925 	kt_skip_list_start = (uint32_t *)(kt_buffer + kt_count);
1926 	unique_sig = kalloc_type_create_iterators_var(kt_skip_list_start,
1927 	    buf_start);
1928 	shuffle_buf = (uint16_t *)(kt_skip_list_start + unique_sig + 1);
1929 	/*
1930 	 * If we have only one heap, then the other elements share the heap with
1931 	 * the pointer arrays
1932 	 */
1933 	if (kt_var_heaps < KT_VAR__FIRST_FLEXIBLE_HEAP) {
1934 		panic("kt_var_heaps is too small");
1935 	}
1936 
1937 	kmem_shuffle(shuffle_buf, flex_heap_count);
1938 	/*
1939 	 * The index of the heap we decide to split is placed twice in the shuffle
1940 	 * buffer so that it receives twice the number of signatures, which are
1941 	 * then split evenly
1942 	 */
1943 	shuffle_buf[flex_heap_count] = split_heap;
1944 	split_heap += (fixed_heaps + 1);
1945 
1946 	for (uint32_t i = 1; i <= unique_sig; i++) {
1947 		uint32_t heap_id = shuffle_buf[i % (flex_heap_count + 1)] +
1948 		    fixed_heaps + 1;
1949 		uint32_t start = kt_skip_list_start[i - 1];
1950 		uint32_t end = kt_skip_list_start[i];
1951 
1952 		assert(heap_id <= kt_var_heaps);
1953 		if (heap_id == split_heap) {
1954 			kalloc_type_split_heap(start, end, heap_id);
1955 			continue;
1956 		}
1957 		kalloc_type_assign_zone_var(&kt_buffer[start].ktv_var,
1958 		    &kt_buffer[end].ktv_var, heap_id);
1959 	}
1960 }
1961 
1962 __startup_func
1963 static void
1964 kalloc_init(void)
1965 {
1966 	/*
1967 	 * Allocate scratch space to parse kalloc_type_views and create
1968 	 * other structures necessary to process them.
1969 	 */
1970 	uint64_t max_count = kt_count = kt_scratch_size / sizeof(kalloc_type_views_t);
1971 
1972 	static_assert(KHEAP_MAX_SIZE >= KALLOC_SAFE_ALLOC_SIZE);
1973 	kalloc_zsize_compute();
1974 
1975 	/* Initialize kalloc data buffers heap */
1976 	kalloc_heap_init(KHEAP_DATA_BUFFERS);
1977 
1978 	/* Initialize kalloc shared buffers heap */
1979 	kalloc_heap_init(KHEAP_SHARED);
1980 
1981 	kmem_alloc(kernel_map, (vm_offset_t *)&kt_buffer, kt_scratch_size,
1982 	    KMA_NOFAIL | KMA_ZERO | KMA_KOBJECT | KMA_SPRAYQTN, VM_KERN_MEMORY_KALLOC);
1983 
1984 	/*
1985 	 * Handle fixed size views
1986 	 */
1987 	kalloc_type_view_init_fixed();
1988 
1989 	/*
1990 	 * Reset
1991 	 */
1992 	bzero(kt_buffer, kt_scratch_size);
1993 	kt_count = max_count;
1994 
1995 	/*
1996 	 * Handle variable size views
1997 	 */
1998 	kalloc_type_view_init_var();
1999 
2000 	/*
2001 	 * Free resources used
2002 	 */
2003 	kmem_free(kernel_map, (vm_offset_t) kt_buffer, kt_scratch_size);
2004 }
2005 STARTUP(ZALLOC, STARTUP_RANK_THIRD, kalloc_init);
2006 
2007 #pragma mark accessors
2008 
2009 #define KFREE_ABSURD_SIZE \
2010 	((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_AND_KEXT_ADDRESS) / 2)
2011 
2012 static void
2013 KALLOC_ZINFO_SALLOC(vm_size_t bytes)
2014 {
2015 	thread_t thr = current_thread();
2016 	ledger_debit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
2017 }
2018 
2019 static void
2020 KALLOC_ZINFO_SFREE(vm_size_t bytes)
2021 {
2022 	thread_t thr = current_thread();
2023 	ledger_credit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
2024 }
2025 
2026 static kmem_guard_t
2027 kalloc_guard(vm_tag_t tag, uint16_t type_hash, const void *owner)
2028 {
2029 	kmem_guard_t guard = {
2030 		.kmg_atomic      = true,
2031 		.kmg_tag         = tag,
2032 		.kmg_type_hash   = type_hash,
2033 		.kmg_context     = os_hash_kernel_pointer(owner),
2034 	};
2035 
2036 	/*
2037 	 * TODO: this use is really not sufficiently smart.
2038 	 */
2039 
2040 	return guard;
2041 }
2042 
2043 #if __arm64e__ || ZSECURITY_CONFIG(ZONE_TAGGING)
2044 
2045 #if __arm64e__
2046 #define KALLOC_ARRAY_TYPE_SHIFT (64 - T1SZ_BOOT - 1)
2047 
2048 /*
2049  * Zone encoding is:
2050  *
2051  *   <PAC SIG><1><1><PTR value><5 bits of size class>
2052  *
2053  * VM encoding is:
2054  *
2055  *   <PAC SIG><1><0><PTR value><14 bits of page count>
2056  *
2057  * The <1> is precisely placed so that <PAC SIG><1> is T1SZ worth of bits,
2058  * so that PAC authentication extends the proper sign bit.
2059  */
2060 
2061 static_assert(T1SZ_BOOT + 1 + VM_KERNEL_POINTER_SIGNIFICANT_BITS <= 64);
2062 #else /* __arm64e__ */
2063 #define KALLOC_ARRAY_TYPE_SHIFT (64 - 8 - 1)
2064 
2065 /*
2066  * Zone encoding is:
2067  *
2068  *   <TBI><1><PTR value><5 bits of size class>
2069  *
2070  * VM encoding is:
2071  *
2072  *   <TBI><0><PTR value><14 bits of page count>
2073  */
2074 
2075 static_assert(8 + 1 + 1 + VM_KERNEL_POINTER_SIGNIFICANT_BITS <= 64);
2076 #endif /* __arm64e__*/
2077 
2078 SECURITY_READ_ONLY_LATE(uint32_t) kalloc_array_type_shift = KALLOC_ARRAY_TYPE_SHIFT;
2079 
2080 __attribute__((always_inline))
2081 struct kalloc_result
2082 __kalloc_array_decode(vm_address_t ptr)
2083 {
2084 	struct kalloc_result kr;
2085 	vm_address_t zone_mask = 1ul << KALLOC_ARRAY_TYPE_SHIFT;
2086 
2087 	if (ptr & zone_mask) {
2088 		kr.size = (32 + (ptr & 0x10)) << (ptr & 0xf);
2089 		ptr &= ~0x1full;
2090 	} else if (__probable(ptr)) {
2091 		kr.size = (ptr & PAGE_MASK) << PAGE_SHIFT;
2092 		ptr &= ~PAGE_MASK;
2093 		ptr |= zone_mask;
2094 	} else {
2095 		kr.size = 0;
2096 	}
2097 
2098 	kr.addr = (void *)ptr;
2099 	return kr;
2100 }
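/*
 * Worked example (illustrative): for the zone encoding, the low 5 bits
 * hold the size class as <1 bit: 32-or-48 base><4 bits: shift>, so a
 * low-bit pattern of 0x12 decodes to (32 + 0x10) << 2 == 192 bytes,
 * i.e. size classes of the form 32 << k and 48 << k.  For the VM
 * encoding the low PAGE_MASK bits hold atop(size), so a 3-page
 * allocation decodes to 3 << PAGE_SHIFT bytes.
 */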
2101 
2102 static inline void *
2103 __kalloc_array_encode_zone(zone_t z, void *ptr, vm_size_t size __unused)
2104 {
2105 	return (void *)((vm_address_t)ptr | z->z_array_size_class);
2106 }
2107 
2108 static inline vm_address_t
2109 __kalloc_array_encode_vm(vm_address_t addr, vm_size_t size)
2110 {
2111 	addr &= ~(0x1ull << KALLOC_ARRAY_TYPE_SHIFT);
2112 
2113 	return addr | atop(size);
2114 }
2115 
2116 #else /* __arm64e__ || ZSECURITY_CONFIG(ZONE_TAGGING) */
2117 
2118 SECURITY_READ_ONLY_LATE(uint32_t) kalloc_array_type_shift = 0;
2119 
2120 /*
2121  * Encoding is:
2122  * bits  0..46: pointer value
2123  * bits 47..47: 0: zones, 1: VM
2124  * bits 48..63: zones: elem size, VM: number of pages
2125  */
2126 
2127 #define KALLOC_ARRAY_TYPE_BIT   47
2128 static_assert(KALLOC_ARRAY_TYPE_BIT > VM_KERNEL_POINTER_SIGNIFICANT_BITS + 1);
2129 static_assert(__builtin_clzll(KHEAP_MAX_SIZE) > KALLOC_ARRAY_TYPE_BIT);
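/*
 * Worked example (illustrative): for a zone element of inner size 192 at
 * kernel address P, __kalloc_array_encode_zone() clears bit 47 and ORs
 * in (192ull << 48); __kalloc_array_decode() recovers the size with
 * ptr >> 48 and sign-extends bits 0..46 to recover P.  For a VM-backed
 * allocation, bit 47 stays set and the top bits hold the page count, so
 * the decoded size is scaled back up by PAGE_SHIFT.
 */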
2130 
2131 __attribute__((always_inline))
2132 struct kalloc_result
2133 __kalloc_array_decode(vm_address_t ptr)
2134 {
2135 	struct kalloc_result kr;
2136 	uint32_t shift = 64 - KALLOC_ARRAY_TYPE_BIT;
2137 
2138 	kr.size = ptr >> (KALLOC_ARRAY_TYPE_BIT + 1);
2139 	if (ptr & (1ull << KALLOC_ARRAY_TYPE_BIT)) {
2140 		kr.size <<= PAGE_SHIFT;
2141 	}
2142 	/* sign extend, so that it also works with NULL */
2143 	kr.addr = (void *)((long)(ptr << shift) >> shift);
2144 
2145 	return kr;
2146 }
2147 
2148 static inline void *
2149 __kalloc_array_encode_zone(zone_t z __unused, void *ptr, vm_size_t size)
2150 {
2151 	vm_address_t addr = (vm_address_t)ptr;
2152 
2153 	addr &= (1ull << KALLOC_ARRAY_TYPE_BIT) - 1; /* clear bit */
2154 	addr |= size << (KALLOC_ARRAY_TYPE_BIT + 1);
2155 
2156 	return (void *)addr;
2157 }
2158 
2159 static inline vm_address_t
2160 __kalloc_array_encode_vm(vm_address_t addr, vm_size_t size)
2161 {
2162 	addr &= (2ull << KALLOC_ARRAY_TYPE_BIT) - 1; /* keep bit */
2163 	addr |= size << (KALLOC_ARRAY_TYPE_BIT + 1 - PAGE_SHIFT);
2164 
2165 	return addr;
2166 }
2167 
2168 #endif /* __arm64e__ || ZSECURITY_CONFIG(ZONE_TAGGING) */
2169 
2170 vm_size_t
2171 kalloc_next_good_size(vm_size_t size, uint32_t period)
2172 {
2173 	uint32_t scale = kalloc_log2down((uint32_t)size);
2174 	vm_size_t step, size_class;
2175 
2176 	if (size < KHEAP_STEP_START) {
2177 		return KHEAP_STEP_START;
2178 	}
2179 	if (size < 2 * KHEAP_STEP_START) {
2180 		return 2 * KHEAP_STEP_START;
2181 	}
2182 
2183 	if (size < KHEAP_MAX_SIZE) {
2184 		step = 1ul << (scale - 1);
2185 	} else {
2186 		step = round_page(1ul << (scale - kalloc_log2down(period)));
2187 	}
2188 
2189 	size_class = (size + step) & -step;
2190 #if KASAN_CLASSIC
2191 	if (size > K_SIZE_CLASS(size_class)) {
2192 		return kalloc_next_good_size(size_class, period);
2193 	}
2194 	size_class = K_SIZE_CLASS(size_class);
2195 #endif
2196 	return size_class;
2197 }
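/*
 * Worked example (illustrative): below KHEAP_MAX_SIZE the step is half
 * the size's power of two, so "good" sizes follow the
 * {2^n, 3 * 2^(n-1)} pattern.  For size == 192: scale is 7, step is 64,
 * and (192 + 64) & -64 == 256; a request of 200 likewise yields 256
 * (possibly bumped further by K_SIZE_CLASS on KASAN_CLASSIC kernels).
 */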
2198 
2199 
2200 #pragma mark kalloc
2201 
2202 static inline kalloc_heap_t
2203 kalloc_type_get_heap(kalloc_type_flags_t kt_flags)
2204 {
2205 	/*
2206 	 * Redirect data-only views
2207 	 */
2208 	if (kalloc_type_is_data(kt_flags)) {
2209 		return KHEAP_DATA_BUFFERS;
2210 	}
2211 
2212 	if (kt_flags & KT_PROCESSED) {
2213 		return KHEAP_KT_VAR;
2214 	}
2215 
2216 	return KHEAP_DEFAULT;
2217 }
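/*
 * Illustrative mapping (not exhaustive): a view whose signature is all
 * data (kalloc_type_is_data()) is redirected to KHEAP_DATA_BUFFERS; a
 * view that startup processed (KT_PROCESSED) goes to KHEAP_KT_VAR;
 * anything else, e.g. a view from a kext outside the BootKC that startup
 * never saw, falls back to KHEAP_DEFAULT.
 */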
2218 
2219 __attribute__((noinline))
2220 static struct kalloc_result
2221 kalloc_large(
2222 	kalloc_heap_t         kheap,
2223 	vm_size_t             req_size,
2224 	zalloc_flags_t        flags,
2225 	uint16_t              kt_hash,
2226 	void                 *owner __unused)
2227 {
2228 	kma_flags_t kma_flags = KMA_KASAN_GUARD;
2229 	vm_tag_t tag;
2230 	vm_offset_t addr, size;
2231 
2232 #if ZSECURITY_CONFIG(ZONE_TAGGING)
2233 	kma_flags |= KMA_TAG;
2234 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
2235 
2236 	if (flags & Z_NOFAIL) {
2237 		panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
2238 		    (size_t)req_size);
2239 	}
2240 
2241 	/*
2242 	 * kmem_alloc could block, so return early if Z_NOWAIT is set.
2243 	 *
2244 	 * Also, quickly reject sizes larger than our address space,
2245 	 * as kt_size or IOMallocArraySize() expect this.
2246 	 */
2247 	if ((flags & Z_NOWAIT) ||
2248 	    (req_size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS)) {
2249 		return (struct kalloc_result){ };
2250 	}
2251 
2252 	if ((flags & Z_KALLOC_ARRAY) && req_size > KALLOC_ARRAY_SIZE_MAX) {
2253 		return (struct kalloc_result){ };
2254 	}
2255 
2256 	/*
2257 	 * (73465472) on Intel we didn't use to pass this flag,
2258 	 * which in turn allowed kalloc_large() memory to be shared
2259 	 * with user directly.
2260 	 *
2261 	 * We're bound by this unfortunate ABI.
2262 	 */
2263 	if ((flags & Z_MAY_COPYINMAP) == 0) {
2264 #ifndef __x86_64__
2265 		kma_flags |= KMA_KOBJECT;
2266 #endif
2267 	} else {
2268 		assert(kheap == KHEAP_DATA_BUFFERS);
2269 		kma_flags &= ~KMA_TAG;
2270 	}
2271 	if (flags & Z_NOPAGEWAIT) {
2272 		kma_flags |= KMA_NOPAGEWAIT;
2273 	}
2274 	if (flags & Z_ZERO) {
2275 		kma_flags |= KMA_ZERO;
2276 	}
2277 	if (kheap == KHEAP_DATA_BUFFERS) {
2278 		kma_flags |= KMA_DATA;
2279 	} else if (flags & (Z_KALLOC_ARRAY | Z_SPRAYQTN)) {
2280 		kma_flags |= KMA_SPRAYQTN;
2281 	}
2282 
2283 
2284 	tag = zalloc_flags_get_tag(flags);
2285 	if (flags & Z_VM_TAG_BT_BIT) {
2286 		tag = vm_tag_bt() ?: tag;
2287 	}
2288 	if (tag == VM_KERN_MEMORY_NONE) {
2289 		tag = kheap->kh_tag;
2290 	}
2291 
2292 	size = round_page(req_size);
2293 	if (flags & (Z_FULLSIZE | Z_KALLOC_ARRAY)) {
2294 		req_size = round_page(size);
2295 	}
2296 
2297 	addr = kmem_alloc_guard(kernel_map, req_size, 0,
2298 	    kma_flags, kalloc_guard(tag, kt_hash, owner)).kmr_address;
2299 
2300 	if (addr != 0) {
2301 		counter_inc(&kalloc_large_count);
2302 		counter_add(&kalloc_large_total, size);
2303 		KALLOC_ZINFO_SALLOC(size);
2304 		if (flags & Z_KALLOC_ARRAY) {
2305 			addr = __kalloc_array_encode_vm(addr, req_size);
2306 		}
2307 	} else {
2308 		addr = 0;
2309 	}
2310 
2311 	DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
2312 	return (struct kalloc_result){ .addr = (void *)addr, .size = req_size };
2313 }
2314 
2315 #if KASAN
2316 
2317 static inline void
2318 kalloc_mark_unused_space(void *addr, vm_size_t size, vm_size_t used)
2319 {
2320 #if KASAN_CLASSIC
2321 	/*
2322 	 * On KASAN_CLASSIC, Z_SKIP_KASAN is defined and the entire sanitizer
2323 	 * tagging of the memory region is performed here.
2324 	 */
2325 	kasan_alloc((vm_offset_t)addr, size, used, KASAN_GUARD_SIZE, false,
2326 	    __builtin_frame_address(0));
2327 #endif /* KASAN_CLASSIC */
2328 
2329 #if KASAN_TBI
2330 	kasan_tbi_retag_unused_space((vm_offset_t)addr, size, used ?: 1);
2331 #endif /* KASAN_TBI */
2332 }
2333 #endif /* KASAN */
2334 
2335 static inline struct kalloc_result
2336 kalloc_zone(
2337 	zone_t                  z,
2338 	zone_stats_t            zstats,
2339 	zalloc_flags_t          flags,
2340 	vm_size_t               req_size)
2341 {
2342 	struct kalloc_result kr;
2343 	vm_size_t esize;
2344 
2345 	kr = zalloc_ext(z, zstats ?: z->z_stats, flags | Z_SKIP_KASAN);
2346 	esize = kr.size;
2347 
2348 	if (__probable(kr.addr)) {
2349 		if (flags & (Z_FULLSIZE | Z_KALLOC_ARRAY)) {
2350 			req_size = esize;
2351 		} else {
2352 			kr.size = req_size;
2353 		}
2354 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
2355 		kr.addr = zone_element_pgz_oob_adjust(kr.addr, req_size, esize);
2356 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
2357 
2358 #if KASAN
2359 		kalloc_mark_unused_space(kr.addr, esize, kr.size);
2360 #endif /* KASAN */
2361 
2362 		if (flags & Z_KALLOC_ARRAY) {
2363 			kr.addr = __kalloc_array_encode_zone(z, kr.addr, kr.size);
2364 		}
2365 	}
2366 
2367 	DTRACE_VM3(kalloc, vm_size_t, req_size, vm_size_t, kr.size, void*, kr.addr);
2368 	return kr;
2369 }
2370 
2371 static zone_id_t
2372 kalloc_use_shared_heap(
2373 	kalloc_heap_t           kheap,
2374 	zone_stats_t            zstats,
2375 	zone_id_t               zstart,
2376 	zalloc_flags_t         *flags)
2377 {
2378 	if (kheap->kh_heap_id != KHEAP_ID_DATA_BUFFERS) {
2379 		zone_stats_t zstats_cpu = zpercpu_get(zstats);
2380 
2381 		if (os_atomic_load(&zstats_cpu->zs_alloc_not_shared, relaxed) == 0) {
2382 			*flags |= Z_SET_NOTSHARED;
2383 			return KHEAP_SHARED->kh_zstart;
2384 		}
2385 	}
2386 
2387 	return zstart;
2388 }
2389 
2390 #undef kalloc_ext
2391 
2392 struct kalloc_result
2393 kalloc_ext(
2394 	void                   *kheap_or_kt_view,
2395 	vm_size_t               size,
2396 	zalloc_flags_t          flags,
2397 	void                   *owner)
2398 {
2399 	kalloc_type_var_view_t kt_view;
2400 	kalloc_heap_t kheap;
2401 	zone_stats_t zstats = NULL;
2402 	zone_t z;
2403 	uint16_t kt_hash;
2404 	zone_id_t zstart;
2405 
2406 	if (kt_is_var_view(kheap_or_kt_view)) {
2407 		kt_view = kt_demangle_var_view(kheap_or_kt_view);
2408 		kheap   = kalloc_type_get_heap(kt_view->kt_flags);
2409 		/*
2410 		 * Use stats from view if present, else use stats from kheap.
2411 		 * KHEAP_KT_VAR accumulates stats for all allocations going to
2412 		 * kalloc.type.var zones, while KHEAP_DEFAULT and KHEAP_DATA_BUFFERS
2413 		 * use stats from the respective zones.
2414 		 */
2415 		zstats  = kt_view->kt_stats;
2416 		kt_hash = (uint16_t) KT_GET_HASH(kt_view->kt_flags);
2417 		zstart  = kt_view->kt_heap_start ?: kheap->kh_zstart;
2418 	} else {
2419 		kt_view = NULL;
2420 		kheap   = kheap_or_kt_view;
2421 		kt_hash = kheap->kh_type_hash;
2422 		zstart  = kheap->kh_zstart;
2423 	}
2424 
2425 	if (!zstats) {
2426 		zstats = kheap->kh_stats;
2427 	}
2428 
2429 	zstart = kalloc_use_shared_heap(kheap, zstats, zstart, &flags);
2430 	z = kalloc_zone_for_size_with_flags(zstart, size, flags);
2431 	if (z) {
2432 		return kalloc_zone(z, zstats, flags, size);
2433 	} else {
2434 		return kalloc_large(kheap, size, flags, kt_hash, owner);
2435 	}
2436 }
2437 
2438 #if XNU_PLATFORM_MacOSX
2439 void *
2440 kalloc_external(vm_size_t size);
2441 void *
2442 kalloc_external(vm_size_t size)
2443 {
2444 	zalloc_flags_t flags = Z_VM_TAG_BT(Z_WAITOK, VM_KERN_MEMORY_KALLOC);
2445 	return kheap_alloc(KHEAP_DEFAULT, size, flags);
2446 }
2447 #endif /* XNU_PLATFORM_MacOSX */
2448 
2449 void *
2450 kalloc_data_external(vm_size_t size, zalloc_flags_t flags);
2451 void *
2452 kalloc_data_external(vm_size_t size, zalloc_flags_t flags)
2453 {
2454 	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC_DATA);
2455 	return kheap_alloc(KHEAP_DATA_BUFFERS, size, flags);
2456 }
2457 
2458 __abortlike
2459 static void
2460 kalloc_data_require_panic(void *addr, vm_size_t size)
2461 {
2462 	zone_id_t zid = zone_id_for_element(addr, size);
2463 
2464 	if (zid != ZONE_ID_INVALID) {
2465 		zone_t z = &zone_array[zid];
2466 		zone_security_flags_t zsflags = zone_security_array[zid];
2467 
2468 		if (zsflags.z_kheap_id != KHEAP_ID_DATA_BUFFERS) {
2469 			panic("kalloc_data_require failed: address %p in [%s%s]",
2470 			    addr, zone_heap_name(z), zone_name(z));
2471 		}
2472 
2473 		panic("kalloc_data_require failed: address %p in [%s%s], "
2474 		    "size too large %zd > %zd", addr,
2475 		    zone_heap_name(z), zone_name(z),
2476 		    (size_t)size, (size_t)zone_elem_inner_size(z));
2477 	} else {
2478 		panic("kalloc_data_require failed: address %p not in zone native map",
2479 		    addr);
2480 	}
2481 }
2482 
2483 __abortlike
2484 static void
2485 kalloc_non_data_require_panic(void *addr, vm_size_t size)
2486 {
2487 	zone_id_t zid = zone_id_for_element(addr, size);
2488 
2489 	if (zid != ZONE_ID_INVALID) {
2490 		zone_t z = &zone_array[zid];
2491 		zone_security_flags_t zsflags = zone_security_array[zid];
2492 
2493 		switch (zsflags.z_kheap_id) {
2494 		case KHEAP_ID_NONE:
2495 		case KHEAP_ID_DATA_BUFFERS:
2496 		case KHEAP_ID_KT_VAR:
2497 			panic("kalloc_non_data_require failed: address %p in [%s%s]",
2498 			    addr, zone_heap_name(z), zone_name(z));
2499 		default:
2500 			break;
2501 		}
2502 
2503 		panic("kalloc_non_data_require failed: address %p in [%s%s], "
2504 		    "size too large %zd > %zd", addr,
2505 		    zone_heap_name(z), zone_name(z),
2506 		    (size_t)size, (size_t)zone_elem_inner_size(z));
2507 	} else {
2508 		panic("kalloc_non_data_require failed: address %p not in zone native map",
2509 		    addr);
2510 	}
2511 }
2512 
2513 void
2514 kalloc_data_require(void *addr, vm_size_t size)
2515 {
2516 	zone_id_t zid = zone_id_for_element(addr, size);
2517 
2518 	if (zid != ZONE_ID_INVALID) {
2519 		zone_t z = &zone_array[zid];
2520 		zone_security_flags_t zsflags = zone_security_array[zid];
2521 		if (zsflags.z_kheap_id == KHEAP_ID_DATA_BUFFERS &&
2522 		    size <= zone_elem_inner_size(z)) {
2523 			return;
2524 		}
2525 	} else if (kmem_range_id_contains(KMEM_RANGE_ID_DATA,
2526 	    (vm_address_t)pgz_decode(addr, size), size)) {
2527 		return;
2528 	}
2529 
2530 	kalloc_data_require_panic(addr, size);
2531 }
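/*
 * Usage sketch (illustrative, `buf` and `len` are hypothetical caller
 * state): subsystems handed a pointer from a less trusted layer can
 * assert it really lives in the data heap before treating it as raw
 * bytes:
 *
 *	kalloc_data_require(buf, len);
 *	// safe to assume buf does not alias a typed allocation
 *
 * On failure this does not return; it panics via
 * kalloc_data_require_panic().
 */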
2532 
2533 void
2534 kalloc_non_data_require(void *addr, vm_size_t size)
2535 {
2536 	zone_id_t zid = zone_id_for_element(addr, size);
2537 
2538 	if (zid != ZONE_ID_INVALID) {
2539 		zone_t z = &zone_array[zid];
2540 		zone_security_flags_t zsflags = zone_security_array[zid];
2541 		switch (zsflags.z_kheap_id) {
2542 		case KHEAP_ID_NONE:
2543 			if (!zsflags.z_kalloc_type) {
2544 				break;
2545 			}
2546 			OS_FALLTHROUGH;
2547 		case KHEAP_ID_KT_VAR:
2548 			if (size < zone_elem_inner_size(z)) {
2549 				return;
2550 			}
2551 			break;
2552 		default:
2553 			break;
2554 		}
2555 	} else if (!kmem_range_id_contains(KMEM_RANGE_ID_DATA,
2556 	    (vm_address_t)pgz_decode(addr, size), size)) {
2557 		return;
2558 	}
2559 
2560 	kalloc_non_data_require_panic(addr, size);
2561 }
2562 
2563 void *
2564 kalloc_type_impl_external(kalloc_type_view_t kt_view, zalloc_flags_t flags)
2565 {
2566 	/*
2567 	 * Callsites from a kext that isn't in the BootKC on macOS, or any
2568 	 * callsite on armv7, are not processed during startup; default to
2569 	 * using kheap_alloc.
2570 	 *
2571 	 * Additionally, when the size is greater than KHEAP_MAX_SIZE, the zone
2572 	 * is left NULL as we need to use the VM for the allocation.
2573 	 *
2574 	 */
2575 	if (__improbable(kt_view->kt_zv.zv_zone == ZONE_NULL)) {
2576 		kalloc_heap_t kheap;
2577 		vm_size_t size;
2578 
2579 		flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2580 		size  = kalloc_type_get_size(kt_view->kt_size);
2581 		kheap = kalloc_type_get_heap(kt_view->kt_flags);
2582 		return kalloc_ext(kheap, size, flags, NULL).addr;
2583 	}
2584 
2585 	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2586 	return kalloc_type_impl(kt_view, flags);
2587 }
2588 
2589 void *
2590 kalloc_type_var_impl_external(
2591 	kalloc_type_var_view_t  kt_view,
2592 	vm_size_t               size,
2593 	zalloc_flags_t          flags,
2594 	void                   *owner);
2595 void *
2596 kalloc_type_var_impl_external(
2597 	kalloc_type_var_view_t  kt_view,
2598 	vm_size_t               size,
2599 	zalloc_flags_t          flags,
2600 	void                   *owner)
2601 {
2602 	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC);
2603 	return kalloc_type_var_impl(kt_view, size, flags, owner);
2604 }
2605 
2606 #pragma mark kfree
2607 
2608 __abortlike
2609 static void
2610 kfree_heap_confusion_panic(kalloc_heap_t kheap, void *data, size_t size, zone_t z)
2611 {
2612 	zone_security_flags_t zsflags = zone_security_config(z);
2613 	const char *kheap_name = kalloc_heap_names[kheap->kh_heap_id];
2614 
2615 	if (zsflags.z_kalloc_type) {
2616 		panic_include_kalloc_types = true;
2617 		kalloc_type_src_zone = z;
2618 		panic("kfree: addr %p found in kalloc type zone '%s' "
2619 		    "but being freed to %s heap", data, z->z_name, kheap_name);
2620 	}
2621 
2622 	if (zsflags.z_kheap_id == KHEAP_ID_NONE) {
2623 		panic("kfree: addr %p, size %zd found in regular zone '%s%s'",
2624 		    data, size, zone_heap_name(z), z->z_name);
2625 	} else {
2626 		panic("kfree: addr %p, size %zd found in heap %s* instead of %s*",
2627 		    data, size, zone_heap_name(z), kheap_name);
2628 	}
2629 }
2630 
2631 __abortlike
2632 static void
2633 kfree_size_confusion_panic(zone_t z, void *data,
2634     size_t oob_offs, size_t size, size_t zsize)
2635 {
2636 	if (z) {
2637 		panic("kfree: addr %p, size %zd (offs:%zd) found in zone '%s%s' "
2638 		    "with elem_size %zd",
2639 		    data, size, oob_offs, zone_heap_name(z), z->z_name, zsize);
2640 	} else {
2641 		panic("kfree: addr %p, size %zd (offs:%zd) not found in any zone",
2642 		    data, size, oob_offs);
2643 	}
2644 }
2645 
2646 __abortlike
2647 static void
2648 kfree_size_invalid_panic(void *data, size_t size)
2649 {
2650 	panic("kfree: addr %p trying to free with nonsensical size %zd",
2651 	    data, size);
2652 }
2653 
2654 __abortlike
2655 static void
2656 kfree_size_require_panic(void *data, size_t size, size_t min_size,
2657     size_t max_size)
2658 {
2659 	panic("kfree: addr %p has size %zd, not in specified bounds [%zd - %zd]",
2660 	    data, size, min_size, max_size);
2661 }
2662 
2663 static void
2664 kfree_size_require(
2665 	kalloc_heap_t kheap,
2666 	void *addr,
2667 	vm_size_t min_size,
2668 	vm_size_t max_size)
2669 {
2670 	assert3u(min_size, <=, max_size);
2671 	zone_t max_zone = kalloc_zone_for_size(kheap->kh_zstart, max_size);
2672 	vm_size_t max_zone_size = zone_elem_inner_size(max_zone);
2673 	vm_size_t elem_size = zone_element_size(addr, NULL, false, NULL);
2674 	if (elem_size > max_zone_size || elem_size < min_size) {
2675 		kfree_size_require_panic(addr, elem_size, min_size, max_zone_size);
2676 	}
2677 }
2678 
2679 static void
2680 kfree_large(
2681 	vm_offset_t             addr,
2682 	vm_size_t               size,
2683 	kmf_flags_t             flags,
2684 	void                   *owner)
2685 {
2686 	size = kmem_free_guard(kernel_map, addr, size,
2687 	    flags | KMF_TAG | KMF_KASAN_GUARD,
2688 	    kalloc_guard(VM_KERN_MEMORY_NONE, 0, owner));
2689 
2690 	counter_dec(&kalloc_large_count);
2691 	counter_add(&kalloc_large_total, -(uint64_t)size);
2692 	KALLOC_ZINFO_SFREE(size);
2693 	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, addr);
2694 }
2695 
2696 static void
2697 kfree_zone(
2698 	void                   *kheap_or_kt_view __unsafe_indexable,
2699 	void                   *data,
2700 	vm_size_t               size,
2701 	zone_t                  z,
2702 	vm_size_t               zsize)
2703 {
2704 	zone_security_flags_t zsflags = zone_security_config(z);
2705 	kalloc_type_var_view_t kt_view;
2706 	kalloc_heap_t kheap;
2707 	zone_stats_t zstats = NULL;
2708 
2709 	if (kt_is_var_view(kheap_or_kt_view)) {
2710 		kt_view = kt_demangle_var_view(kheap_or_kt_view);
2711 		kheap   = kalloc_type_get_heap(kt_view->kt_flags);
2712 		/*
2713 		 * Note: If we have cross frees between KHEAP_KT_VAR and KHEAP_DEFAULT
2714 		 * we will end up having incorrect stats. Cross frees may happen on
2715 		 * macOS due to allocation from an unprocessed view and free from
2716 		 * a processed view or vice versa.
2717 		 */
2718 		zstats  = kt_view->kt_stats;
2719 	} else {
2720 		kt_view = NULL;
2721 		kheap   = kheap_or_kt_view;
2722 	}
2723 
2724 	if (!zstats) {
2725 		zstats = kheap->kh_stats;
2726 	}
2727 
2728 	zsflags = zone_security_config(z);
2729 	if (kheap == KHEAP_DATA_BUFFERS) {
2730 		if (kheap->kh_heap_id != zsflags.z_kheap_id) {
2731 			kfree_heap_confusion_panic(kheap, data, size, z);
2732 		}
2733 	} else {
2734 		if ((kheap->kh_heap_id != zsflags.z_kheap_id) &&
2735 		    (zsflags.z_kheap_id != KHEAP_ID_SHARED)) {
2736 			kfree_heap_confusion_panic(kheap, data, size, z);
2737 		}
2738 	}
2739 
2740 	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, zsize, void*, data);
2741 
2742 	/* needs to be __nosan because the user size might be partial */
2743 	__nosan_bzero(data, zsize);
2744 	zfree_ext(z, zstats ?: z->z_stats, data, ZFREE_PACK_SIZE(zsize, size));
2745 }
2746 
2747 void
2748 kfree_ext(void *kheap_or_kt_view, void *data, vm_size_t size)
2749 {
2750 	vm_size_t bucket_size;
2751 	zone_t z;
2752 
2753 	if (data == NULL) {
2754 		return;
2755 	}
2756 
2757 	if (size > KFREE_ABSURD_SIZE) {
2758 		kfree_size_invalid_panic(data, size);
2759 	}
2760 
2761 	if (size <= KHEAP_MAX_SIZE) {
2762 		vm_size_t oob_offs;
2763 
2764 		bucket_size = zone_element_size(data, &z, true, &oob_offs);
2765 		if (size + oob_offs > bucket_size || bucket_size == 0) {
2766 			kfree_size_confusion_panic(z, data,
2767 			    oob_offs, size, bucket_size);
2768 		}
2769 
2770 		data = (char *)data - oob_offs;
2771 		kfree_zone(kheap_or_kt_view, data, size, z, bucket_size);
2772 	} else {
2773 		kfree_large((vm_offset_t)data, size, KMF_NONE, NULL);
2774 	}
2775 }
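/*
 * Usage sketch (illustrative only): the heap passed to kfree_ext() must
 * match the one used for the allocation, and the size must be the
 * originally requested size:
 *
 *	struct kalloc_result kr;
 *
 *	kr = kalloc_ext(KHEAP_DATA_BUFFERS, len, Z_WAITOK | Z_ZERO, NULL);
 *	if (kr.addr) {
 *		...
 *		kfree_ext(KHEAP_DATA_BUFFERS, kr.addr, len);
 *	}
 *
 * Mismatched heaps or sizes are caught by the confusion panics above.
 */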
2776 
2777 void
2778 kfree_addr_ext(kalloc_heap_t kheap, void *data)
2779 {
2780 	vm_offset_t oob_offs;
2781 	vm_size_t size, usize = 0;
2782 	zone_t z;
2783 
2784 	if (data == NULL) {
2785 		return;
2786 	}
2787 
2788 	size = zone_element_size(data, &z, true, &oob_offs);
2789 	if (size) {
2790 #if KASAN_CLASSIC
2791 		usize = kasan_user_size((vm_offset_t)data);
2792 #endif
2793 		data = (char *)data - oob_offs;
2794 		kfree_zone(kheap, data, usize, z, size);
2795 	} else {
2796 		kfree_large((vm_offset_t)data, 0, KMF_GUESS_SIZE, NULL);
2797 	}
2798 }
2799 
2800 #if XNU_PLATFORM_MacOSX
2801 void
2802 kfree_external(void *addr, vm_size_t size);
2803 void
2804 kfree_external(void *addr, vm_size_t size)
2805 {
2806 	kalloc_heap_t kheap = KHEAP_DEFAULT;
2807 
2808 	kfree_ext(kheap, addr, size);
2809 }
2810 #endif /* XNU_PLATFORM_MacOSX */
2811 
2812 void
2813 (kheap_free_bounded)(kalloc_heap_t kheap, void *addr,
2814     vm_size_t min_sz, vm_size_t max_sz)
2815 {
2816 	if (__improbable(addr == NULL)) {
2817 		return;
2818 	}
2819 	kfree_size_require(kheap, addr, min_sz, max_sz);
2820 	kfree_addr_ext(kheap, addr);
2821 }
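/*
 * Usage sketch (illustrative): kheap_free_bounded() is for callers that
 * do not track the exact allocation size but can bound it, e.g. a
 * variable-length structure known to span between sizeof(struct hdr)
 * and PAGE_SIZE (struct hdr being a hypothetical caller type):
 *
 *	kheap_free_bounded(KHEAP_DEFAULT, ptr, sizeof(struct hdr), PAGE_SIZE);
 *
 * kfree_size_require() panics if the backing element size falls outside
 * the requested bounds.
 */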
2822 
2823 void *
2824 kalloc_type_impl_internal(kalloc_type_view_t kt_view, zalloc_flags_t flags)
2825 {
2826 	zone_stats_t zs = kt_view->kt_zv.zv_stats;
2827 	zone_t       z  = kt_view->kt_zv.zv_zone;
2828 	zone_stats_t zs_cpu = zpercpu_get(zs);
2829 
2830 	if ((flags & Z_SET_NOTSHARED) ||
2831 	    os_atomic_load(&zs_cpu->zs_alloc_not_shared, relaxed)) {
2832 		return zalloc_ext(z, zs, flags).addr;
2833 	}
2834 
2835 	assert(zone_security_config(z).z_kheap_id != KHEAP_ID_DATA_BUFFERS);
2836 	return zalloc_ext(kt_view->kt_zshared, zs, flags | Z_SET_NOTSHARED).addr;
2837 }
2838 
2839 void
2840 kfree_type_impl_external(kalloc_type_view_t kt_view, void *ptr)
2841 {
2842 	/*
2843 	 * If callsite is from a kext that isn't in the BootKC, it wasn't
2844 	 * processed during startup so default to using kheap_alloc
2845 	 *
2846 	 * Additionally, when the size is greater than KHEAP_MAX_SIZE, the zone
2847 	 * is left NULL as we need to use the VM for the allocation/free
2848 	 */
2849 	if (kt_view->kt_zv.zv_zone == ZONE_NULL) {
2850 		kalloc_heap_t kheap;
2851 		vm_size_t size;
2852 
2853 		size  = kalloc_type_get_size(kt_view->kt_size);
2854 		kheap = kalloc_type_get_heap(kt_view->kt_flags);
2855 		return kheap_free(kheap, ptr, size);
2856 	}
2857 	return kfree_type_impl(kt_view, ptr);
2858 }
2859 
2860 void
2861 kfree_type_var_impl_external(
2862 	kalloc_type_var_view_t  kt_view,
2863 	void                   *ptr,
2864 	vm_size_t               size);
2865 void
2866 kfree_type_var_impl_external(
2867 	kalloc_type_var_view_t  kt_view,
2868 	void                   *ptr,
2869 	vm_size_t               size)
2870 {
2871 	return kfree_type_var_impl(kt_view, ptr, size);
2872 }
2873 
2874 void
2875 kfree_data_external(void *ptr, vm_size_t size);
2876 void
2877 kfree_data_external(void *ptr, vm_size_t size)
2878 {
2879 	return kheap_free(KHEAP_DATA_BUFFERS, ptr, size);
2880 }
2881 
2882 void
2883 kfree_data_addr_external(void *ptr);
2884 void
2885 kfree_data_addr_external(void *ptr)
2886 {
2887 	return kheap_free_addr(KHEAP_DATA_BUFFERS, ptr);
2888 }
2889 
2890 #pragma mark krealloc
2891 
2892 __abortlike
2893 static void
2894 krealloc_size_invalid_panic(void *data, size_t size)
2895 {
2896 	panic("krealloc: addr %p trying to free with nonsensical size %zd",
2897 	    data, size);
2898 }
2899 
2900 __attribute__((noinline))
2901 static struct kalloc_result
2902 krealloc_large(
2903 	kalloc_heap_t         kheap,
2904 	vm_offset_t           addr,
2905 	vm_size_t             old_size,
2906 	vm_size_t             new_size,
2907 	zalloc_flags_t        flags,
2908 	uint16_t              kt_hash,
2909 	void                 *owner __unused)
2910 {
2911 	kmr_flags_t kmr_flags = KMR_FREEOLD | KMR_KASAN_GUARD;
2912 	vm_size_t new_req_size = new_size;
2913 	vm_size_t old_req_size = old_size;
2914 	uint64_t delta;
2915 	kmem_return_t kmr;
2916 	vm_tag_t tag;
2917 
2918 #if ZSECURITY_CONFIG(ZONE_TAGGING)
2919 	kmr_flags |= KMR_TAG;
2920 #endif /* ZSECURITY_CONFIG(ZONE_TAGGING) */
2921 
2922 	if (flags & Z_NOFAIL) {
2923 		panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
2924 		    (size_t)new_req_size);
2925 	}
2926 
2927 	/*
2928 	 * kmem_alloc could block, so return early if Z_NOWAIT is set.
2929 	 *
2930 	 * Also, quickly reject sizes larger than our address space,
2931 	 * as kt_size or IOMallocArraySize() expect this.
2932 	 */
2933 	if ((flags & Z_NOWAIT) ||
2934 	    (new_req_size >> VM_KERNEL_POINTER_SIGNIFICANT_BITS)) {
2935 		return (struct kalloc_result){ };
2936 	}
2937 
2938 	/*
2939 	 * (73465472) on Intel we didn't use to pass this flag,
2940 	 * which in turn allowed kalloc_large() memory to be shared
2941 	 * with user directly.
2942 	 *
2943 	 * We're bound by this unfortunate ABI.
2944 	 */
2945 	if ((flags & Z_MAY_COPYINMAP) == 0) {
2946 #ifndef __x86_64__
2947 		kmr_flags |= KMR_KOBJECT;
2948 #endif
2949 	} else {
2950 		assert(kheap == KHEAP_DATA_BUFFERS);
2951 		kmr_flags &= ~KMR_TAG;
2952 	}
2953 	if (flags & Z_NOPAGEWAIT) {
2954 		kmr_flags |= KMR_NOPAGEWAIT;
2955 	}
2956 	if (flags & Z_ZERO) {
2957 		kmr_flags |= KMR_ZERO;
2958 	}
2959 	if (kheap == KHEAP_DATA_BUFFERS) {
2960 		kmr_flags |= KMR_DATA;
2961 	} else if (flags & (Z_KALLOC_ARRAY | Z_SPRAYQTN)) {
2962 		kmr_flags |= KMR_SPRAYQTN;
2963 	}
2964 	if (flags & Z_REALLOCF) {
2965 		kmr_flags |= KMR_REALLOCF;
2966 	}
2967 
2968 
2969 	tag = zalloc_flags_get_tag(flags);
2970 	if (flags & Z_VM_TAG_BT_BIT) {
2971 		tag = vm_tag_bt() ?: tag;
2972 	}
2973 	if (tag == VM_KERN_MEMORY_NONE) {
2974 		tag = kheap->kh_tag;
2975 	}
2976 
2977 	kmr = kmem_realloc_guard(kernel_map, addr, old_req_size, new_req_size,
2978 	    kmr_flags, kalloc_guard(tag, kt_hash, owner));
2979 
2980 	new_size = round_page(new_req_size);
2981 	old_size = round_page(old_req_size);
2982 
2983 	if (kmr.kmr_address != 0) {
2984 		delta = (uint64_t)(new_size - old_size);
2985 	} else if (flags & Z_REALLOCF) {
2986 		counter_dec(&kalloc_large_count);
2987 		delta = (uint64_t)(-old_size);
2988 	} else {
2989 		delta = 0;
2990 	}
2991 
2992 	counter_add(&kalloc_large_total, delta);
2993 	KALLOC_ZINFO_SALLOC(delta);
2994 
2995 	if (addr != 0 || (flags & Z_REALLOCF)) {
2996 		DTRACE_VM3(kfree, vm_size_t, old_size, vm_size_t, old_req_size,
2997 		    void*, addr);
2998 	}
2999 	if (__improbable(kmr.kmr_address == 0)) {
3000 		return (struct kalloc_result){ };
3001 	}
3002 
3003 	DTRACE_VM3(kalloc, vm_size_t, new_size, vm_size_t, new_req_size,
3004 	    void*, kmr.kmr_address);
3005 
3006 	if (flags & Z_KALLOC_ARRAY) {
3007 		kmr.kmr_address = __kalloc_array_encode_vm(kmr.kmr_address,
3008 		    new_req_size);
3009 	}
3010 	return (struct kalloc_result){ .addr = kmr.kmr_ptr, .size = new_req_size };
3011 }
3012 
3013 #undef krealloc_ext
3014 
3015 struct kalloc_result
3016 krealloc_ext(
3017 	void                 *kheap_or_kt_view __unsafe_indexable,
3018 	void                 *addr,
3019 	vm_size_t             old_size,
3020 	vm_size_t             new_size,
3021 	zalloc_flags_t        flags,
3022 	void                 *owner)
3023 {
3024 	vm_size_t old_bucket_size, new_bucket_size, min_size;
3025 	kalloc_type_var_view_t kt_view;
3026 	kalloc_heap_t kheap;
3027 	zone_stats_t zstats = NULL;
3028 	struct kalloc_result kr;
3029 	vm_offset_t oob_offs = 0;
3030 	zone_t old_z, new_z;
3031 	uint16_t kt_hash = 0;
3032 	zone_id_t zstart;
3033 
3034 	if (old_size > KFREE_ABSURD_SIZE) {
3035 		krealloc_size_invalid_panic(addr, old_size);
3036 	}
3037 
3038 	if (addr == NULL && new_size == 0) {
3039 		return (struct kalloc_result){ };
3040 	}
3041 
3042 	if (kt_is_var_view(kheap_or_kt_view)) {
3043 		kt_view = kt_demangle_var_view(kheap_or_kt_view);
3044 		kheap   = kalloc_type_get_heap(kt_view->kt_flags);
3045 		/*
3046 		 * Similar to kalloc_ext: Use stats from view if present,
3047 		 * else use stats from kheap.
3048 		 *
3049 		 * krealloc_type isn't exposed to kexts, so we don't need to
3050 		 * handle cross frees and can rely on stats from view or kheap.
3051 		 */
3052 		zstats  = kt_view->kt_stats;
3053 		kt_hash = KT_GET_HASH(kt_view->kt_flags);
3054 		zstart  = kt_view->kt_heap_start ?: kheap->kh_zstart;
3055 	} else {
3056 		kt_view = NULL;
3057 		kheap   = kheap_or_kt_view;
3058 		kt_hash = kheap->kh_type_hash;
3059 		zstart  = kheap->kh_zstart;
3060 	}
3061 
3062 	if (!zstats) {
3063 		zstats = kheap->kh_stats;
3064 	}
3065 	/*
3066 	 * Find out the size of the bucket in which the new sized allocation
3067 	 * would land. If it matches the bucket of the original allocation,
3068 	 * simply return the same address.
3069 	 */
3070 	if (new_size == 0) {
3071 		new_z = ZONE_NULL;
3072 		new_bucket_size = new_size = 0;
3073 	} else {
3074 		zstart = kalloc_use_shared_heap(kheap, zstats, zstart, &flags);
3075 		new_z = kalloc_zone_for_size_with_flags(zstart, new_size, flags);
3076 		new_bucket_size = new_z ? zone_elem_inner_size(new_z) : round_page(new_size);
3077 	}
3078 #if !KASAN_CLASSIC
3079 	if (flags & Z_FULLSIZE) {
3080 		new_size = new_bucket_size;
3081 	}
3082 #endif /* !KASAN_CLASSIC */
3083 
3084 	if (addr == NULL) {
3085 		old_z = ZONE_NULL;
3086 		old_size = old_bucket_size = 0;
3087 	} else if (kheap_size_from_zone(addr, old_size, flags)) {
3088 		old_bucket_size = zone_element_size(addr, &old_z, true, &oob_offs);
3089 		if (old_size + oob_offs > old_bucket_size || old_bucket_size == 0) {
3090 			kfree_size_confusion_panic(old_z, addr,
3091 			    oob_offs, old_size, old_bucket_size);
3092 		}
3093 		__builtin_assume(old_z != ZONE_NULL);
3094 	} else {
3095 		old_z = ZONE_NULL;
3096 		old_bucket_size = round_page(old_size);
3097 	}
3098 	min_size = MIN(old_size, new_size);
3099 
3100 	if (old_bucket_size == new_bucket_size && old_z) {
3101 		kr.addr = (char *)addr - oob_offs;
3102 		kr.size = new_size;
3103 #if ZSECURITY_CONFIG(PGZ_OOB_ADJUST)
3104 		kr.addr = zone_element_pgz_oob_adjust(kr.addr,
3105 		    new_size, new_bucket_size);
3106 		if (kr.addr != addr) {
3107 			memmove(kr.addr, addr, min_size);
3108 			bzero((char *)kr.addr + min_size,
3109 			    kr.size - min_size);
3110 		}
3111 #endif /* ZSECURITY_CONFIG(PGZ_OOB_ADJUST) */
3112 #if KASAN
3113 		/*
3114 		 * On KASAN kernels, treat a reallocation effectively as a new
3115 		 * allocation and add a sanity check around the existing one
3116 		 * w.r.t. the old requested size. On KASAN_CLASSIC this doesn't amount
3117 		 * to much extra work; on KASAN_TBI, assign a new tag both to the
3118 		 * buffer and to the potential free space.
3119 		 */
3120 #if KASAN_CLASSIC
3121 		kasan_check_alloc((vm_offset_t)addr, old_bucket_size, old_size);
3122 		kasan_alloc((vm_offset_t)addr, new_bucket_size, kr.size,
3123 		    KASAN_GUARD_SIZE, false, __builtin_frame_address(0));
3124 #endif /* KASAN_CLASSIC */
3125 #if KASAN_TBI
3126 		/*
3127 		 * Validate the current buffer, then generate a new tag,
3128 		 * even if the address is stable, it's a "new" allocation.
3129 		 */
3130 		__asan_loadN((vm_offset_t)addr, old_size);
3131 		kr.addr = (void *)vm_memtag_assign_tag((vm_offset_t)kr.addr, kr.size);
3132 		vm_memtag_set_tag((vm_offset_t)kr.addr, kr.size);
3133 		kasan_tbi_retag_unused_space((vm_offset_t)kr.addr, new_bucket_size, kr.size);
3134 #endif /* KASAN_TBI */
3135 #endif /* KASAN */
3136 		goto out_success;
3137 	}
3138 
3139 #if !KASAN
3140 	/*
3141 	 * Fallthrough to krealloc_large() for KASAN,
3142 	 * because we can't use kasan_check_alloc()
3143 	 * on kalloc_large() memory.
3144 	 *
3145 	 * kmem_realloc_guard() will perform all the validations,
3146 	 * and re-tagging.
3147 	 */
3148 	if (old_bucket_size == new_bucket_size) {
3149 		kr.addr = (char *)addr - oob_offs;
3150 		kr.size = new_size;
3151 		goto out_success;
3152 	}
3153 #endif
3154 
3155 	if (addr && !old_z && new_size && !new_z) {
3156 		return krealloc_large(kheap, (vm_offset_t)addr,
3157 		           old_size, new_size, flags, kt_hash, owner);
3158 	}
3159 
3160 	if (!new_size) {
3161 		kr.addr = NULL;
3162 		kr.size = 0;
3163 	} else if (new_z) {
3164 		kr = kalloc_zone(new_z, zstats,
3165 		    flags & ~Z_KALLOC_ARRAY, new_size);
3166 	} else if (old_z || addr == NULL) {
3167 		kr = kalloc_large(kheap, new_size,
3168 		    flags & ~Z_KALLOC_ARRAY, kt_hash, owner);
3169 	}
3170 
3171 	if (addr && kr.addr) {
3172 		__nosan_memcpy(kr.addr, addr, min_size);
3173 	}
3174 
3175 	if (addr && (kr.addr || (flags & Z_REALLOCF) || !new_size)) {
3176 		if (old_z) {
3177 			kfree_zone(kheap_or_kt_view,
3178 			    (char *)addr - oob_offs, old_size,
3179 			    old_z, old_bucket_size);
3180 		} else {
3181 			kfree_large((vm_offset_t)addr, old_size, KMF_NONE, owner);
3182 		}
3183 	}
3184 
3185 	if (__improbable(kr.addr == NULL)) {
3186 		return kr;
3187 	}
3188 
3189 out_success:
3190 	if ((flags & Z_KALLOC_ARRAY) == 0) {
3191 		return kr;
3192 	}
3193 
3194 	if (new_z) {
3195 		kr.addr = __kalloc_array_encode_zone(new_z,
3196 		    kr.addr, kr.size);
3197 	} else {
3198 		kr.addr = (void *)__kalloc_array_encode_vm((vm_offset_t)kr.addr,
3199 		    kr.size);
3200 	}
3201 	return kr;
3202 }
3203 
3204 void *
3205 krealloc_data_external(
3206 	void               *ptr,
3207 	vm_size_t           old_size,
3208 	vm_size_t           new_size,
3209 	zalloc_flags_t      flags);
3210 void *
3211 krealloc_data_external(
3212 	void               *ptr,
3213 	vm_size_t           old_size,
3214 	vm_size_t           new_size,
3215 	zalloc_flags_t      flags)
3216 {
3217 	flags = Z_VM_TAG_BT(flags & Z_KPI_MASK, VM_KERN_MEMORY_KALLOC_DATA);
3218 	return krealloc_ext(KHEAP_DATA_BUFFERS, ptr, old_size, new_size, flags, NULL).addr;
3219 }
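/*
 * Usage sketch (illustrative): resizing a data buffer requires passing
 * the previously requested size so the old allocation can be located
 * and validated:
 *
 *	void *p = kalloc_data_external(old_len, Z_WAITOK | Z_ZERO);
 *	...
 *	p = krealloc_data_external(p, old_len, new_len, Z_WAITOK | Z_ZERO);
 *
 * On failure the old buffer is left intact unless Z_REALLOCF was passed.
 * These *_external entry points are exported shims; in-kernel code
 * typically reaches the same paths through the corresponding kalloc
 * wrappers.
 */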
3220 
3221 __startup_func
3222 static void
3223 kheap_init(kalloc_heap_t parent_heap, kalloc_heap_t kheap)
3224 {
3225 	kheap->kh_zstart      = parent_heap->kh_zstart;
3226 	kheap->kh_heap_id     = parent_heap->kh_heap_id;
3227 	kheap->kh_tag         = parent_heap->kh_tag;
3228 	kheap->kh_stats       = zalloc_percpu_permanent_type(struct zone_stats);
3229 	zone_view_count += 1;
3230 }
3231 
3232 __startup_func
3233 static void
3234 kheap_init_data(kalloc_heap_t kheap)
3235 {
3236 	kheap_init(KHEAP_DATA_BUFFERS, kheap);
3237 	kheap->kh_views               = KHEAP_DATA_BUFFERS->kh_views;
3238 	KHEAP_DATA_BUFFERS->kh_views  = kheap;
3239 }
3240 
3241 __startup_func
3242 static void
3243 kheap_init_var(kalloc_heap_t kheap)
3244 {
3245 	uint16_t idx;
3246 	struct kheap_info *parent_heap;
3247 
3248 	kheap_init(KHEAP_KT_VAR, kheap);
3249 	idx = kmem_get_random16(kt_var_heaps - kt_var_ptr_heaps - 1) +
3250 	    KT_VAR__FIRST_FLEXIBLE_HEAP;
3251 	parent_heap = &kalloc_type_heap_array[idx];
3252 	kheap->kh_zstart = parent_heap->kh_zstart;
3253 	kheap->kh_type_hash = (uint16_t) kalloc_hash_adjust(
3254 		(uint32_t) early_random(), 0);
3255 	kheap->kh_views       = parent_heap->kh_views;
3256 	parent_heap->kh_views = kheap;
3257 }
3258 
3259 __startup_func
3260 void
3261 kheap_startup_init(kalloc_heap_t kheap)
3262 {
3263 	switch (kheap->kh_heap_id) {
3264 	case KHEAP_ID_DATA_BUFFERS:
3265 		kheap_init_data(kheap);
3266 		break;
3267 	case KHEAP_ID_KT_VAR:
3268 		kheap_init_var(kheap);
3269 		break;
3270 	default:
3271 		panic("kalloc_heap_startup_init: invalid KHEAP_ID: %d",
3272 		    kheap->kh_heap_id);
3273 	}
3274 }
3275 
3276 #pragma mark IOKit/libkern helpers
3277 
3278 #if XNU_PLATFORM_MacOSX
3279 
3280 void *
3281 kern_os_malloc_external(size_t size);
3282 void *
3283 kern_os_malloc_external(size_t size)
3284 {
3285 	if (size == 0) {
3286 		return NULL;
3287 	}
3288 
3289 	return kheap_alloc(KERN_OS_MALLOC, size,
3290 	           Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN));
3291 }
3292 
3293 void
3294 kern_os_free_external(void *addr);
3295 void
3296 kern_os_free_external(void *addr)
3297 {
3298 	kheap_free_addr(KERN_OS_MALLOC, addr);
3299 }
3300 
3301 void *
3302 kern_os_realloc_external(void *addr, size_t nsize);
3303 void *
3304 kern_os_realloc_external(void *addr, size_t nsize)
3305 {
3306 	zalloc_flags_t flags = Z_VM_TAG_BT(Z_WAITOK_ZERO, VM_KERN_MEMORY_LIBKERN);
3307 	vm_size_t osize, oob_offs = 0;
3308 
3309 	if (addr == NULL) {
3310 		return kern_os_malloc_external(nsize);
3311 	}
3312 
3313 	osize = zone_element_size(addr, NULL, false, &oob_offs);
3314 	if (osize == 0) {
3315 		osize = kmem_size_guard(kernel_map, (vm_offset_t)addr,
3316 		    kalloc_guard(VM_KERN_MEMORY_LIBKERN, 0, NULL));
3317 #if KASAN_CLASSIC
3318 	} else {
3319 		osize = kasan_user_size((vm_offset_t)addr);
3320 #endif
3321 	}
3322 	return __kheap_realloc(KERN_OS_MALLOC, addr, osize - oob_offs, nsize, flags, NULL);
3323 }
3324 
3325 #endif /* XNU_PLATFORM_MacOSX */
3326 
3327 void
3328 kern_os_zfree(zone_t zone, void *addr, vm_size_t size)
3329 {
3330 #if ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
3331 #pragma unused(size)
3332 	zfree(zone, addr);
3333 #else
3334 	if (zone_owns(zone, addr)) {
3335 		zfree(zone, addr);
3336 	} else {
3337 		/*
3338 		 * Third party kexts might not know about the operator new
3339 		 * overrides, so their objects may have been allocated from the default heap
3340 		 */
3341 		printf("kern_os_zfree: kheap_free called for object from zone %s\n",
3342 		    zone->z_name);
3343 		kheap_free(KHEAP_DEFAULT, addr, size);
3344 	}
3345 #endif
3346 }
3347 
3348 bool
3349 IOMallocType_from_vm(kalloc_type_view_t ktv)
3350 {
3351 	return kalloc_type_from_vm(ktv->kt_flags);
3352 }
3353 
3354 void
3355 kern_os_typed_free(kalloc_type_view_t ktv, void *addr, vm_size_t esize)
3356 {
3357 #if ZSECURITY_CONFIG(STRICT_IOKIT_FREE)
3358 #pragma unused(esize)
3359 #else
3360 	/*
3361 	 * For third party kexts compiled with an SDK prior to macOS 11,
3362 	 * allocating an OSObject that is defined in xnu or first party
3363 	 * kexts by directly calling new will lead to using the default heap,
3364 	 * as it will call OSObject_operator_new_external. If this object
3365 	 * is freed by xnu, it panics, as xnu uses the typed free which
3366 	 * requires the object to have been allocated in a kalloc.type zone.
3367 	 * To work around this issue, detect if the allocation being freed is
3368 	 * from the default heap and allow freeing to it.
3369 	 */
3370 	zone_id_t zid = zone_id_for_element(addr, esize);
3371 	if (__probable(zid < MAX_ZONES)) {
3372 		zone_security_flags_t zsflags = zone_security_array[zid];
3373 		if (zsflags.z_kheap_id == KHEAP_ID_KT_VAR) {
3374 			return kheap_free(KHEAP_DEFAULT, addr, esize);
3375 		}
3376 	}
3377 #endif
3378 	kfree_type_impl_external(ktv, addr);
3379 }
3380 
3381 #pragma mark tests
3382 #if DEBUG || DEVELOPMENT
3383 
3384 #include <sys/random.h>
3385 
3386 /*
3387  * Ensure that the feature is on when the ZSECURITY_CONFIG is present.
3388  *
3389  * Note: Presence of zones with name kalloc.type* is used to
3390  * determine if the feature is on.
3391  */
3392 static int
3393 kalloc_type_feature_on(void)
3394 {
3395 	boolean_t zone_found = false;
3396 	const char kalloc_type_str[] = "kalloc.type";
3397 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
3398 		zone_t z = kalloc_type_zarray[i];
3399 		while (z != NULL) {
3400 			zone_found = true;
3401 			if (strncmp(z->z_name, kalloc_type_str,
3402 			    strlen(kalloc_type_str)) != 0) {
3403 				return 0;
3404 			}
3405 			z = z->z_kt_next;
3406 		}
3407 	}
3408 
3409 	if (!zone_found) {
3410 		return 0;
3411 	}
3412 
3413 	return 1;
3414 }
3415 
3416 /*
3417  * Ensure that the policy uses the zone budget completely
3418  */
3419 static int
3420 kalloc_type_test_policy(int64_t in)
3421 {
3422 	uint16_t zone_budget = (uint16_t) in;
3423 	uint16_t max_bucket_freq = 25;
3424 	uint16_t freq_list[MAX_K_ZONE(kt_zone_cfg)] = {};
3425 	uint16_t freq_total_list[MAX_K_ZONE(kt_zone_cfg)] = {};
3426 	uint16_t zones_per_sig[MAX_K_ZONE(kt_zone_cfg)] = {};
3427 	uint16_t zones_per_type[MAX_K_ZONE(kt_zone_cfg)] = {};
3428 	uint16_t random[MAX_K_ZONE(kt_zone_cfg) * 2];
3429 	uint16_t wasted_zone_budget = 0, total_types = 0;
3430 	uint16_t n_zones = 0, n_zones_cal = 0;
3431 	int ret = 0;
3432 
3433 	/*
3434 	 * Need a minimum of 2 zones per size class
3435 	 */
3436 	if (zone_budget < MAX_K_ZONE(kt_zone_cfg) * 2) {
3437 		return ret;
3438 	}
3439 	read_random((void *)&random[0], sizeof(random));
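	/*
	 * Build a random frequency distribution per size-class bucket: the
	 * smaller of the two random values seeds freq_list, the larger
	 * freq_total_list, so each bucket's total count is always at least
	 * as large as its freq_list count.
	 */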
3440 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
3441 		uint16_t r1 = (random[2 * i] % max_bucket_freq) + 1;
3442 		uint16_t r2 = (random[2 * i + 1] % max_bucket_freq) + 1;
3443 
3444 		freq_list[i] = r1 > r2 ? r2 : r1;
3445 		freq_total_list[i] = r1 > r2 ? r1 : r2;
3446 	}
3447 	wasted_zone_budget = kalloc_type_apply_policy(
3448 		freq_list, freq_total_list,
3449 		zones_per_sig, zones_per_type, zone_budget);
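	/*
	 * wasted_zone_budget is the portion of zone_budget that the policy
	 * did not hand out; the check below expects it to be zero, i.e. the
	 * budget is consumed completely.
	 */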
3450 
3451 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
3452 		total_types += freq_total_list[i];
3453 	}
3454 
3455 	n_zones = kmem_get_random16(total_types);
3456 	printf("Dividing %u zones amongst %u types\n", n_zones, total_types);
3457 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
3458 		uint16_t n_zones_for_type = kalloc_type_zones_for_type(n_zones,
3459 		    freq_total_list[i], total_types,
3460 		    i == MAX_K_ZONE(kt_zone_cfg) - 1);
3461 
3462 		n_zones_cal += n_zones_for_type;
3463 
3464 		printf("%u\t%u\n", freq_total_list[i], n_zones_for_type);
3465 	}
3466 	printf("-----------------------\n%u\t%u\n", total_types,
3467 	    n_zones_cal);
3468 
3469 	if ((wasted_zone_budget == 0) && (n_zones == n_zones_cal)) {
3470 		ret = 1;
3471 	}
3472 	return ret;
3473 }
3474 
3475 /*
3476  * Ensure that the sizes of kalloc_type adopters fit in the zones
3477  * they have been assigned to.
3478  */
3479 static int
3480 kalloc_type_check_size(zone_t z)
3481 {
3482 	kalloc_type_view_t kt_cur = (kalloc_type_view_t) z->z_views;
3483 
3484 	while (kt_cur != NULL) {
3485 		if (kalloc_type_get_size(kt_cur->kt_size) > z->z_elem_size) {
3486 			return 0;
3487 		}
3488 		kt_cur = (kalloc_type_view_t) kt_cur->kt_zv.zv_next;
3489 	}
3490 
3491 	return 1;
3492 }
3493 
3494 struct test_kt_data {
3495 	int a;
3496 };
3497 
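/*
 * test_kt_data has a pure-data layout (a single int), so the signature
 * emitted for it should mark the view as data and redirect it to the
 * data-only heap; kalloc_type_is_data() on the adjusted flags verifies that.
 */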
3498 static int
3499 kalloc_type_test_data_redirect(void)
3500 {
3501 	struct kalloc_type_view ktv_data = {
3502 		.kt_flags = KALLOC_TYPE_ADJUST_FLAGS(KT_SHARED_ACCT, struct test_kt_data),
3503 		.kt_signature = KALLOC_TYPE_EMIT_SIG(struct test_kt_data),
3504 	};
3505 	if (!kalloc_type_is_data(ktv_data.kt_flags)) {
3506 		printf("%s: data redirect failed\n", __func__);
3507 		return 0;
3508 	}
3509 	return 1;
3510 }
3511 
3512 static int
3513 run_kalloc_type_test(int64_t in, int64_t *out)
3514 {
3515 	*out = 0;
3516 	for (uint16_t i = 0; i < MAX_K_ZONE(kt_zone_cfg); i++) {
3517 		zone_t z = kalloc_type_zarray[i];
3518 		while (z != NULL) {
3519 			if (!kalloc_type_check_size(z)) {
3520 				printf("%s: size check failed\n", __func__);
3521 				return 0;
3522 			}
3523 			z = z->z_kt_next;
3524 		}
3525 	}
3526 
3527 	if (!kalloc_type_test_policy(in)) {
3528 		printf("%s: policy check failed\n", __func__);
3529 		return 0;
3530 	}
3531 
3532 	if (!kalloc_type_feature_on()) {
3533 		printf("%s: boot-arg is on but feature isn't\n", __func__);
3534 		return 0;
3535 	}
3536 
3537 	if (!kalloc_type_test_data_redirect()) {
3538 		printf("%s: kalloc_type redirect for all data signature failed\n",
3539 		    __func__);
3540 		return 0;
3541 	}
3542 
3543 	printf("%s: test passed\n", __func__);
3544 
3545 	*out = 1;
3546 	return 0;
3547 }
3548 SYSCTL_TEST_REGISTER(kalloc_type, run_kalloc_type_test);
3549 
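/*
 * Map a request size to its effective allocation bucket: the zone's inner
 * element size when a kalloc zone covers the size, otherwise the
 * page-rounded size used for larger allocations.
 */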
3550 static vm_size_t
3551 test_bucket_size(kalloc_heap_t kheap, vm_size_t size)
3552 {
3553 	zone_t z = kalloc_zone_for_size(kheap->kh_zstart, size);
3554 
3555 	return z ? zone_elem_inner_size(z) : round_page(size);
3556 }
3557 
3558 static int
3559 run_kalloc_test(int64_t in __unused, int64_t *out)
3560 {
3561 	*out = 0;
3562 	uint64_t *data_ptr;
3563 	void *strippedp_old, *strippedp_new;
3564 	size_t alloc_size = 0, old_alloc_size = 0;
3565 	struct kalloc_result kr = {};
3566 
3567 	printf("%s: test running\n", __func__);
3568 
3569 	/*
3570 	 * Test size 0: alloc, free, realloc
3571 	 */
3572 	data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size, Z_WAITOK | Z_NOFAIL,
3573 	    NULL).addr;
3574 	if (!data_ptr) {
3575 		printf("%s: kalloc 0 returned null\n", __func__);
3576 		return 0;
3577 	}
3578 	kheap_free(KHEAP_DATA_BUFFERS, data_ptr, alloc_size);
3579 
3580 	data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size, Z_WAITOK | Z_NOFAIL,
3581 	    NULL).addr;
3582 	alloc_size = sizeof(uint64_t) + 1;
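	/*
	 * kr is still zero-initialized at this point, so this krealloc_ext
	 * call effectively reallocates from a NULL pointer with an old size
	 * of 0.
	 */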
3583 	data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, kr.addr, old_alloc_size,
3584 	    alloc_size, Z_WAITOK | Z_NOFAIL, NULL).addr;
3585 	if (!data_ptr) {
3586 		printf("%s: krealloc -> old size 0 failed\n", __func__);
3587 		return 0;
3588 	}
3589 	*data_ptr = 0;
3590 
3591 	/*
3592 	 * Test krealloc: same size class, different size class, 2 pages,
3593 	 * VM (with owner)
3594 	 */
3595 	old_alloc_size = alloc_size;
3596 	alloc_size++;
3597 	kr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, old_alloc_size, alloc_size,
3598 	    Z_WAITOK | Z_NOFAIL, NULL);
3599 
3600 	strippedp_old = (void *)vm_memtag_canonicalize_address((vm_offset_t)data_ptr);
3601 	strippedp_new = (void *)vm_memtag_canonicalize_address((vm_offset_t)kr.addr);
3602 
3603 	if (!kr.addr || (strippedp_old != strippedp_new) ||
3604 	    (test_bucket_size(KHEAP_DATA_BUFFERS, kr.size) !=
3605 	    test_bucket_size(KHEAP_DATA_BUFFERS, old_alloc_size))) {
3606 		printf("%s: krealloc -> same size class failed\n", __func__);
3607 		return 0;
3608 	}
3609 	data_ptr = kr.addr;
3610 	*data_ptr = 0;
3611 
3612 	old_alloc_size = alloc_size;
3613 	alloc_size *= 2;
3614 	kr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, old_alloc_size, alloc_size,
3615 	    Z_WAITOK | Z_NOFAIL, NULL);
3616 
3617 	strippedp_old = (void *)vm_memtag_canonicalize_address((vm_offset_t)data_ptr);
3618 	strippedp_new = (void *)vm_memtag_canonicalize_address((vm_offset_t)kr.addr);
3619 
3620 	if (!kr.addr || (strippedp_old == strippedp_new) ||
3621 	    (test_bucket_size(KHEAP_DATA_BUFFERS, kr.size) ==
3622 	    test_bucket_size(KHEAP_DATA_BUFFERS, old_alloc_size))) {
3623 		printf("%s: krealloc -> different size class failed\n", __func__);
3624 		return 0;
3625 	}
3626 	data_ptr = kr.addr;
3627 	*data_ptr = 0;
3628 
3629 	kheap_free(KHEAP_DATA_BUFFERS, kr.addr, alloc_size);
3630 
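	/*
	 * Z_FULLSIZE lets the caller use the full usable size of the bucket
	 * rather than just the 3544 bytes requested; passing &data_ptr as the
	 * owner argument exercises the "with owner" variants of the calls.
	 */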
3631 	alloc_size = 3544;
3632 	data_ptr = kalloc_ext(KHEAP_DATA_BUFFERS, alloc_size,
3633 	    Z_WAITOK | Z_FULLSIZE, &data_ptr).addr;
3634 	if (!data_ptr) {
3635 		printf("%s: kalloc 3544 with owner and Z_FULLSIZE returned not null\n",
3636 		    __func__);
3637 		return 0;
3638 	}
3639 	*data_ptr = 0;
3640 
3641 	data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, alloc_size,
3642 	    PAGE_SIZE * 2, Z_REALLOCF | Z_WAITOK, &data_ptr).addr;
3643 	if (!data_ptr) {
3644 		printf("%s: krealloc -> 2pgs returned not null\n", __func__);
3645 		return 0;
3646 	}
3647 	*data_ptr = 0;
3648 
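	/*
	 * KHEAP_MAX_SIZE is the largest size served by the kalloc zones, so
	 * growing to twice that size should push the allocation onto the
	 * VM-backed large-allocation path.
	 */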
3649 	data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, PAGE_SIZE * 2,
3650 	    KHEAP_MAX_SIZE * 2, Z_REALLOCF | Z_WAITOK, &data_ptr).addr;
3651 	if (!data_ptr) {
3652 		printf("%s: krealloc -> VM1 returned not null\n", __func__);
3653 		return 0;
3654 	}
3655 	*data_ptr = 0;
3656 
3657 	data_ptr = krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, KHEAP_MAX_SIZE * 2,
3658 	    KHEAP_MAX_SIZE * 4, Z_REALLOCF | Z_WAITOK, &data_ptr).addr;
3659 	if (!data_ptr) {
3660 		printf("%s: krealloc -> VM2 returned null\n", __func__);
3661 		return 0;
3662 	}
3663 	*data_ptr = 0;
3664 
3665 	krealloc_ext(KHEAP_DATA_BUFFERS, data_ptr, KHEAP_MAX_SIZE * 4,
3666 	    0, Z_REALLOCF | Z_WAITOK, &data_ptr);
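	/*
	 * Reallocating to size 0 with Z_REALLOCF should release the buffer,
	 * serving as the final free for this test.
	 */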
3667 
3668 	printf("%s: test passed\n", __func__);
3669 	*out = 1;
3670 	return 0;
3671 }
3672 SYSCTL_TEST_REGISTER(kalloc, run_kalloc_test);
3673 
3674 #endif /* DEBUG || DEVELOPMENT */
3675