xref: /xnu-11215/osfmk/kern/kern_cdata.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2015 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kern/assert.h>
30 #include <mach/mach_types.h>
31 #include <mach/boolean.h>
32 #include <mach/vm_param.h>
33 #include <kern/kern_types.h>
34 #include <kern/mach_param.h>
35 #include <kern/thread.h>
36 #include <kern/task.h>
37 #include <kern/kern_cdata.h>
38 #include <kern/kalloc.h>
39 #include <kern/ipc_kobject.h>
40 #include <mach/mach_vm.h>
41 
42 static kern_return_t kcdata_get_memory_addr_with_flavor(kcdata_descriptor_t data, uint32_t type, uint32_t size, uint64_t flags, mach_vm_address_t *user_addr);
43 static size_t kcdata_get_memory_size_for_data(uint32_t size);
44 static kern_return_t kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t flags);
45 static kern_return_t kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size);
46 static kern_return_t kcdata_write_compression_stats(kcdata_descriptor_t data);
47 static kern_return_t kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin);
48 static void kcdata_object_no_senders(ipc_port_t port, mach_port_mscount_t mscount);
49 
50 #ifndef ROUNDUP
51 #define ROUNDUP(x, y)            ((((x)+(y)-1)/(y))*(y))
52 #endif
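
/*
 * Example: ROUNDUP(10, 8) == 16 and ROUNDUP(16, 8) == 16; it is used below to
 * round end-of-buffer allocations up to uint64_t alignment.
 */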
53 
54 /*
55  * zlib needs space to store its metadata; this value is independent of the
56  * window bits and other zlib internals
57  */
58 #define ZLIB_METADATA_SIZE 1440
59 
60 /* #define kcdata_debug_printf printf */
61 #define kcdata_debug_printf(...) ;
62 
63 #pragma pack(push, 4)
64 
65 /* Internal structs for convenience */
66 struct _uint64_with_description_data {
67 	char desc[KCDATA_DESC_MAXLEN];
68 	uint64_t data;
69 };
70 
71 struct _uint32_with_description_data {
72 	char     desc[KCDATA_DESC_MAXLEN];
73 	uint32_t data;
74 };
75 
76 #pragma pack(pop)
77 
78 int _Atomic lw_corpse_obj_cnt = 0;
79 
80 IPC_KOBJECT_DEFINE(IKOT_KCDATA,
81     .iko_op_stable     = true,
82     .iko_op_no_senders = kcdata_object_no_senders);
83 
84 KALLOC_TYPE_DEFINE(KCDATA_OBJECT, struct kcdata_object, KT_DEFAULT);
85 
86 os_refgrp_decl(static, kcdata_object_refgrp, "kcdata_object", NULL);
87 
88 /* Grab a throttle slot for rate-limited kcdata object type(s) */
89 kern_return_t
90 kcdata_object_throttle_get(
91 	kcdata_obj_flags_t flags)
92 {
93 	int oval, nval;
94 
95 	/* Currently only lightweight corpse is rate-limited */
96 	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
97 	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
98 		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
99 			if (oval >= MAX_INFLIGHT_KCOBJECT_LW_CORPSE) {
100 			        printf("Too many lightweight corpses in flight: %d\n", oval);
101 			        os_atomic_rmw_loop_give_up(return KERN_RESOURCE_SHORTAGE);
102 			}
103 			nval = oval + 1;
104 		});
105 	}
106 
107 	return KERN_SUCCESS;
108 }
109 
110 /* Release a throttle slot for rate-limited kcdata object type(s) */
111 void
112 kcdata_object_throttle_release(
113 	kcdata_obj_flags_t flags)
114 {
115 	int oval, nval;
116 
117 	/* Currently only lightweight corpse is rate-limited */
118 	assert(flags & KCDATA_OBJECT_TYPE_LW_CORPSE);
119 	if (flags & KCDATA_OBJECT_TYPE_LW_CORPSE) {
120 		os_atomic_rmw_loop(&lw_corpse_obj_cnt, oval, nval, relaxed, {
121 			nval = oval - 1;
122 			if (__improbable(nval < 0)) {
123 			        os_atomic_rmw_loop_give_up(panic("Lightweight corpse kcdata object over-released"));
124 			}
125 		});
126 	}
127 }
128 
129 /*
130  * Create an object representation for the given kcdata.
131  *
132  * Captures the kcdata descriptor ref in the object. If the object creation
133  * should be rate-limited, kcdata_object_throttle_get() must be called
134  * manually before invoking kcdata_create_object(), so that the work of
135  * creating the enclosed kcdata blob is avoided when a throttled reference
136  * cannot be obtained in the first place.
137  */
138 kern_return_t
139 kcdata_create_object(
140 	kcdata_descriptor_t data,
141 	kcdata_obj_flags_t flags,
142 	uint32_t        size,
143 	kcdata_object_t *objp)
144 {
145 	kcdata_object_t obj;
146 
147 	if (data == NULL) {
148 		return KERN_INVALID_ARGUMENT;
149 	}
150 
151 	obj = zalloc_flags(KCDATA_OBJECT,
152 	    Z_ZERO | Z_WAITOK | Z_NOFAIL | Z_SET_NOTSHARED);
153 
154 	obj->ko_data = data;
155 	obj->ko_flags = flags;
156 	obj->ko_alloc_size = size;
157 	obj->ko_port = IP_NULL;
158 
159 	os_ref_init_count(&obj->ko_refs, &kcdata_object_refgrp, 1);
160 
161 	*objp = obj;
162 
163 	return KERN_SUCCESS;
164 }
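
/*
 * Illustrative call sequence (sketch, error handling abbreviated): a caller
 * building a rate-limited lightweight-corpse object takes the throttle slot
 * first, then builds the kcdata blob, and only then wraps it in an object.
 *
 *	kcdata_object_t obj;
 *	kcdata_obj_flags_t oflags = KCDATA_OBJECT_TYPE_LW_CORPSE;
 *
 *	if (kcdata_object_throttle_get(oflags) != KERN_SUCCESS) {
 *		return KERN_RESOURCE_SHORTAGE;	 // too many in flight
 *	}
 *	// ... allocate the buffer and populate `data' (kcdata_descriptor_t) ...
 *	if (kcdata_create_object(data, oflags, buf_size, &obj) != KERN_SUCCESS) {
 *		kcdata_object_throttle_release(oflags);	 // give the slot back on failure
 *	}
 */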
165 
166 void
167 kcdata_object_reference(kcdata_object_t obj)
168 {
169 	if (obj == KCDATA_OBJECT_NULL) {
170 		return;
171 	}
172 
173 	os_ref_retain(&obj->ko_refs);
174 }
175 
176 static void
177 kcdata_object_destroy(kcdata_object_t obj)
178 {
179 	void *begin_addr;
180 	ipc_port_t port;
181 	kcdata_obj_flags_t flags;
182 
183 	if (obj == KCDATA_OBJECT_NULL) {
184 		return;
185 	}
186 
187 	port = obj->ko_port;
188 	flags = obj->ko_flags;
189 
190 	/* Release the port */
191 	if (IP_VALID(port)) {
192 		ipc_kobject_dealloc_port(port, 0, IKOT_KCDATA);
193 	}
194 
195 	/* Release the ref for rate-limited kcdata object type(s) */
196 	kcdata_object_throttle_release(flags);
197 
198 	/* Destroy the kcdata backing captured in the object */
199 	begin_addr = kcdata_memory_get_begin_addr(obj->ko_data);
200 	kfree_data(begin_addr, obj->ko_alloc_size);
201 	kcdata_memory_destroy(obj->ko_data);
202 
203 	/* Free the object */
204 	zfree(KCDATA_OBJECT, obj);
205 }
206 
207 void
208 kcdata_object_release(kcdata_object_t obj)
209 {
210 	if (obj == KCDATA_OBJECT_NULL) {
211 		return;
212 	}
213 
214 	if (os_ref_release(&obj->ko_refs) > 0) {
215 		return;
216 	}
217 	/* last ref */
218 
219 	kcdata_object_destroy(obj);
220 }
221 
222 /* Produces kcdata object ref */
223 kcdata_object_t
224 convert_port_to_kcdata_object(ipc_port_t port)
225 {
226 	kcdata_object_t obj = KCDATA_OBJECT_NULL;
227 
228 	if (IP_VALID(port)) {
229 		obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
230 		if (obj != KCDATA_OBJECT_NULL) {
231 			zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);
232 			kcdata_object_reference(obj);
233 		}
234 	}
235 
236 	return obj;
237 }
238 
239 /* Consumes kcdata object ref */
240 ipc_port_t
241 convert_kcdata_object_to_port(kcdata_object_t obj)
242 {
243 	if (obj == KCDATA_OBJECT_NULL) {
244 		return IP_NULL;
245 	}
246 
247 	zone_require(KCDATA_OBJECT->kt_zv.zv_zone, obj);
248 
249 	if (!ipc_kobject_make_send_lazy_alloc_port(&obj->ko_port,
250 	    obj, IKOT_KCDATA, IPC_KOBJECT_ALLOC_NONE)) {
251 		kcdata_object_release(obj);
252 	}
253 	/* object ref consumed */
254 
255 	return obj->ko_port;
256 }
257 
258 static void
259 kcdata_object_no_senders(
260 	ipc_port_t port,
261 	__unused mach_port_mscount_t mscount)
262 {
263 	kcdata_object_t obj;
264 
265 	obj = ipc_kobject_get_stable(port, IKOT_KCDATA);
266 	assert(obj != KCDATA_OBJECT_NULL);
267 
268 	/* release the ref given by no-senders notification */
269 	kcdata_object_release(obj);
270 }
271 
272 /*
273  * Estimates how large a buffer should be allocated to hold num_items items
274  * of known types with overall payload length payload_size.
275  *
276  * NOTE: This function will not give an accurate estimate for buffers that will
277  *       contain unknown types (those with string descriptions).
278  */
279 uint32_t
280 kcdata_estimate_required_buffer_size(uint32_t num_items, uint32_t payload_size)
281 {
282 	/*
283 	 * In the worst case each item will need (KCDATA_ALIGNMENT_SIZE - 1) padding
284 	 */
285 	uint32_t max_padding_bytes = 0;
286 	uint32_t max_padding_with_item_description_bytes = 0;
287 	uint32_t estimated_required_buffer_size = 0;
288 	const uint32_t begin_and_end_marker_bytes = 2 * sizeof(struct kcdata_item);
289 
290 	if (os_mul_overflow(num_items, KCDATA_ALIGNMENT_SIZE - 1, &max_padding_bytes)) {
291 		panic("%s: Overflow in required buffer size estimate", __func__);
292 	}
293 
294 	if (os_mul_and_add_overflow(num_items, sizeof(struct kcdata_item), max_padding_bytes, &max_padding_with_item_description_bytes)) {
295 		panic("%s: Overflow in required buffer size estimate", __func__);
296 	}
297 
298 	if (os_add3_overflow(max_padding_with_item_description_bytes, begin_and_end_marker_bytes, payload_size, &estimated_required_buffer_size)) {
299 		panic("%s: Overflow in required buffer size estimate", __func__);
300 	}
301 
302 	return estimated_required_buffer_size;
303 }
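
/*
 * For example, with the constants above the worst-case estimate works out to
 *
 *	  num_items * (KCDATA_ALIGNMENT_SIZE - 1)      (per-item padding)
 *	+ num_items * sizeof(struct kcdata_item)       (per-item headers)
 *	+ 2 * sizeof(struct kcdata_item)               (BEGIN/END markers)
 *	+ payload_size
 *
 * so callers typically size their allocation with this helper and hand the
 * result to kcdata_memory_alloc_init() below.
 */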
304 
305 kcdata_descriptor_t
306 kcdata_memory_alloc_init(mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
307 {
308 	kcdata_descriptor_t data = NULL;
309 	mach_vm_address_t user_addr = 0;
310 	uint16_t clamped_flags = (uint16_t) flags;
311 
312 	data = kalloc_type(struct kcdata_descriptor, Z_WAITOK | Z_ZERO | Z_NOFAIL);
313 	data->kcd_addr_begin = buffer_addr_p;
314 	data->kcd_addr_end = buffer_addr_p;
315 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
316 	data->kcd_length = size;
317 	data->kcd_endalloced = 0;
318 
319 	/* Initialize the BEGIN header */
320 	if (KERN_SUCCESS != kcdata_get_memory_addr(data, data_type, 0, &user_addr)) {
321 		kcdata_memory_destroy(data);
322 		return NULL;
323 	}
324 
325 	return data;
326 }
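
/*
 * Typical in-kernel usage (sketch; `MY_BUFFER_BEGIN_TAG' and `MY_ITEM_TYPE'
 * are placeholder type ids, not constants defined here):
 *
 *	uint32_t size = kcdata_estimate_required_buffer_size(1, sizeof(payload));
 *	void *buf = kalloc_data(size, Z_WAITOK | Z_ZERO);
 *	kcdata_descriptor_t data = kcdata_memory_alloc_init(
 *	    (mach_vm_address_t)buf, MY_BUFFER_BEGIN_TAG, size, KCFLAG_USE_MEMCOPY);
 *	kcdata_push_data(data, MY_ITEM_TYPE, sizeof(payload), &payload);
 *	kcdata_write_buffer_end(data);
 *	...
 *	kcdata_memory_destroy(data);	// frees the descriptor only; buf is freed separately
 */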
327 
328 kern_return_t
329 kcdata_memory_static_init(kcdata_descriptor_t data, mach_vm_address_t buffer_addr_p, unsigned data_type, unsigned size, unsigned flags)
330 {
331 	mach_vm_address_t user_addr = 0;
332 	uint16_t clamped_flags = (uint16_t) flags;
333 
334 	if (data == NULL) {
335 		return KERN_INVALID_ARGUMENT;
336 	}
337 	bzero(data, sizeof(struct kcdata_descriptor));
338 	data->kcd_addr_begin = buffer_addr_p;
339 	data->kcd_addr_end = buffer_addr_p;
340 	data->kcd_flags = (clamped_flags & KCFLAG_USE_COPYOUT) ? clamped_flags : clamped_flags | KCFLAG_USE_MEMCOPY;
341 	data->kcd_length = size;
342 	data->kcd_endalloced = 0;
343 
344 	/* Initialize the BEGIN header */
345 	return kcdata_get_memory_addr(data, data_type, 0, &user_addr);
346 }
347 
348 void *
349 kcdata_endalloc(kcdata_descriptor_t data, size_t length)
350 {
351 	/*
352 	 * We do not support endalloc with a space allocation callback - the
353 	 * callback may need to free the remaining free space in the buffer,
354 	 * trampling endallocs and complicating things.
355 	 */
356 	if (data->kcd_alloc_callback != NULL) {
357 		return NULL;
358 	}
359 	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
360 	/* round up allocation and ensure return value is uint64-aligned */
361 	size_t toalloc = ROUNDUP(length, sizeof(uint64_t)) + (curend % sizeof(uint64_t));
362 	/* an arbitrary limit: make sure we don't allocate more than 1/4th of the remaining buffer. */
363 	if (data->kcd_length / 4 <= toalloc) {
364 		return NULL;
365 	}
366 	data->kcd_length -= toalloc;
367 	data->kcd_endalloced += toalloc;
368 	return (void *)(curend - toalloc);
369 }
370 
371 /* Zeros and releases data allocated from the end of the buffer */
372 static void
373 kcdata_release_endallocs(kcdata_descriptor_t data)
374 {
375 	mach_vm_address_t curend = data->kcd_addr_begin + data->kcd_length;
376 	size_t endalloced = data->kcd_endalloced;
377 	if (endalloced > 0) {
378 		bzero((void *)curend, endalloced);
379 		data->kcd_length += endalloced;
380 		data->kcd_endalloced = 0;
381 	}
382 }
383 
384 void *
385 kcdata_memory_get_begin_addr(kcdata_descriptor_t data)
386 {
387 	if (data == NULL) {
388 		return NULL;
389 	}
390 
391 	return (void *)data->kcd_addr_begin;
392 }
393 
394 uint64_t
395 kcdata_memory_get_used_bytes(kcdata_descriptor_t kcd)
396 {
397 	assert(kcd != NULL);
398 	return ((uint64_t)kcd->kcd_addr_end - (uint64_t)kcd->kcd_addr_begin) + sizeof(struct kcdata_item);
399 }
400 
401 uint64_t
402 kcdata_memory_get_uncompressed_bytes(kcdata_descriptor_t kcd)
403 {
404 	kern_return_t kr;
405 
406 	assert(kcd != NULL);
407 	if (kcd->kcd_flags & KCFLAG_USE_COMPRESSION) {
408 		uint64_t totalout, totalin;
409 
410 		kr = kcdata_get_compression_stats(kcd, &totalout, &totalin);
411 		if (kr == KERN_SUCCESS) {
412 			return totalin;
413 		} else {
414 			return 0;
415 		}
416 	} else {
417 		/* If compression wasn't used, get the number of bytes used  */
418 		return kcdata_memory_get_used_bytes(kcd);
419 	}
420 }
421 
422 /*
423  * Free up the memory associated with kcdata
424  */
425 kern_return_t
426 kcdata_memory_destroy(kcdata_descriptor_t data)
427 {
428 	if (!data) {
429 		return KERN_INVALID_ARGUMENT;
430 	}
431 
432 	/*
433 	 * data->kcd_addr_begin points to memory that is not tracked by the
434 	 * kcdata lib, so it is not cleared or freed here.
435 	 */
436 	kfree_type(struct kcdata_descriptor, data);
437 	return KERN_SUCCESS;
438 }
439 
440 /* Used by zlib to allocate space in its metadata section */
441 static void *
442 kcdata_compress_zalloc(void *opaque, u_int items, u_int size)
443 {
444 	void *result;
445 	struct kcdata_compress_descriptor *cd = opaque;
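	/* round the request up to the next multiple of 32 bytes so suballocation sizes stay aligned */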
446 	int alloc_size = ~31L & (31 + (items * size));
447 
448 	result = (void *)((uintptr_t)cd->kcd_cd_base + cd->kcd_cd_offset);
449 	if ((uintptr_t) result + alloc_size > (uintptr_t) cd->kcd_cd_base + cd->kcd_cd_maxoffset) {
450 		result = Z_NULL;
451 	} else {
452 		cd->kcd_cd_offset += alloc_size;
453 	}
454 
455 	kcdata_debug_printf("%s: %d * %d = %d  => %p\n", __func__, items, size, items * size, result);
456 
457 	return result;
458 }
459 
460 /* Used by zlib to free previously allocated space in its metadata section */
461 static void
462 kcdata_compress_zfree(void *opaque, void *ptr)
463 {
464 	(void) opaque;
465 	(void) ptr;
466 
467 	kcdata_debug_printf("%s: ptr %p\n", __func__, ptr);
468 
469 	/*
470 	 * Since the buffers we are using are temporary, we don't worry about
471 	 * freeing memory for now. Besides, testing has shown that zlib only calls
472 	 * this at the end, near deflateEnd() or a Z_FINISH deflate() call.
473 	 */
474 }
475 
476 /* Used to initialize the selected compression algorithm's internal state (if any) */
477 static kern_return_t
478 kcdata_init_compress_state(kcdata_descriptor_t data, void (*memcpy_f)(void *, const void *, size_t), uint64_t type, mach_vm_address_t totalout_addr, mach_vm_address_t totalin_addr)
479 {
480 	kern_return_t ret = KERN_SUCCESS;
481 	size_t size;
482 	int wbits = 12, memlevel = 3;
483 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
484 
485 	cd->kcd_cd_memcpy_f = memcpy_f;
486 	cd->kcd_cd_compression_type = type;
487 	cd->kcd_cd_totalout_addr = totalout_addr;
488 	cd->kcd_cd_totalin_addr = totalin_addr;
489 
490 	switch (type) {
491 	case KCDCT_ZLIB:
492 		/* allocate space for the metadata used by zlib */
493 		size = round_page(ZLIB_METADATA_SIZE + zlib_deflate_memory_size(wbits, memlevel));
494 		kcdata_debug_printf("%s: size = %zu kcd_length: %d\n", __func__, size, data->kcd_length);
495 		kcdata_debug_printf("%s: kcd buffer [%p - %p]\n", __func__, (void *) data->kcd_addr_begin, (void *) data->kcd_addr_begin + data->kcd_length);
496 		void *buf = kcdata_endalloc(data, size);
497 		if (buf == NULL) {
498 			return KERN_INSUFFICIENT_BUFFER_SIZE;
499 		}
500 
501 		cd->kcd_cd_zs.avail_in = 0;
502 		cd->kcd_cd_zs.next_in = NULL;
503 		cd->kcd_cd_zs.avail_out = 0;
504 		cd->kcd_cd_zs.next_out = NULL;
505 		cd->kcd_cd_zs.opaque = cd;
506 		cd->kcd_cd_zs.zalloc = kcdata_compress_zalloc;
507 		cd->kcd_cd_zs.zfree = kcdata_compress_zfree;
508 		cd->kcd_cd_base = (void *)(data->kcd_addr_begin + data->kcd_length - size);
509 		data->kcd_length -= size;
510 		cd->kcd_cd_offset = 0;
511 		cd->kcd_cd_maxoffset = size;
512 		cd->kcd_cd_flags = 0;
513 
514 		kcdata_debug_printf("%s: buffer [%p - %p]\n", __func__, cd->kcd_cd_base, cd->kcd_cd_base + size);
515 
516 		if (deflateInit2(&cd->kcd_cd_zs, Z_BEST_SPEED, Z_DEFLATED, wbits, memlevel, Z_DEFAULT_STRATEGY) != Z_OK) {
517 			kcdata_debug_printf("EMERGENCY: deflateInit2 failed!\n");
518 			ret = KERN_INVALID_ARGUMENT;
519 		}
520 		break;
521 	default:
522 		panic("kcdata_init_compress_state: invalid compression type: %d", (int) type);
523 	}
524 
525 	return ret;
526 }
527 
528 
529 /*
530  * Turn on the compression logic for kcdata
531  */
532 kern_return_t
533 kcdata_init_compress(kcdata_descriptor_t data, int hdr_tag, void (*memcpy_f)(void *, const void *, size_t), uint64_t type)
534 {
535 	kern_return_t kr;
536 	mach_vm_address_t user_addr, totalout_addr, totalin_addr;
537 	struct _uint64_with_description_data save_data;
538 	const uint64_t size_req = sizeof(save_data);
539 
540 	assert(data && (data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0);
541 
542 	/* reset the compression descriptor */
543 	bzero(&data->kcd_comp_d, sizeof(struct kcdata_compress_descriptor));
544 
545 	/* add the header information */
546 	kcdata_add_uint64_with_description(data, type, "kcd_c_type");
547 
548 	/* reserve space to write total out */
549 	bzero(&save_data, size_req);
550 	strlcpy(&(save_data.desc[0]), "kcd_c_totalout", sizeof(save_data.desc));
551 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalout_addr);
552 	if (kr != KERN_SUCCESS) {
553 		return kr;
554 	}
555 	memcpy((void *)totalout_addr, &save_data, size_req);
556 
557 	/* space for total in */
558 	bzero(&save_data, size_req);
559 	strlcpy(&(save_data.desc[0]), "kcd_c_totalin", sizeof(save_data.desc));
560 	kr = kcdata_get_memory_addr(data, KCDATA_TYPE_UINT64_DESC, size_req, &totalin_addr);
561 	if (kr != KERN_SUCCESS) {
562 		return kr;
563 	}
564 	memcpy((void *)totalin_addr, &save_data, size_req);
565 
566 	/* add the inner buffer */
567 	kcdata_get_memory_addr(data, hdr_tag, 0, &user_addr);
568 
569 	/* save the flag */
570 	data->kcd_flags |= KCFLAG_USE_COMPRESSION;
571 
572 	/* initialize algorithm specific state */
573 	kr = kcdata_init_compress_state(data, memcpy_f, type, totalout_addr + offsetof(struct _uint64_with_description_data, data), totalin_addr + offsetof(struct _uint64_with_description_data, data));
574 	if (kr != KERN_SUCCESS) {
575 		kcdata_debug_printf("%s: failed to initialize compression state!\n", __func__);
576 		return kr;
577 	}
578 
579 	return KERN_SUCCESS;
580 }
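
/*
 * Sketch of enabling compression on an existing descriptor (the memcpy
 * callback and MY_BUFFER_BEGIN_TAG shown here are placeholders; stackshot
 * supplies its own):
 *
 *	static void my_memcpy(void *dst, const void *src, size_t len) { memcpy(dst, src, len); }
 *	...
 *	kr = kcdata_init_compress(data, MY_BUFFER_BEGIN_TAG, my_memcpy, KCDCT_ZLIB);
 *
 * After this, kcdata_push_data()/kcdata_push_array() transparently route
 * through kcdata_compress_chunk(), and kcdata_finish() writes the totals
 * reserved above and tears down the zlib state.
 */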
581 
582 static inline
583 int
584 kcdata_zlib_translate_kcd_cf_flag(enum kcdata_compression_flush flush)
585 {
586 	switch (flush) {
587 	case KCDCF_NO_FLUSH: return Z_NO_FLUSH;
588 	case KCDCF_SYNC_FLUSH: return Z_SYNC_FLUSH;
589 	case KCDCF_FINISH: return Z_FINISH;
590 	default: panic("invalid kcdata_zlib_translate_kcd_cf_flag flag");
591 	}
592 }
593 
594 static inline
595 int
596 kcdata_zlib_translate_kcd_cf_expected_ret(enum kcdata_compression_flush flush)
597 {
598 	switch (flush) {
599 	case KCDCF_NO_FLUSH:         /* fall through */
600 	case KCDCF_SYNC_FLUSH: return Z_OK;
601 	case KCDCF_FINISH: return Z_STREAM_END;
602 	default: panic("invalid kcdata_zlib_translate_kcd_cf_expected_ret flag");
603 	}
604 }
605 
606 /* Called by kcdata_do_compress() when the configured compression algorithm is zlib */
607 static kern_return_t
608 kcdata_do_compress_zlib(kcdata_descriptor_t data, void *inbuffer,
609     size_t insize, void *outbuffer, size_t outsize, size_t *wrote,
610     enum kcdata_compression_flush flush)
611 {
612 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
613 	z_stream *zs = &cd->kcd_cd_zs;
614 	int expected_ret, ret;
615 
616 	zs->next_out = outbuffer;
617 	zs->avail_out = (unsigned int) outsize;
618 	zs->next_in = inbuffer;
619 	zs->avail_in = (unsigned int) insize;
620 	ret = deflate(zs, kcdata_zlib_translate_kcd_cf_flag(flush));
621 	if (zs->avail_in != 0 || zs->avail_out <= 0) {
622 		return KERN_INSUFFICIENT_BUFFER_SIZE;
623 	}
624 
625 	expected_ret = kcdata_zlib_translate_kcd_cf_expected_ret(flush);
626 	if (ret != expected_ret) {
627 		/*
628 		 * Should only fail with catastrophic, unrecoverable cases (i.e.,
629 		 * corrupted z_stream, or incorrect configuration)
630 		 */
631 		panic("zlib kcdata compression ret = %d", ret);
632 	}
633 
634 	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d; ret = %ld\n",
635 	    __func__, outbuffer, outsize, inbuffer, insize, flush, outsize - zs->avail_out);
636 	if (wrote) {
637 		*wrote = outsize - zs->avail_out;
638 	}
639 	return KERN_SUCCESS;
640 }
641 
642 /*
643  * Compress the buffer at @inbuffer (of size @insize) into the kcdata buffer
644  * @outbuffer (of size @outsize). Flush based on the @flush parameter.
645  *
646  * Returns KERN_SUCCESS on success, or KERN_INSUFFICIENT_BUFFER_SIZE if
647  * @outsize isn't sufficient. Also, writes the number of bytes written in the
648  * @outbuffer to @wrote.
649  */
650 static kern_return_t
651 kcdata_do_compress(kcdata_descriptor_t data, void *inbuffer, size_t insize,
652     void *outbuffer, size_t outsize, size_t *wrote, enum kcdata_compression_flush flush)
653 {
654 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
655 
656 	assert(data->kcd_flags & KCFLAG_USE_COMPRESSION);
657 
658 	kcdata_debug_printf("%s: %p (%zu) <- %p (%zu); flush: %d\n",
659 	    __func__, outbuffer, outsize, inbuffer, insize, flush);
660 
661 	/* don't compress if we are in a window */
662 	if (cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK || data->kcd_comp_d.kcd_cd_compression_type == KCDCT_NONE) {
663 		assert(cd->kcd_cd_memcpy_f);
664 		if (outsize >= insize) {
665 			cd->kcd_cd_memcpy_f(outbuffer, inbuffer, insize);
666 			if (wrote) {
667 				*wrote = insize;
668 			}
669 			return KERN_SUCCESS;
670 		} else {
671 			return KERN_INSUFFICIENT_BUFFER_SIZE;
672 		}
673 	}
674 
675 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
676 	case KCDCT_ZLIB:
677 		return kcdata_do_compress_zlib(data, inbuffer, insize, outbuffer, outsize, wrote, flush);
678 	default:
679 		panic("invalid compression type 0x%llx in kcdata_do_compress", data->kcd_comp_d.kcd_cd_compression_type);
680 	}
681 }
682 
683 static size_t
684 kcdata_compression_bound_zlib(kcdata_descriptor_t data, size_t size)
685 {
686 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
687 	z_stream *zs = &cd->kcd_cd_zs;
688 
689 	return (size_t) deflateBound(zs, (unsigned long) size);
690 }
691 
692 
693 /*
694  * returns the worst-case, maximum length of the compressed data when
695  * compressing a buffer of size @size using the configured algorithm.
696  */
697 static size_t
698 kcdata_compression_bound(kcdata_descriptor_t data, size_t size)
699 {
700 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
701 	case KCDCT_ZLIB:
702 		return kcdata_compression_bound_zlib(data, size);
703 	case KCDCT_NONE:
704 		return size;
705 	default:
706 		panic("%s: unknown compression method", __func__);
707 	}
708 }
709 
710 /*
711  * kcdata_compress_chunk_with_flags:
712  *		Compress buffer found at @input_data (length @input_size) to the kcdata
713  *		buffer described by @data. This method will construct the kcdata_item_t
714  *		required by parsers using the type information @type and flags @flags.
715  *
716  *	Returns KERN_SUCCESS when successful. Currently, asserts on failure.
717  */
718 kern_return_t
719 kcdata_compress_chunk_with_flags(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size, uint64_t kcdata_flags)
720 {
721 	assert(data);
722 	assert((data->kcd_flags & KCFLAG_USE_COMPRESSION));
723 	assert(input_data);
724 	struct kcdata_item info;
725 	char padding_data[16] = {0};
726 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
727 	size_t wrote = 0;
728 	kern_return_t kr;
729 
730 	kcdata_debug_printf("%s: type: %d input_data: %p (%d) kcdata_flags: 0x%llx\n",
731 	    __func__, type, input_data, input_size, kcdata_flags);
732 
733 	/*
734 	 * first, get memory space. The uncompressed size must fit in the remainder
735 	 * of the kcdata buffer, in case the compression algorithm doesn't actually
736 	 * compress the data at all.
737 	 */
738 	size_t total_uncompressed_size = kcdata_compression_bound(data, (size_t) kcdata_get_memory_size_for_data(input_size));
739 	if (total_uncompressed_size > data->kcd_length ||
740 	    data->kcd_length - total_uncompressed_size < data->kcd_addr_end - data->kcd_addr_begin) {
741 		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %zu\n",
742 		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, total_uncompressed_size);
743 		return KERN_INSUFFICIENT_BUFFER_SIZE;
744 	}
745 	uint32_t padding = kcdata_calc_padding(input_size);
746 	assert(padding < sizeof(padding_data));
747 
748 	void *space_start = (void *) data->kcd_addr_end;
749 	void *space_ptr = space_start;
750 
751 	/* create the output stream */
752 	size_t total_uncompressed_space_remaining = total_uncompressed_size;
753 
754 	/* create the info data */
755 	bzero(&info, sizeof(info));
756 	info.type = type;
757 	info.size = input_size + padding;
758 	info.flags = kcdata_flags;
759 
760 	/*
761 	 * Up to three separate compress calls are needed because of the
762 	 * scatter-gather nature of this operation: the kcdata item header (info)
763 	 * and padding are on the stack, while the actual data is somewhere else.
764 	 */
765 
766 	/* create the input stream for info & compress */
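	/*
	 * Flush choice: if more pieces (payload or padding) still follow, don't
	 * flush yet; otherwise finish the stream when finalizing, else sync-flush
	 * so the output produced so far is byte-complete.
	 */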
767 	enum kcdata_compression_flush flush = (padding || input_size) ? KCDCF_NO_FLUSH :
768 	    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
769 	    KCDCF_SYNC_FLUSH;
770 	kr = kcdata_do_compress(data, &info, sizeof(info), space_ptr, total_uncompressed_space_remaining, &wrote, flush);
771 	if (kr != KERN_SUCCESS) {
772 		return kr;
773 	}
774 	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
775 	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
776 	total_uncompressed_space_remaining -= wrote;
777 
778 	/* If there is input provided, compress that here */
779 	if (input_size) {
780 		flush = padding ? KCDCF_NO_FLUSH :
781 		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH :
782 		    KCDCF_SYNC_FLUSH;
783 		kr = kcdata_do_compress(data, (void *) (uintptr_t) input_data, input_size, space_ptr, total_uncompressed_space_remaining, &wrote, flush);
784 		if (kr != KERN_SUCCESS) {
785 			return kr;
786 		}
787 		kcdata_debug_printf("%s: 2nd wrote = %zu\n", __func__, wrote);
788 		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
789 		total_uncompressed_space_remaining -= wrote;
790 	}
791 
792 	/* If the item and its data require padding to maintain alignment,
793 	 * "compress" that into the output buffer. */
794 	if (padding) {
795 		/* write the padding */
796 		kr = kcdata_do_compress(data, padding_data, padding, space_ptr, total_uncompressed_space_remaining, &wrote,
797 		    cd->kcd_cd_flags & KCD_CD_FLAG_FINALIZE ? KCDCF_FINISH : KCDCF_SYNC_FLUSH);
798 		if (kr != KERN_SUCCESS) {
799 			return kr;
800 		}
801 		kcdata_debug_printf("%s: 3rd wrote = %zu\n", __func__, wrote);
802 		if (wrote == 0) {
803 			return KERN_FAILURE;
804 		}
805 		space_ptr = (void *)((uintptr_t)space_ptr + wrote);
806 		total_uncompressed_space_remaining -= wrote;
807 	}
808 
809 	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= total_uncompressed_size);
810 
811 	/* move the end marker forward */
812 	data->kcd_addr_end = (mach_vm_address_t) space_start + (total_uncompressed_size - total_uncompressed_space_remaining);
813 
814 	return KERN_SUCCESS;
815 }
816 
817 /*
818  * kcdata_compress_chunk:
819  *		Like kcdata_compress_chunk_with_flags(), but uses the default set of kcdata flags,
820  *		i.e. padding and also saves the amount of padding bytes.
821  *
822  * Returns are the same as in kcdata_compress_chunk_with_flags()
823  */
824 kern_return_t
825 kcdata_compress_chunk(kcdata_descriptor_t data, uint32_t type, const void *input_data, uint32_t input_size)
826 {
827 	/* these flags are for kcdata - store that the struct is padded and store the amount of padding bytes */
828 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(input_size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
829 	return kcdata_compress_chunk_with_flags(data, type, input_data, input_size, flags);
830 }
831 
832 kern_return_t
833 kcdata_push_data(kcdata_descriptor_t data, uint32_t type, uint32_t size, const void *input_data)
834 {
835 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
836 		return kcdata_compress_chunk(data, type, input_data, size);
837 	} else {
838 		kern_return_t ret;
839 		mach_vm_address_t uaddr = 0;
840 		ret = kcdata_get_memory_addr(data, type, size, &uaddr);
841 		if (ret != KERN_SUCCESS) {
842 			return ret;
843 		}
844 
845 		kcdata_memcpy(data, uaddr, input_data, size);
846 		return KERN_SUCCESS;
847 	}
848 }
849 
850 kern_return_t
851 kcdata_push_array(kcdata_descriptor_t data, uint32_t type_of_element, uint32_t size_of_element, uint32_t count, const void *input_data)
852 {
853 	uint64_t flags      = type_of_element;
854 	flags               = (flags << 32) | count;
855 	uint32_t total_size = count * size_of_element;
856 	uint32_t pad        = kcdata_calc_padding(total_size);
857 
858 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
859 		return kcdata_compress_chunk_with_flags(data, KCDATA_TYPE_ARRAY_PAD0 | pad, input_data, total_size, flags);
860 	} else {
861 		kern_return_t ret;
862 		mach_vm_address_t uaddr = 0;
863 		ret = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, &uaddr);
864 		if (ret != KERN_SUCCESS) {
865 			return ret;
866 		}
867 
868 		kcdata_memcpy(data, uaddr, input_data, total_size);
869 		return KERN_SUCCESS;
870 	}
871 }
872 
873 /* A few words on how window compression works:
874  *
875  * This is how the buffer looks when the window is opened:
876  *
877  * X---------------------------------------------------------------------X
878  * |                                |                                    |
879  * |   Filled with stackshot data   |            Zero bytes              |
880  * |                                |                                    |
881  * X---------------------------------------------------------------------X
882  *                                  ^
883  *									\ - kcd_addr_end
884  *
885  * Opening a window will save the current kcd_addr_end to kcd_cd_mark_begin.
886  *
887  * Any kcdata_* operation will then push data to the buffer like normal. (If
888  * you call any compressing functions they will pass through, i.e. no
889  * compression will be done.) Once the window is closed, the following takes
890  * place:
891  *
892  * X---------------------------------------------------------------------X
893  * |               |                    |                    |           |
894  * | Existing data |     New data       |   Scratch buffer   |           |
895  * |               |                    |                    |           |
896  * X---------------------------------------------------------------------X
897  *				   ^                    ^                    ^
898  *				   |                    |                    |
899  *				   \ -kcd_cd_mark_begin |                    |
900  *							            |                    |
901  *							            \ - kcd_addr_end     |
902  *							                                 |
903  *		 kcd_addr_end + (kcd_addr_end - kcd_cd_mark_begin) - /
904  *
905  *	(1) The data between kcd_cd_mark_begin and kcd_addr_end is fed to the
906  *      compression algorithm to compress to the scratch buffer.
907  *  (2) The scratch buffer's contents are copied into the area denoted "New
908  *      data" above. Effectively overwriting the uncompressed data with the
909  *      compressed one.
910  *  (3) kcd_addr_end is then rewound to kcd_cd_mark_begin + sizeof_compressed_data
911  */
912 
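/*
 * Sketch of how a caller typically uses the window (pass-through) mode:
 *
 *	kcdata_compression_window_open(data);
 *	// ... kcdata_get_memory_addr()/kcdata_memcpy() writes land uncompressed ...
 *	kr = kcdata_compression_window_close(data);	// compresses the marked region in place
 */
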
913 /* Record the state, and restart compression from this later */
914 void
915 kcdata_compression_window_open(kcdata_descriptor_t data)
916 {
917 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
918 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
919 
920 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
921 		cd->kcd_cd_flags |= KCD_CD_FLAG_IN_MARK;
922 		cd->kcd_cd_mark_begin = data->kcd_addr_end;
923 	}
924 }
925 
926 /* Compress the region between the mark and the current end */
927 kern_return_t
928 kcdata_compression_window_close(kcdata_descriptor_t data)
929 {
930 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
931 	uint64_t total_size, max_size;
932 	void *space_start, *space_ptr;
933 	size_t total_uncompressed_space_remaining, wrote = 0;
934 	kern_return_t kr;
935 
936 	if ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0) {
937 		return KERN_SUCCESS;
938 	}
939 
940 	assert(cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK);
941 
942 	if (data->kcd_addr_end == (mach_vm_address_t) cd->kcd_cd_mark_begin) {
943 		/* clear the window marker and return, this is a no-op */
944 		cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
945 		return KERN_SUCCESS;
946 	}
947 
948 	assert(cd->kcd_cd_mark_begin < data->kcd_addr_end);
949 	total_size = data->kcd_addr_end - (uint64_t) cd->kcd_cd_mark_begin;
950 	max_size = (uint64_t) kcdata_compression_bound(data, total_size);
951 	kcdata_debug_printf("%s: total_size = %lld\n", __func__, total_size);
952 
953 	/*
954 	 * first, get memory space. The uncompressed size must fit in the remainder
955 	 * of the kcdata buffer, in case the compression algorithm doesn't actually
956 	 * compress the data at all.
957 	 */
958 	if (max_size > data->kcd_length ||
959 	    data->kcd_length - max_size < data->kcd_addr_end - data->kcd_addr_begin) {
960 		kcdata_debug_printf("%s: insufficient buffer size: kcd_length => %d e-b=> %lld our size: %lld\n",
961 		    __func__, data->kcd_length, data->kcd_addr_end - data->kcd_addr_begin, max_size);
962 		return KERN_INSUFFICIENT_BUFFER_SIZE;
963 	}
964 
965 	/* clear the window marker */
966 	cd->kcd_cd_flags &= ~KCD_CD_FLAG_IN_MARK;
967 
968 	space_start = (void *) data->kcd_addr_end;
969 	space_ptr = space_start;
970 	total_uncompressed_space_remaining = (unsigned int) max_size;
971 	kr = kcdata_do_compress(data, (void *) cd->kcd_cd_mark_begin, total_size, space_ptr,
972 	    total_uncompressed_space_remaining, &wrote, KCDCF_SYNC_FLUSH);
973 	if (kr != KERN_SUCCESS) {
974 		return kr;
975 	}
976 	kcdata_debug_printf("%s: first wrote = %zu\n", __func__, wrote);
977 	if (wrote == 0) {
978 		return KERN_FAILURE;
979 	}
980 	space_ptr = (void *)((uintptr_t)space_ptr + wrote);
981 	total_uncompressed_space_remaining  -= wrote;
982 
983 	assert((size_t)((uintptr_t)space_ptr - (uintptr_t)space_start) <= max_size);
984 
985 	/* copy to the original location */
986 	kcdata_memcpy(data, cd->kcd_cd_mark_begin, space_start, (uint32_t) (max_size - total_uncompressed_space_remaining));
987 
988 	/* rewind the end marker */
989 	data->kcd_addr_end = cd->kcd_cd_mark_begin + (max_size - total_uncompressed_space_remaining);
990 
991 	return KERN_SUCCESS;
992 }
993 
994 static kern_return_t
995 kcdata_get_compression_stats_zlib(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
996 {
997 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
998 	z_stream *zs = &cd->kcd_cd_zs;
999 
1000 	assert((cd->kcd_cd_flags & KCD_CD_FLAG_IN_MARK) == 0);
1001 
1002 	*totalout = (uint64_t) zs->total_out;
1003 	*totalin = (uint64_t) zs->total_in;
1004 
1005 	return KERN_SUCCESS;
1006 }
1007 
1008 static kern_return_t
1009 kcdata_get_compression_stats(kcdata_descriptor_t data, uint64_t *totalout, uint64_t *totalin)
1010 {
1011 	kern_return_t kr;
1012 
1013 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
1014 	case KCDCT_ZLIB:
1015 		kr = kcdata_get_compression_stats_zlib(data, totalout, totalin);
1016 		break;
1017 	case KCDCT_NONE:
1018 		*totalout = *totalin = kcdata_memory_get_used_bytes(data);
1019 		kr = KERN_SUCCESS;
1020 		break;
1021 	default:
1022 		panic("invalid compression type 0x%llx in kcdata_get_compression_stats", (data->kcd_comp_d.kcd_cd_compression_type));
1023 	}
1024 
1025 	return kr;
1026 }
1027 
1028 kern_return_t
1029 kcdata_write_compression_stats(kcdata_descriptor_t data)
1030 {
1031 	kern_return_t kr;
1032 	uint64_t totalout, totalin;
1033 
1034 	kr = kcdata_get_compression_stats(data, &totalout, &totalin);
1035 	if (kr != KERN_SUCCESS) {
1036 		return kr;
1037 	}
1038 
1039 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalout_addr = totalout;
1040 	*(uint64_t *)data->kcd_comp_d.kcd_cd_totalin_addr = totalin;
1041 
1042 	return kr;
1043 }
1044 
1045 static kern_return_t
1046 kcdata_finish_compression_zlib(kcdata_descriptor_t data)
1047 {
1048 	struct kcdata_compress_descriptor *cd = &data->kcd_comp_d;
1049 	z_stream *zs = &cd->kcd_cd_zs;
1050 
1051 	/*
1052 	 * macOS on x86 w/ coprocessor ver. 2 and later context: Stackshot compression leaves artifacts
1053 	 * in the panic buffer which interferes with CRC checks. The CRC is calculated here over the full
1054 	 * buffer but only the portion with valid panic data is sent to iBoot via the SMC. When iBoot
1055 	 * calculates the CRC to compare with the value in the header it uses a zero-filled buffer.
1056 	 * The stackshot compression leaves non-zero bytes behind so those must be cleared prior to the CRC calculation.
1057 	 * This doesn't get the compression metadata; that's zeroed by kcdata_release_endallocs().
1058 	 *
1059 	 * All other contexts: The stackshot compression artifacts are present in its panic buffer but the CRC check
1060 	 * is done on the same buffer for the before and after calculation so there's nothing functionally
1061 	 * broken. The same buffer cleanup is done here for completeness' sake.
1062 	 * From rdar://problem/64381661
1063 	 */
1064 
1065 	void* stackshot_end = (char*)data->kcd_addr_begin + kcdata_memory_get_used_bytes(data);
1066 	uint32_t zero_fill_size = data->kcd_length - kcdata_memory_get_used_bytes(data);
1067 	bzero(stackshot_end, zero_fill_size);
1068 
1069 	if (deflateEnd(zs) == Z_OK) {
1070 		return KERN_SUCCESS;
1071 	} else {
1072 		return KERN_FAILURE;
1073 	}
1074 }
1075 
1076 static kern_return_t
1077 kcdata_finish_compression(kcdata_descriptor_t data)
1078 {
1079 	kcdata_write_compression_stats(data);
1080 
1081 	switch (data->kcd_comp_d.kcd_cd_compression_type) {
1082 	case KCDCT_ZLIB:
1083 		return kcdata_finish_compression_zlib(data);
1084 	case KCDCT_NONE:
1085 		return KERN_SUCCESS;
1086 	default:
1087 		panic("invalid compression type 0x%llx in kcdata_finish_compression", data->kcd_comp_d.kcd_cd_compression_type);
1088 	}
1089 }
1090 
1091 kern_return_t
1092 kcdata_finish(kcdata_descriptor_t data)
1093 {
1094 	int ret = KERN_SUCCESS;
1095 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
1096 		ret = kcdata_finish_compression(data);
1097 	}
1098 	kcdata_release_endallocs(data);
1099 	return ret;
1100 }
1101 
1102 void
1103 kcd_finalize_compression(kcdata_descriptor_t data)
1104 {
1105 	if (data->kcd_flags & KCFLAG_USE_COMPRESSION) {
1106 		data->kcd_comp_d.kcd_cd_flags |= KCD_CD_FLAG_FINALIZE;
1107 	}
1108 }
1109 
1110 /*
1111  * Routine: kcdata_get_memory_addr
1112  * Desc: get memory address in the userspace memory for corpse info
1113  *       NOTE: The caller is responsible for zeroing the resulting memory or
1114  *             using other means to mark memory if it has failed populating the
1115  *             data in the middle of the operation.
1116  * params:  data - pointer describing the crash info allocation
1117  *	        type - type of data to be put. See corpse.h for defined types
1118  *          size - size requested. The header describes this size
1119  * returns: mach_vm_address_t address in user memory for copyout().
1120  */
1121 kern_return_t
1122 kcdata_get_memory_addr(kcdata_descriptor_t data, uint32_t type, uint32_t size, mach_vm_address_t * user_addr)
1123 {
1124 	/* record number of padding bytes as lower 4 bits of flags */
1125 	uint64_t flags = (KCDATA_FLAGS_STRUCT_PADDING_MASK & kcdata_calc_padding(size)) | KCDATA_FLAGS_STRUCT_HAS_PADDING;
1126 	return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
1127 }
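
/*
 * Example (sketch): reserving space and then filling it, which is the pattern
 * the NOTE above describes -- the caller owns zeroing/population. MY_ITEM_TYPE
 * is a placeholder type id:
 *
 *	mach_vm_address_t addr = 0;
 *	kr = kcdata_get_memory_addr(data, MY_ITEM_TYPE, sizeof(item), &addr);
 *	if (kr == KERN_SUCCESS) {
 *		kcdata_memcpy(data, addr, &item, sizeof(item));
 *	}
 */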
1128 
1129 /*
1130  * Routine: kcdata_write_buffer_end
1131  *
1132  * Desc: Write buffer end marker.  This does not advance the end pointer in the
1133  * kcdata_descriptor_t, so it may be used conservatively before additional data
1134  * is added, as long as it is at least called after the last time data is added.
1135  *
1136  * params:  data - pointer describing the crash info allocation
1137  */
1138 
1139 kern_return_t
1140 kcdata_write_buffer_end(kcdata_descriptor_t data)
1141 {
1142 	struct kcdata_item info;
1143 	bzero(&info, sizeof(info));
1144 	info.type = KCDATA_TYPE_BUFFER_END;
1145 	info.size = 0;
1146 	return kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
1147 }
1148 
1149 /*
1150  * Routine: kcdata_get_memory_addr_with_flavor
1151  * Desc: internal function with flags field. See documentation for kcdata_get_memory_addr for details
1152  */
1153 
1154 static kern_return_t
1155 kcdata_get_memory_addr_with_flavor(
1156 	kcdata_descriptor_t data,
1157 	uint32_t type,
1158 	uint32_t size,
1159 	uint64_t flags,
1160 	mach_vm_address_t *user_addr)
1161 {
1162 	kern_return_t kr;
1163 	struct kcdata_item info;
1164 
1165 	uint32_t orig_size = size;
1166 	/* make sure 16 byte aligned */
1167 	uint32_t padding = kcdata_calc_padding(size);
1168 	size += padding;
1169 	uint32_t total_size  = size + sizeof(info);
1170 
1171 	if (user_addr == NULL || data == NULL || total_size + sizeof(info) < orig_size) {
1172 		return KERN_INVALID_ARGUMENT;
1173 	}
1174 
1175 	assert(((data->kcd_flags & KCFLAG_USE_COMPRESSION) && (data->kcd_comp_d.kcd_cd_flags & KCD_CD_FLAG_IN_MARK))
1176 	    || ((data->kcd_flags & KCFLAG_USE_COMPRESSION) == 0));
1177 
1178 	bzero(&info, sizeof(info));
1179 	info.type  = type;
1180 	info.size = size;
1181 	info.flags = flags;
1182 
1183 	/* check available memory, including trailer size for KCDATA_TYPE_BUFFER_END */
1184 	if (total_size + sizeof(info) > data->kcd_length ||
1185 	    data->kcd_length - (total_size + sizeof(info)) < data->kcd_addr_end - data->kcd_addr_begin) {
1186 		if (data->kcd_alloc_callback) {
1187 			size_t const hdr_ftr_sz = 2 * sizeof(info);
1188 			kcdata_descriptor_t new_data = data->kcd_alloc_callback(data, total_size + hdr_ftr_sz);
1189 			if (new_data != NULL) {
1190 				*data = *new_data;
1191 				return kcdata_get_memory_addr_with_flavor(data, type, size, flags, user_addr);
1192 			}
1193 		}
1194 		return KERN_INSUFFICIENT_BUFFER_SIZE;
1195 	}
1196 
1197 	kr = kcdata_memcpy(data, data->kcd_addr_end, &info, sizeof(info));
1198 	if (kr) {
1199 		return kr;
1200 	}
1201 
1202 	data->kcd_addr_end += sizeof(info);
1203 
1204 	if (padding) {
1205 		kr = kcdata_bzero(data, data->kcd_addr_end + size - padding, padding);
1206 		if (kr) {
1207 			return kr;
1208 		}
1209 	}
1210 
1211 	*user_addr = data->kcd_addr_end;
1212 	data->kcd_addr_end += size;
1213 
1214 	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
1215 		/* setup the end header as well */
1216 		return kcdata_write_buffer_end(data);
1217 	} else {
1218 		return KERN_SUCCESS;
1219 	}
1220 }
1221 
1222 /* Routine: kcdata_get_memory_size_for_data
1223  * Desc: returns the amount of memory that is required to store the information
1224  *       in kcdata
1225  */
1226 static size_t
1227 kcdata_get_memory_size_for_data(uint32_t size)
1228 {
1229 	return size + kcdata_calc_padding(size) + sizeof(struct kcdata_item);
1230 }
1231 
1232 /*
1233  * Routine: kcdata_get_memory_addr_for_array
1234  * Desc: get memory address in the userspace memory for corpse info
1235  *       NOTE: The caller is responsible for zeroing the resulting memory or
1236  *             using other means to mark memory if it has failed populating the
1237  *             data in the middle of the operation.
1238  * params:  data - pointer describing the crash info allocation
1239  *          type_of_element - type of data to be put. See kern_cdata.h for defined types
1240  *          size_of_element - size of element. The header describes this size
1241  *          count - num of elements in array.
1242  * returns: mach_vm_address_t address in user memory for copyout().
1243  */
1244 
1245 kern_return_t
1246 kcdata_get_memory_addr_for_array(
1247 	kcdata_descriptor_t data,
1248 	uint32_t type_of_element,
1249 	uint32_t size_of_element,
1250 	uint32_t count,
1251 	mach_vm_address_t *user_addr)
1252 {
1253 	/* for arrays we record the number of padding bytes as the low-order 4 bits
1254 	 * of the type field.  KCDATA_TYPE_ARRAY_PAD{x} means x bytes of pad. */
1255 	uint64_t flags      = type_of_element;
1256 	flags               = (flags << 32) | count;
1257 	uint32_t total_size = count * size_of_element;
1258 	uint32_t pad        = kcdata_calc_padding(total_size);
1259 
1260 	return kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_ARRAY_PAD0 | pad, total_size, flags, user_addr);
1261 }
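
/*
 * Example (sketch): reserving an array of 4 uint64_t's using a placeholder
 * element type id; the element type and count end up encoded in the item's
 * flags word:
 *
 *	mach_vm_address_t addr = 0;
 *	uint64_t vals[4] = { ... };
 *	kr = kcdata_get_memory_addr_for_array(data, MY_ELEM_TYPE, sizeof(uint64_t), 4, &addr);
 *	if (kr == KERN_SUCCESS) {
 *		kcdata_memcpy(data, addr, vals, sizeof(vals));
 *	}
 */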
1262 
1263 /*
1264  * Routine: kcdata_add_container_marker
1265  * Desc: Add a container marker in the buffer for type and identifier.
1266  * params:  data - pointer describing the crash info allocation
1267  *          header_type - one of (KCDATA_TYPE_CONTAINER_BEGIN ,KCDATA_TYPE_CONTAINER_END)
1268  *          container_type - type of data to be put. See kern_cdata.h for defined types
1269  *          identifier - unique identifier. This is required to match nested containers.
1270  * returns: return value of kcdata_get_memory_addr()
1271  */
1272 
1273 kern_return_t
1274 kcdata_add_container_marker(
1275 	kcdata_descriptor_t data,
1276 	uint32_t header_type,
1277 	uint32_t container_type,
1278 	uint64_t identifier)
1279 {
1280 	mach_vm_address_t user_addr;
1281 	kern_return_t kr;
1282 	uint32_t data_size;
1283 
1284 	assert(header_type == KCDATA_TYPE_CONTAINER_END || header_type == KCDATA_TYPE_CONTAINER_BEGIN);
1285 
1286 	data_size = (header_type == KCDATA_TYPE_CONTAINER_BEGIN)? sizeof(uint32_t): 0;
1287 
1288 	if (!(data->kcd_flags & KCFLAG_USE_COMPRESSION)) {
1289 		kr = kcdata_get_memory_addr_with_flavor(data, header_type, data_size, identifier, &user_addr);
1290 		if (kr != KERN_SUCCESS) {
1291 			return kr;
1292 		}
1293 
1294 		if (data_size) {
1295 			kr = kcdata_memcpy(data, user_addr, &container_type, data_size);
1296 		}
1297 	} else {
1298 		kr = kcdata_compress_chunk_with_flags(data, header_type, &container_type, data_size, identifier);
1299 	}
1300 
1301 	return kr;
1302 }
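
/*
 * Example (sketch): containers are bracketed with matching identifiers so a
 * parser can pair BEGIN/END markers even when containers nest
 * (MY_CONTAINER_TYPE is a placeholder):
 *
 *	uint64_t id = some_unique_id;
 *	kcdata_add_container_marker(data, KCDATA_TYPE_CONTAINER_BEGIN, MY_CONTAINER_TYPE, id);
 *	// ... items belonging to the container ...
 *	kcdata_add_container_marker(data, KCDATA_TYPE_CONTAINER_END, MY_CONTAINER_TYPE, id);
 */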
1303 
1304 /*
1305  * Routine: kcdata_undo_addcontainer_begin
1306  * Desc: call this after adding a container begin but before adding anything else to revert.
1307  */
1308 kern_return_t
1309 kcdata_undo_add_container_begin(kcdata_descriptor_t data)
1310 {
1311 	/*
1312 	 * the payload of a container begin is a single uint64_t.  It is padded out
1313 	 * to 16 bytes.
1314 	 */
1315 	const mach_vm_address_t padded_payload_size = 16;
1316 	data->kcd_addr_end -= sizeof(struct kcdata_item) + padded_payload_size;
1317 
1318 	if (!(data->kcd_flags & KCFLAG_NO_AUTO_ENDBUFFER)) {
1319 		/* setup the end header as well */
1320 		return kcdata_write_buffer_end(data);
1321 	} else {
1322 		return KERN_SUCCESS;
1323 	}
1324 }
1325 
1326 /*
1327  * Routine: kcdata_memcpy
1328  * Desc: a common function to copy data out based on either copyout or memcopy flags
1329  * params:  data - pointer describing the kcdata buffer
1330  *          dst_addr - destination address
1331  *          src_addr - source address
1332  *          size - size in bytes to copy.
1333  * returns: KERN_NO_ACCESS if copyout fails.
1334  */
1335 
1336 kern_return_t
1337 kcdata_memcpy(kcdata_descriptor_t data, mach_vm_address_t dst_addr, const void *src_addr, uint32_t size)
1338 {
1339 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1340 		if (copyout(src_addr, dst_addr, size)) {
1341 			return KERN_NO_ACCESS;
1342 		}
1343 	} else {
1344 		memcpy((void *)dst_addr, src_addr, size);
1345 	}
1346 	return KERN_SUCCESS;
1347 }
1348 
1349 /*
1350  * Routine: kcdata_bzero
1351  * Desc: zero out a portion of a kcdata buffer.
1352  */
1353 kern_return_t
1354 kcdata_bzero(kcdata_descriptor_t data, mach_vm_address_t dst_addr, uint32_t size)
1355 {
1356 	kern_return_t kr = KERN_SUCCESS;
1357 	if (data->kcd_flags & KCFLAG_USE_COPYOUT) {
1358 		uint8_t zeros[16] = {};
1359 		while (size) {
1360 			uint32_t block_size = MIN(size, 16);
1361 			kr = copyout(&zeros, dst_addr, block_size);
1362 			if (kr) {
1363 				return KERN_NO_ACCESS;
1364 			}
1365 			size -= block_size;
1366 		}
1367 		return KERN_SUCCESS;
1368 	} else {
1369 		bzero((void*)dst_addr, size);
1370 		return KERN_SUCCESS;
1371 	}
1372 }
1373 
1374 /*
1375  * Routine: kcdata_add_type_definition
1376  * Desc: add type definition to kcdata buffer.
1377  *       see feature description in documentation above.
1378  * params:  data - pointer describing the kcdata buffer
1379  *          type_id - unique type identifier for this data
1380  *          type_name - a string of max KCDATA_DESC_MAXLEN size for name of type
1381  *          elements_array - address to descriptors for each field in struct
1382  *          elements_count - count of how many fields are there in struct.
1383  * returns: return code from kcdata_get_memory_addr in case of failure.
1384  */
1385 
1386 kern_return_t
1387 kcdata_add_type_definition(
1388 	kcdata_descriptor_t data,
1389 	uint32_t type_id,
1390 	char *type_name,
1391 	struct kcdata_subtype_descriptor *elements_array_addr,
1392 	uint32_t elements_count)
1393 {
1394 	kern_return_t kr = KERN_SUCCESS;
1395 	struct kcdata_type_definition kc_type_definition;
1396 	mach_vm_address_t user_addr;
1397 	uint32_t total_size = sizeof(struct kcdata_type_definition);
1398 	bzero(&kc_type_definition, sizeof(kc_type_definition));
1399 
1400 	if (strlen(type_name) >= KCDATA_DESC_MAXLEN) {
1401 		return KERN_INVALID_ARGUMENT;
1402 	}
1403 	strlcpy(&kc_type_definition.kct_name[0], type_name, KCDATA_DESC_MAXLEN);
1404 	kc_type_definition.kct_num_elements = elements_count;
1405 	kc_type_definition.kct_type_identifier = type_id;
1406 
1407 	total_size += elements_count * sizeof(struct kcdata_subtype_descriptor);
1408 	/* record number of padding bytes as lower 4 bits of flags */
1409 	if (KERN_SUCCESS != (kr = kcdata_get_memory_addr_with_flavor(data, KCDATA_TYPE_TYPEDEFINTION, total_size,
1410 	    kcdata_calc_padding(total_size), &user_addr))) {
1411 		return kr;
1412 	}
1413 	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)&kc_type_definition, sizeof(struct kcdata_type_definition)))) {
1414 		return kr;
1415 	}
1416 	user_addr += sizeof(struct kcdata_type_definition);
1417 	if (KERN_SUCCESS != (kr = kcdata_memcpy(data, user_addr, (void *)elements_array_addr, elements_count * sizeof(struct kcdata_subtype_descriptor)))) {
1418 		return kr;
1419 	}
1420 	return kr;
1421 }
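
/*
 * Example (sketch): publishing a custom type so userspace parsers can decode
 * it. The subtype descriptors are filled out per the layout in kcdata.h
 * (field initialization elided here; MY_STRUCT_TYPE_ID is a placeholder):
 *
 *	struct kcdata_subtype_descriptor subtypes[2];
 *	// ... describe each field of struct my_struct in subtypes[0..1] ...
 *	kr = kcdata_add_type_definition(data, MY_STRUCT_TYPE_ID, "my_struct",
 *	    subtypes, 2);
 */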
1422 
1423 kern_return_t
1424 kcdata_add_uint64_with_description(kcdata_descriptor_t data_desc, uint64_t data, const char * description)
1425 {
1426 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1427 		return KERN_INVALID_ARGUMENT;
1428 	}
1429 
1430 	kern_return_t kr = 0;
1431 	mach_vm_address_t user_addr;
1432 	struct _uint64_with_description_data save_data;
1433 	const uint64_t size_req = sizeof(save_data);
1434 	bzero(&save_data, size_req);
1435 
1436 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1437 	save_data.data = data;
1438 
1439 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1440 		/* allocate space for the output */
1441 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT64_DESC, &save_data, size_req);
1442 	}
1443 
1444 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT64_DESC, size_req, &user_addr);
1445 	if (kr != KERN_SUCCESS) {
1446 		return kr;
1447 	}
1448 
1449 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1450 		if (copyout(&save_data, user_addr, size_req)) {
1451 			return KERN_NO_ACCESS;
1452 		}
1453 	} else {
1454 		memcpy((void *)user_addr, &save_data, size_req);
1455 	}
1456 	return KERN_SUCCESS;
1457 }
1458 
1459 kern_return_t
1460 kcdata_add_uint32_with_description(
1461 	kcdata_descriptor_t data_desc,
1462 	uint32_t data,
1463 	const char *description)
1464 {
1465 	assert(strlen(description) < KCDATA_DESC_MAXLEN);
1466 	if (strlen(description) >= KCDATA_DESC_MAXLEN) {
1467 		return KERN_INVALID_ARGUMENT;
1468 	}
1469 	kern_return_t kr = 0;
1470 	mach_vm_address_t user_addr;
1471 	struct _uint32_with_description_data save_data;
1472 	const uint64_t size_req = sizeof(save_data);
1473 
1474 	bzero(&save_data, size_req);
1475 	strlcpy(&(save_data.desc[0]), description, sizeof(save_data.desc));
1476 	save_data.data = data;
1477 
1478 	if (data_desc->kcd_flags & KCFLAG_USE_COMPRESSION) {
1479 		/* allocate space for the output */
1480 		return kcdata_compress_chunk(data_desc, KCDATA_TYPE_UINT32_DESC, &save_data, size_req);
1481 	}
1482 
1483 	kr = kcdata_get_memory_addr(data_desc, KCDATA_TYPE_UINT32_DESC, size_req, &user_addr);
1484 	if (kr != KERN_SUCCESS) {
1485 		return kr;
1486 	}
1487 
1488 	if (data_desc->kcd_flags & KCFLAG_USE_COPYOUT) {
1489 		if (copyout(&save_data, user_addr, size_req)) {
1490 			return KERN_NO_ACCESS;
1491 		}
1492 	} else {
1493 		memcpy((void *)user_addr, &save_data, size_req);
1494 	}
1495 
1496 	return KERN_SUCCESS;
1497 }
1498 
1499 
1500 /* end buffer management api */
1501