xref: /xnu-11215/bsd/kern/code_signing/txm.c (revision 4f1223e8)
1 /*
2  * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <stdarg.h>
24 #include <stdatomic.h>
25 #include <os/overflow.h>
26 #include <machine/atomic.h>
27 #include <mach/vm_param.h>
28 #include <mach/vm_map.h>
29 #include <mach/shared_region.h>
30 #include <vm/vm_kern_xnu.h>
31 #include <kern/zalloc.h>
32 #include <kern/kalloc.h>
33 #include <kern/assert.h>
34 #include <kern/locks.h>
35 #include <kern/recount.h>
36 #include <kern/sched_prim.h>
37 #include <kern/lock_rw.h>
38 #include <libkern/libkern.h>
39 #include <libkern/section_keywords.h>
40 #include <libkern/coretrust/coretrust.h>
41 #include <libkern/amfi/amfi.h>
42 #include <pexpert/pexpert.h>
43 #include <sys/vm.h>
44 #include <sys/proc.h>
45 #include <sys/codesign.h>
46 #include <sys/code_signing.h>
47 #include <sys/sysctl.h>
48 #include <uuid/uuid.h>
49 #include <IOKit/IOLib.h>
50 #include <IOKit/IOBSD.h>
51 
52 #if CONFIG_SPTM
53 /*
54  * The TrustedExecutionMonitor environment works in tandem with the SPTM to provide code
55  * signing and memory isolation enforcement for data structures critical to ensuring that
56  * all code executed on the system is authorized to do so.
57  *
58  * Unless the data is managed by TXM itself, XNU needs to page-align everything, make the
59  * relevant type transfer, and then reference the memory as read-only.
60  *
61  * TXM enforces concurrency on its side, but only through try-locks. Upon a failure
62  * to acquire a lock, TXM will panic. As a result, to ensure single-threaded behavior,
63  * the kernel also has to take corresponding locks on its side before calling into TXM.
64  */
65 #include <sys/trusted_execution_monitor.h>
66 #include <pexpert/arm64/board_config.h>
67 
68 /* Lock group used for all locks within the kernel for TXM */
69 LCK_GRP_DECLARE(txm_lck_grp, "txm_code_signing_lck_grp");
70 
71 #pragma mark Utilities
72 
73 /* Number of thread stacks is known at build-time */
74 #define NUM_TXM_THREAD_STACKS (MAX_CPUS)
75 txm_thread_stack_t thread_stacks[NUM_TXM_THREAD_STACKS] = {0};
76 
77 /* Singly-linked-list head for thread stacks */
78 SLIST_HEAD(thread_stack_head, _txm_thread_stack) thread_stacks_head =
79     SLIST_HEAD_INITIALIZER(thread_stacks_head);
80 
81 static decl_lck_mtx_data(, thread_stacks_lock);
82 static void *thread_stack_event = NULL;
83 
84 static void
85 setup_thread_stacks(void)
86 {
87 	extern const sptm_bootstrap_args_xnu_t *SPTMArgs;
88 	txm_thread_stack_t *thread_stack = NULL;
89 
90 	/* Initialize each thread stack and add it to the list */
91 	for (uint32_t i = 0; i < NUM_TXM_THREAD_STACKS; i++) {
92 		thread_stack = &thread_stacks[i];
93 
94 		/* Acquire the thread stack virtual mapping */
95 		thread_stack->thread_stack_papt = SPTMArgs->txm_thread_stacks[i];
96 
97 		/* Acquire the thread stack physical page */
98 		thread_stack->thread_stack_phys = (uintptr_t)kvtophys_nofail(
99 			thread_stack->thread_stack_papt);
100 
101 		/* Resolve the pointer to the thread stack data */
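		/*
		 * Layout note (inferred from the offset computed below): the
		 * TXMThreadStack_t bookkeeping appears to live in the final 1024
		 * bytes of the stack page, with the remainder of the page used
		 * as the stack itself.
		 */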
102 		thread_stack->thread_stack_data =
103 		    (TXMThreadStack_t*)(thread_stack->thread_stack_papt + (PAGE_SIZE - 1024));
104 
105 		/* Add thread stack to the list head */
106 		SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
107 	}
108 
109 	/* Initialize the thread stacks lock */
110 	lck_mtx_init(&thread_stacks_lock, &txm_lck_grp, 0);
111 }
112 
113 static txm_thread_stack_t*
114 acquire_thread_stack(void)
115 {
116 	txm_thread_stack_t *thread_stack = NULL;
117 
118 	/* Lock the thread stack list */
119 	lck_mtx_lock(&thread_stacks_lock);
120 
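	/*
	 * NOTE: lck_mtx_sleep() drops the mutex while blocked and reacquires it
	 * before returning (LCK_SLEEP_DEFAULT), so the emptiness check is always
	 * re-evaluated with the lock held. The panic below is a defensive check
	 * which should be unreachable.
	 */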
121 	while (SLIST_EMPTY(&thread_stacks_head) == true) {
122 		lck_mtx_sleep(
123 			&thread_stacks_lock,
124 			LCK_SLEEP_DEFAULT,
125 			&thread_stack_event,
126 			THREAD_UNINT);
127 	}
128 
129 	if (SLIST_EMPTY(&thread_stacks_head) == true) {
130 		panic("unable to acquire a thread stack for TXM");
131 	}
132 
133 	/* Use the first available thread stack */
134 	thread_stack = SLIST_FIRST(&thread_stacks_head);
135 
136 	/* Remove the thread stack from the list */
137 	SLIST_REMOVE_HEAD(&thread_stacks_head, link);
138 
139 	/* Unlock the thread stack list */
140 	lck_mtx_unlock(&thread_stacks_lock);
141 
142 	/* Associate the thread stack with the current thread */
143 	thread_associate_txm_thread_stack(thread_stack->thread_stack_phys);
144 
145 	return thread_stack;
146 }
147 
148 static void
149 release_thread_stack(
150 	txm_thread_stack_t* thread_stack)
151 {
152 	/* Remove the TXM thread stack association with the current thread */
153 	thread_disassociate_txm_thread_stack(thread_stack->thread_stack_phys);
154 
155 	/* Lock the thread stack list */
156 	lck_mtx_lock(&thread_stacks_lock);
157 
158 	/* Add the thread stack at the list head */
159 	SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
160 
161 	/* Unlock the thread stack list */
162 	lck_mtx_unlock(&thread_stacks_lock);
163 
164 	/* Wake up any threads waiting to acquire a thread stack */
165 	thread_wakeup(&thread_stack_event);
166 }
167 
168 static kern_return_t
169 txm_parse_return(
170 	TXMReturn_t txm_ret)
171 {
172 	switch (txm_ret.returnCode) {
173 	case kTXMSuccess:
174 		return KERN_SUCCESS;
175 
176 	case kTXMReturnOutOfMemory:
177 		return KERN_RESOURCE_SHORTAGE;
178 
179 	case kTXMReturnNotFound:
180 		return KERN_NOT_FOUND;
181 
182 	case kTXMReturnNotSupported:
183 		return KERN_NOT_SUPPORTED;
184 
185 #if kTXMKernelAPIVersion >= 6
186 	case kTXMReturnTryAgain:
187 		return KERN_OPERATION_TIMED_OUT;
188 #endif
189 
190 	default:
191 		return KERN_FAILURE;
192 	}
193 }
194 
195 static void
196 txm_print_return(
197 	TXMKernelSelector_t selector,
198 	TXMReturn_t txm_ret)
199 {
200 	/*
201 	 * We specifically use IOLog instead of printf since printf is compiled out on
202 	 * RELEASE kernels. We want to ensure that errors from TXM are captured within
203 	 * sysdiagnoses from the field.
204 	 */
205 
206 	if (txm_ret.returnCode == kTXMSuccess) {
207 		return;
208 	} else if (txm_ret.returnCode == kTXMReturnTrustCache) {
209 		IOLog("TXM [Error]: TrustCache: selector: %u | 0x%02X | 0x%02X | %u\n",
210 		    selector, txm_ret.tcRet.component, txm_ret.tcRet.error, txm_ret.tcRet.uniqueError);
211 	} else if (txm_ret.returnCode == kTXMReturnCodeSignature) {
212 		IOLog("TXM [Error]: CodeSignature: selector: %u | 0x%02X | 0x%02X | %u\n",
213 		    selector, txm_ret.csRet.component, txm_ret.csRet.error, txm_ret.csRet.uniqueError);
214 	} else if (txm_ret.returnCode == kTXMReturnCodeErrno) {
215 		IOLog("TXM [Error]: Errno: selector: %u | %d\n",
216 		    selector, txm_ret.errnoRet);
217 	} else {
218 		IOLog("TXM [Error]: selector: %u | %u\n",
219 		    selector, txm_ret.returnCode);
220 	}
221 }
222 
223 #pragma mark Page Allocation
224 
225 static void
226 txm_add_page(void)
227 {
228 	txm_call_t txm_call = {
229 		.selector = kTXMKernelSelectorAddFreeListPage,
230 		.failure_fatal = true,
231 		.num_input_args = 1
232 	};
233 
234 	/* Allocate a page from the VM -- transfers page to TXM internally */
235 	vm_map_address_t phys_addr = pmap_txm_allocate_page();
236 
237 	/* Add this page to the TXM free list */
238 	txm_kernel_call(&txm_call, phys_addr);
239 }
240 
241 #pragma mark Calls
242 
243 static void
244 txm_kernel_call_registers_setup(
245 	txm_call_t *parameters,
246 	sptm_call_regs_t *registers,
247 	va_list args)
248 {
249 	/*
250 	 * We are only ever allowed a maximum of 7 arguments for calling into TXM.
251 	 * This is because the SPTM dispatch only sets up registers x0-x7 for the
252 	 * call, and x0 is always reserved for passing in a thread stack for TXM
253 	 * to operate on.
254 	 */
255 
256 	switch (parameters->num_input_args) {
257 	case 7:
258 		registers->x1 = va_arg(args, uintptr_t);
259 		registers->x2 = va_arg(args, uintptr_t);
260 		registers->x3 = va_arg(args, uintptr_t);
261 		registers->x4 = va_arg(args, uintptr_t);
262 		registers->x5 = va_arg(args, uintptr_t);
263 		registers->x6 = va_arg(args, uintptr_t);
264 		registers->x7 = va_arg(args, uintptr_t);
265 		break;
266 
267 	case 6:
268 		registers->x1 = va_arg(args, uintptr_t);
269 		registers->x2 = va_arg(args, uintptr_t);
270 		registers->x3 = va_arg(args, uintptr_t);
271 		registers->x4 = va_arg(args, uintptr_t);
272 		registers->x5 = va_arg(args, uintptr_t);
273 		registers->x6 = va_arg(args, uintptr_t);
274 		break;
275 
276 	case 5:
277 		registers->x1 = va_arg(args, uintptr_t);
278 		registers->x2 = va_arg(args, uintptr_t);
279 		registers->x3 = va_arg(args, uintptr_t);
280 		registers->x4 = va_arg(args, uintptr_t);
281 		registers->x5 = va_arg(args, uintptr_t);
282 		break;
283 
284 	case 4:
285 		registers->x1 = va_arg(args, uintptr_t);
286 		registers->x2 = va_arg(args, uintptr_t);
287 		registers->x3 = va_arg(args, uintptr_t);
288 		registers->x4 = va_arg(args, uintptr_t);
289 		break;
290 
291 	case 3:
292 		registers->x1 = va_arg(args, uintptr_t);
293 		registers->x2 = va_arg(args, uintptr_t);
294 		registers->x3 = va_arg(args, uintptr_t);
295 		break;
296 
297 	case 2:
298 		registers->x1 = va_arg(args, uintptr_t);
299 		registers->x2 = va_arg(args, uintptr_t);
300 		break;
301 
302 	case 1:
303 		registers->x1 = va_arg(args, uintptr_t);
304 		break;
305 
306 	case 0:
307 		break;
308 
309 	default:
310 		panic("invalid number of arguments to TXM: selector: %u | %u",
311 		    parameters->selector, parameters->num_input_args);
312 	}
313 }
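
/*
 * Illustrative register layout (editorial note, not in the original source):
 * for a call declaring num_input_args == 2 -- e.g. registering a provisioning
 * profile with (payload_addr, payload_size) -- the dispatch ends up with:
 *
 *   x0 = thread_stack_phys  (set by txm_kernel_call_internal)
 *   x1 = payload_addr
 *   x2 = payload_size
 *
 * x3-x7 retain the zero values they were initialized with.
 */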
314 
315 static TXMReturn_t
316 txm_kernel_call_internal(
317 	txm_call_t *parameters,
318 	va_list args)
319 {
320 	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
321 	sptm_call_regs_t txm_registers = {0};
322 	txm_thread_stack_t *thread_stack = NULL;
323 	const TXMThreadStack_t *thread_stack_data = NULL;
324 	const TXMSharedContextData_t *shared_context_data = NULL;
325 
326 	/* Obtain a stack for this call */
327 	thread_stack = acquire_thread_stack();
328 	thread_stack_data = thread_stack->thread_stack_data;
329 	shared_context_data = &thread_stack_data->sharedData;
330 
331 	/* Setup argument registers */
332 	txm_registers.x0 = thread_stack->thread_stack_phys;
333 	txm_kernel_call_registers_setup(parameters, &txm_registers, args);
334 
335 	/* Track resource usage */
336 	recount_enter_secure();
337 
338 	/* Call into TXM */
339 	txm_enter(parameters->selector, &txm_registers);
340 
341 	recount_leave_secure();
342 
343 	txm_ret = (TXMReturn_t){.rawValue = shared_context_data->txmReturnCode};
344 	parameters->txm_ret = txm_ret;
345 
346 	if (parameters->txm_ret.returnCode == kTXMSuccess) {
347 		parameters->num_return_words = shared_context_data->txmNumReturnWords;
348 		if (parameters->num_return_words > kTXMStackReturnWords) {
349 			panic("received excessive return words from TXM: selector: %u | %llu",
350 			    parameters->selector, parameters->num_return_words);
351 		}
352 
353 		for (uint64_t i = 0; i < parameters->num_return_words; i++) {
354 			parameters->return_words[i] = shared_context_data->txmReturnWords[i];
355 		}
356 	}
357 
358 	/* Release the thread stack as it is no longer needed */
359 	release_thread_stack(thread_stack);
360 	thread_stack_data = NULL;
361 	shared_context_data = NULL;
362 
363 	return txm_ret;
364 }
365 
366 kern_return_t
367 txm_kernel_call(
368 	txm_call_t *parameters, ...)
369 {
370 	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
371 	kern_return_t ret = KERN_DENIED;
372 	va_list args;
373 
374 	/* Start the variadic arguments list */
375 	va_start(args, parameters);
376 
377 	do {
378 		txm_ret = txm_kernel_call_internal(parameters, args);
379 		if (txm_ret.returnCode == kTXMReturnOutOfMemory) {
380 			if (parameters->selector == kTXMKernelSelectorAddFreeListPage) {
381 				panic("received out-of-memory error when adding a free page to TXM");
382 			}
383 			txm_add_page();
384 		}
385 	} while (txm_ret.returnCode == kTXMReturnOutOfMemory);
386 
387 	/* Clean up the variadic arguments list */
388 	va_end(args);
389 
390 	/* Print all TXM logs from the log buffer */
391 	if (parameters->skip_logs == false) {
392 		txm_print_logs();
393 	}
394 
395 	/* Print the return code from TXM -- only prints for an error */
396 	if (parameters->failure_silent != true) {
397 		if (parameters->failure_code_silent != txm_ret.returnCode) {
398 			txm_print_return(parameters->selector, txm_ret);
399 		}
400 	}
401 
402 	/*
403 	 * To ease the process of calling into TXM, and to also reduce the number of
404 	 * lines of code for each call site, the txm_call_t offers some properties
405 	 * we can enforce over here. Go through these, and panic in case they aren't
406 	 * honored.
407 	 *
408 	 * NOTE: We check for "<" instead of "!=" for the number of return words we
409 	 * get back from TXM since this helps in forward development. If the kernel
410 	 * and TXM are proceeding at different project cadences, we do not want to
411 	 * gate adding more return words from TXM on the kernel first adopting the
412 	 * new number of return words.
413 	 */
414 	ret = txm_parse_return(txm_ret);
415 
416 	if (parameters->failure_fatal && (ret != KERN_SUCCESS)) {
417 		panic("received fatal error for a selector from TXM: selector: %u | 0x%0llX",
418 		    parameters->selector, txm_ret.rawValue);
419 	} else if (parameters->num_return_words < parameters->num_output_args) {
420 		/* Only panic if return was a success */
421 		if (ret == KERN_SUCCESS) {
422 			panic("received fewer than expected return words from TXM: selector: %u | %llu",
423 			    parameters->selector, parameters->num_return_words);
424 		}
425 	}
426 
427 	return ret;
428 }
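
/*
 * Illustrative usage sketch (hypothetical helper, not part of the original
 * source): callers populate a txm_call_t with a selector and the declared
 * argument counts, invoke txm_kernel_call() variadically, and consume
 * return_words on success -- mirroring callers such as get_logging_info()
 * below.
 */
#if 0 /* example only */
static uint64_t
example_txm_first_return_word(TXMKernelSelector_t selector, uintptr_t arg)
{
	txm_call_t txm_call = {
		.selector = selector,
		.num_input_args = 1,
		.num_output_args = 1,
	};

	/* Out-of-memory retries and log draining are handled internally */
	if (txm_kernel_call(&txm_call, arg) != KERN_SUCCESS) {
		return 0;
	}
	return txm_call.return_words[0];
}
#endif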
429 
430 void
431 txm_transfer_region(
432 	vm_address_t addr,
433 	vm_size_t size)
434 {
435 	vm_address_t addr_end = 0;
436 	vm_size_t size_aligned = round_page(size);
437 
438 	if ((addr & PAGE_MASK) != 0) {
439 		panic("attempted to transfer non-page-aligned memory to TXM: %p", (void*)addr);
440 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
441 		panic("overflow on range to be transferred to TXM: %p | %lu",
442 		    (void*)addr, size);
443 	}
444 
445 	/* Make the memory read-only first (transfer will panic otherwise) */
446 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ);
447 
448 	/* Transfer each physical page to be TXM_DEFAULT */
449 	for (vm_address_t page = addr; page < addr_end; page += PAGE_SIZE) {
450 		pmap_txm_transfer_page(page);
451 	}
452 }
453 
454 void
455 txm_reclaim_region(
456 	vm_address_t addr,
457 	vm_size_t size)
458 {
459 	vm_address_t addr_end = 0;
460 	vm_size_t size_aligned = round_page(size);
461 
462 	if ((addr & PAGE_MASK) != 0) {
463 		panic("attempted to reclaim non-page-aligned memory from TXM: %p", (void*)addr);
464 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
465 		panic("overflow on range to be reclaimed from TXM: %p | %lu",
466 		    (void*)addr, size);
467 	}
468 
469 	/*
470 	 * We can only reclaim once TXM has transferred the memory range back to the
471 	 * kernel. Hence, we simply try to switch permissions to read-write. If TXM
472 	 * hasn't transferred the pages, this should then panic.
473 	 */
474 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ | VM_PROT_WRITE);
475 }
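
/*
 * Illustrative lifecycle sketch (hypothetical helper, not part of the
 * original source): memory handed to TXM must be allocated page-wise,
 * transferred before use, and reclaimed before it can be freed -- mirroring
 * txm_register_provisioning_profile() below.
 */
#if 0 /* example only */
static kern_return_t
example_hand_blob_to_txm(const void *blob, vm_size_t size)
{
	vm_address_t addr = 0;
	kern_return_t ret;

	/* Page-wise allocation so whole pages can change ownership */
	ret = kmem_alloc(kernel_map, &addr, size,
	    KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		return ret;
	}
	memcpy((void*)addr, blob, size);

	/* Pages become read-only and TXM-owned */
	txm_transfer_region(addr, size);

	/* ... TXM consumes the data here ... */

	/* Ownership must return to XNU before the pages are freed */
	txm_reclaim_region(addr, size);
	kmem_free(kernel_map, addr, size);
	return KERN_SUCCESS;
}
#endif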
476 
477 static SECURITY_READ_ONLY_LATE(const char*) txm_log_page = NULL;
478 static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_head = NULL;
479 static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_sync = NULL;
480 
481 static decl_lck_mtx_data(, log_lock);
482 static uint32_t log_head = 0;
483 
484 void
485 txm_print_logs(void)
486 {
487 	uint32_t start_index = 0;
488 	uint32_t end_index = 0;
489 
490 	/*
491 	 * The design here is very simple. TXM keeps adding slots to its circular buffer
492 	 * and the kernel attempts to read each one and print it, maintaining its own head
493 	 * for the log.
494 	 *
495 	 * This design is by nature lazy. TXM doesn't know or care if the kernel has gone
496 	 * through and printed any of the logs, so it'll just keep writing into its buffer
497 	 * and then circle around when it becomes full.
498 	 *
499 	 * This is fine most of the time since there is a decent number of slots in the
500 	 * log buffer. An issue mostly arises when TXM adds logs so quickly that it
501 	 * wraps around and starts overwriting logs which haven't yet been seen by
502 	 * the kernel. If this were to happen, TXM's log head may lap the head
503 	 * maintained by the kernel, causing a lot of logs to be missed, since the
504 	 * kernel only prints the logs which lie in-between the two heads.
505 	 *
506 	 * The fix for that is complicated, and until we see an actual impact, we're going
507 	 * to keep the simpler design in place.
508 	 */
509 
510 	/* Return if the logging hasn't been setup yet */
511 	if (txm_log_sync == NULL) {
512 		return;
513 	}
514 
515 	/*
516 	 * Holding the log lock and printing can cause lots of issues since printing can
517 	 * be rather slow. While we make it a point to keep the logging buffer quiet, some
518 	 * actions (such as loading trust caches) are still very chatty.
519 	 *
520 	 * As a result, we optimize this routine to ensure that the lock itself isn't held
521 	 * for very long. All we need to do within the critical section is calculate the
522 	 * starting and ending index of the log buffer. The actual printing doesn't need
523 	 * to be done with the lock held.
524 	 */
525 	lck_mtx_lock(&log_lock);
526 
527 	start_index = log_head;
528 	end_index = os_atomic_load(txm_log_head, relaxed) % kTXMLogSlots;
529 
530 	/* Update the log head with the new index */
531 	log_head = end_index;
532 
533 	/* Release the log lock */
534 	lck_mtx_unlock(&log_lock);
535 
536 	if (start_index != end_index) {
537 		/* Use load acquire here to sync up with all writes to the buffer */
538 		os_atomic_load(txm_log_sync, acquire);
539 
540 		while (start_index != end_index) {
541 			const char *slot = txm_log_page + (start_index * kTXMLogSlotSize);
542 
543 			/* We add newlines after each log statement since TXM does not */
544 			printf("%s\n", slot);
545 
546 			start_index = (start_index + 1) % kTXMLogSlots;
547 		}
548 	}
549 }
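
/*
 * Worked example (editorial note; assuming, say, kTXMLogSlots == 8): if the
 * kernel's log_head is 6 and TXM's head computes to 2, the loop above prints
 * slots 6, 7, 0 and 1, then stops at slot 2, leaving log_head == 2 for the
 * next call.
 */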
550 
551 #pragma mark Initialization
552 
553 SECURITY_READ_ONLY_LATE(const TXMReadOnlyData_t*) txm_ro_data = NULL;
554 SECURITY_READ_ONLY_LATE(const TXMStatistics_t*) txm_stats = NULL;
555 SECURITY_READ_ONLY_LATE(const CSConfig_t*) txm_cs_config = NULL;
556 SECURITY_READ_ONLY_LATE(CSRestrictedModeState_t*) txm_restricted_mode_state = NULL;
557 
558 SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = NULL;
559 static SECURITY_READ_ONLY_LATE(bool) code_signing_enabled = true;
560 static SECURITY_READ_ONLY_LATE(uint32_t) managed_signature_size = 0;
561 
562 static decl_lck_mtx_data(, compilation_service_lock);
563 static decl_lck_mtx_data(, unregister_sync_lock);
564 
565 static void
566 get_logging_info(void)
567 {
568 	txm_call_t txm_call = {
569 		.selector = kTXMKernelSelectorGetLogInfo,
570 		.failure_fatal = true,
571 		.num_output_args = 3
572 	};
573 	txm_kernel_call(&txm_call);
574 
575 	txm_log_page = (const char*)txm_call.return_words[0];
576 	txm_log_head = (const uint32_t*)txm_call.return_words[1];
577 	txm_log_sync = (const uint32_t*)txm_call.return_words[2];
578 }
579 
580 static void
581 get_code_signing_info(void)
582 {
583 	txm_call_t txm_call = {
584 		.selector = kTXMKernelSelectorGetCodeSigningInfo,
585 		.failure_fatal = true,
586 		.num_output_args = 6
587 	};
588 	txm_kernel_call(&txm_call);
589 
590 	/*
591 	 * Not using txm_call.return_words[0] for now. This was previously the
592 	 * code_signing_enabled field, but we've since switched to acquiring that
593 	 * value from TXM's read-only data.
594 	 *
595 	 * Not using txm_call.return_words[4] for now. This was previously the
596 	 * txm_cs_config field, but we've since switched to acquiring that value
597 	 * from TXM's read-only data.
598 	 */
599 
600 	developer_mode_enabled = (bool*)txm_call.return_words[1];
601 	txm_stats = (TXMStatistics_t*)txm_call.return_words[2];
602 	managed_signature_size = (uint32_t)txm_call.return_words[3];
603 	txm_ro_data = (TXMReadOnlyData_t*)txm_call.return_words[5];
604 
605 	/* Set code_signing_enabled based on read-only data */
606 	code_signing_enabled = txm_ro_data->codeSigningDisabled == false;
607 
608 	/* Set txm_cs_config based on read-only data */
609 	txm_cs_config = &txm_ro_data->CSConfiguration;
610 
611 	/* Only setup when REM is supported on the platform */
612 	if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
613 		txm_restricted_mode_state = txm_ro_data->restrictedModeState;
614 	}
615 }
616 
617 static void
618 set_shared_region_base_address(void)
619 {
620 	txm_call_t txm_call = {
621 		.selector = kTXMKernelSelectorSetSharedRegionBaseAddress,
622 		.failure_fatal = true,
623 		.num_input_args = 2,
624 	};
625 
626 	txm_kernel_call(&txm_call,
627 	    SHARED_REGION_BASE,
628 	    SHARED_REGION_SIZE);
629 }
630 
631 void
632 code_signing_init(void)
633 {
634 #if kTXMKernelAPIVersion >= 6
635 	printf("libTXM_KernelVersion: %u\n", libTrustedExecutionMonitor_KernelVersion);
636 	printf("libTXM_Image4Version: %u\n", libTrustedExecutionMonitor_Image4Version);
637 #endif
638 
639 	/* Setup the thread stacks used by TXM */
640 	setup_thread_stacks();
641 
642 	/* Setup the logging lock */
643 	lck_mtx_init(&log_lock, &txm_lck_grp, 0);
644 
645 	/* Setup TXM logging information */
646 	get_logging_info();
647 
648 	/* Setup code signing configuration */
649 	get_code_signing_info();
650 
651 	/* Setup all the other locks we need */
652 	lck_mtx_init(&compilation_service_lock, &txm_lck_grp, 0);
653 	lck_mtx_init(&unregister_sync_lock, &txm_lck_grp, 0);
654 
655 	/*
656 	 * We need to let TXM know what the shared region base address is going
657 	 * to be for this boot.
658 	 */
659 	set_shared_region_base_address();
660 
661 	/* Require signed code when monitor is enabled */
662 	if (code_signing_enabled == true) {
663 		cs_debug_fail_on_unsigned_code = 1;
664 	}
665 }
666 
667 void
668 txm_enter_lockdown_mode(void)
669 {
670 	txm_call_t txm_call = {
671 		.selector = kTXMKernelSelectorEnterLockdownMode,
672 		.failure_fatal = true,
673 	};
674 	txm_kernel_call(&txm_call);
675 }
676 
677 kern_return_t
678 txm_secure_channel_shared_page(
679 	uint64_t *secure_channel_phys,
680 	size_t *secure_channel_size)
681 {
682 #if kTXMKernelAPIVersion >= 5
683 	txm_call_t txm_call = {
684 		.selector = kTXMKernelSelectorGetSecureChannelAddr,
685 		.num_output_args = 2
686 	};
687 
688 	kern_return_t ret = txm_kernel_call(&txm_call);
689 	if (ret == KERN_NOT_SUPPORTED) {
690 		return ret;
691 	} else if (ret != KERN_SUCCESS) {
692 		panic("unexpected failure for TXM secure channel: %d", ret);
693 	}
694 
695 	/* Return the physical address */
696 	if (secure_channel_phys != NULL) {
697 		*secure_channel_phys = txm_call.return_words[0];
698 	}
699 
700 	/* Return the size */
701 	if (secure_channel_size != NULL) {
702 		*secure_channel_size = txm_call.return_words[1];
703 	}
704 
705 	return KERN_SUCCESS;
706 #else
707 	(void)secure_channel_phys;
708 	(void)secure_channel_size;
709 	return KERN_NOT_SUPPORTED;
710 #endif
711 }
712 
713 #pragma mark Developer Mode
714 
715 void
716 txm_toggle_developer_mode(bool state)
717 {
718 	txm_call_t txm_call = {
719 		.selector = kTXMKernelSelectorDeveloperModeToggle,
720 		.failure_fatal = true,
721 		.num_input_args = 1
722 	};
723 
724 	txm_kernel_call(&txm_call, state);
725 }
726 
727 #pragma mark Restricted Execution Mode
728 
729 kern_return_t
730 txm_rem_enable(void)
731 {
732 	txm_call_t txm_call = {
733 		.selector = kTXMKernelSelectorEnableRestrictedMode
734 	};
735 	return txm_kernel_call(&txm_call);
736 }
737 
738 kern_return_t
739 txm_rem_state(void)
740 {
741 	if (txm_restricted_mode_state == NULL) {
742 		return KERN_NOT_SUPPORTED;
743 	}
744 
745 	CSReturn_t cs_ret = restrictedModeStatus(txm_restricted_mode_state);
746 	if (cs_ret.error == kCSReturnSuccess) {
747 		return KERN_SUCCESS;
748 	}
749 	return KERN_DENIED;
750 }
751 
752 #pragma mark Device State
753 
754 void
755 txm_update_device_state(void)
756 {
757 #if kTXMKernelAPIVersion >= 6
758 	txm_call_t txm_call = {
759 		.selector = kTXMSelectorUpdateDeviceState,
760 		.failure_fatal = true
761 	};
762 	txm_kernel_call(&txm_call);
763 #endif
764 }
765 
766 void
767 txm_complete_security_boot_mode(
768 	__unused uint32_t security_boot_mode)
769 {
770 #if kTXMKernelAPIVersion >= 6
771 	txm_call_t txm_call = {
772 		.selector = kTXMSelectorCompleteSecurityBootMode,
773 		.num_input_args = 1,
774 		.failure_fatal = true
775 	};
776 	txm_kernel_call(&txm_call, security_boot_mode);
777 #endif
778 }
779 
780 #pragma mark Code Signing and Provisioning Profiles
781 
782 bool
783 txm_code_signing_enabled(void)
784 {
785 	return code_signing_enabled;
786 }
787 
788 vm_size_t
789 txm_managed_code_signature_size(void)
790 {
791 	return managed_signature_size;
792 }
793 
794 kern_return_t
795 txm_register_provisioning_profile(
796 	const void *profile_blob,
797 	const size_t profile_blob_size,
798 	void **profile_obj)
799 {
800 	txm_call_t txm_call = {
801 		.selector = kTXMKernelSelectorRegisterProvisioningProfile,
802 		.num_input_args = 2,
803 		.num_output_args = 1
804 	};
805 	vm_address_t payload_addr = 0;
806 	kern_return_t ret = KERN_DENIED;
807 
808 	/* We need to allocate page-wise in order to transfer the range to TXM */
809 	ret = kmem_alloc(kernel_map, &payload_addr, profile_blob_size,
810 	    KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
811 	if (ret != KERN_SUCCESS) {
812 		printf("unable to allocate memory for profile payload: %d\n", ret);
813 		goto exit;
814 	}
815 
816 	/* Copy the contents into the allocation */
817 	memcpy((void*)payload_addr, profile_blob, profile_blob_size);
818 
819 	/* Transfer the memory range to TXM */
820 	txm_transfer_region(payload_addr, profile_blob_size);
821 
822 	ret = txm_kernel_call(&txm_call, payload_addr, profile_blob_size);
823 	if (ret == KERN_SUCCESS) {
824 		*profile_obj = (void*)txm_call.return_words[0];
825 	}
826 
827 exit:
828 	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
829 		/* Reclaim this memory range */
830 		txm_reclaim_region(payload_addr, profile_blob_size);
831 
832 		/* Free the memory range */
833 		kmem_free(kernel_map, payload_addr, profile_blob_size);
834 		payload_addr = 0;
835 	}
836 
837 	return ret;
838 }
839 
840 kern_return_t
841 txm_trust_provisioning_profile(
842 	__unused void *profile_obj,
843 	__unused const void *sig_data,
844 	__unused size_t sig_size)
845 {
846 #if kTXMKernelAPIVersion >= 7
847 	txm_call_t txm_call = {
848 		.selector = kTXMKernelSelectorTrustProvisioningProfile,
849 		.num_input_args = 3
850 	};
851 
852 	return txm_kernel_call(&txm_call, profile_obj, sig_data, sig_size);
853 #else
854 	/* The TXM selector hasn't yet landed */
855 	return KERN_SUCCESS;
856 #endif
857 }
858 
859 kern_return_t
860 txm_unregister_provisioning_profile(
861 	void *profile_obj)
862 {
863 	txm_call_t txm_call = {
864 		.selector = kTXMKernelSelectorUnregisterProvisioningProfile,
865 		.num_input_args = 1,
866 		.num_output_args = 2
867 	};
868 	vm_address_t profile_addr = 0;
869 	vm_size_t profile_size = 0;
870 	kern_return_t ret = KERN_DENIED;
871 
872 	ret = txm_kernel_call(&txm_call, profile_obj);
873 	if (ret != KERN_SUCCESS) {
874 		return ret;
875 	}
876 
877 	profile_addr = txm_call.return_words[0];
878 	profile_size = txm_call.return_words[1];
879 
880 	/* Reclaim this memory range */
881 	txm_reclaim_region(profile_addr, profile_size);
882 
883 	/* Free the memory range */
884 	kmem_free(kernel_map, profile_addr, profile_size);
885 
886 	return KERN_SUCCESS;
887 }
888 
889 kern_return_t
890 txm_associate_provisioning_profile(
891 	void *sig_obj,
892 	void *profile_obj)
893 {
894 	txm_call_t txm_call = {
895 		.selector = kTXMKernelSelectorAssociateProvisioningProfile,
896 		.num_input_args = 2,
897 	};
898 
899 	return txm_kernel_call(&txm_call, sig_obj, profile_obj);
900 }
901 
902 kern_return_t
903 txm_disassociate_provisioning_profile(
904 	void *sig_obj)
905 {
906 	txm_call_t txm_call = {
907 		.selector = kTXMKernelSelectorDisassociateProvisioningProfile,
908 		.num_input_args = 1,
909 	};
910 
911 	/*
912 	 * Take the unregistration sync lock.
913 	 * For more information: rdar://99205627.
914 	 */
915 	lck_mtx_lock(&unregister_sync_lock);
916 
917 	/* Disassociate the profile from the signature */
918 	kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);
919 
920 	/* Release the unregistration sync lock */
921 	lck_mtx_unlock(&unregister_sync_lock);
922 
923 	return ret;
924 }
925 
926 void
927 txm_set_compilation_service_cdhash(
928 	const uint8_t cdhash[CS_CDHASH_LEN])
929 {
930 	txm_call_t txm_call = {
931 		.selector = kTXMKernelSelectorAuthorizeCompilationServiceCDHash,
932 		.num_input_args = 1,
933 	};
934 
935 	lck_mtx_lock(&compilation_service_lock);
936 	txm_kernel_call(&txm_call, cdhash);
937 	lck_mtx_unlock(&compilation_service_lock);
938 }
939 
940 bool
941 txm_match_compilation_service_cdhash(
942 	const uint8_t cdhash[CS_CDHASH_LEN])
943 {
944 	txm_call_t txm_call = {
945 		.selector = kTXMKernelSelectorMatchCompilationServiceCDHash,
946 		.failure_silent = true,
947 		.num_input_args = 1,
948 		.num_output_args = 1,
949 	};
950 	kern_return_t ret = KERN_DENIED;
951 
952 	/* Be safe and take the lock (avoid thread collisions) */
953 	lck_mtx_lock(&compilation_service_lock);
954 	ret = txm_kernel_call(&txm_call, cdhash);
955 	lck_mtx_unlock(&compilation_service_lock);
956 
957 	if (ret == KERN_SUCCESS) {
958 		return true;
959 	}
960 	return false;
961 }
962 
963 void
964 txm_set_local_signing_public_key(
965 	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
966 {
967 	txm_call_t txm_call = {
968 		.selector = kTXMKernelSelectorSetLocalSigningPublicKey,
969 		.num_input_args = 1,
970 	};
971 
972 	txm_kernel_call(&txm_call, public_key);
973 }
974 
975 uint8_t*
976 txm_get_local_signing_public_key(void)
977 {
978 	txm_call_t txm_call = {
979 		.selector = kTXMKernelSelectorGetLocalSigningPublicKey,
980 		.num_output_args = 1,
981 	};
982 	kern_return_t ret = KERN_DENIED;
983 
984 	ret = txm_kernel_call(&txm_call);
985 	if (ret != KERN_SUCCESS) {
986 		return NULL;
987 	}
988 
989 	return (uint8_t*)txm_call.return_words[0];
990 }
991 
992 void
993 txm_unrestrict_local_signing_cdhash(
994 	const uint8_t cdhash[CS_CDHASH_LEN])
995 {
996 	txm_call_t txm_call = {
997 		.selector = kTXMKernelSelectorAuthorizeLocalSigningCDHash,
998 		.num_input_args = 1,
999 	};
1000 
1001 	txm_kernel_call(&txm_call, cdhash);
1002 }
1003 
1004 kern_return_t
1005 txm_register_code_signature(
1006 	const vm_address_t signature_addr,
1007 	const vm_size_t signature_size,
1008 	const vm_offset_t code_directory_offset,
1009 	const char *signature_path,
1010 	void **sig_obj,
1011 	vm_address_t *txm_signature_addr)
1012 {
1013 	txm_call_t txm_call = {
1014 		.selector = kTXMKernelSelectorRegisterCodeSignature,
1015 		.num_input_args = 3,
1016 		.num_output_args = 2,
1017 	};
1018 	kern_return_t ret = KERN_DENIED;
1019 
1020 	/*
1021 	 * TXM performs more exhaustive validation of the code signature and figures
1022 	 * out the best code directory to use on its own. As a result, this offset here
1023 	 * is not used.
1024 	 */
1025 	(void)code_directory_offset;
1026 
1027 	/*
1028 	 * If the signature is large enough to not fit within TXM's managed signature
1029 	 * size, then we need to transfer it over so it is owned by TXM.
1030 	 */
1031 	if (signature_size > txm_managed_code_signature_size()) {
1032 		txm_transfer_region(signature_addr, signature_size);
1033 	}
1034 
1035 	ret = txm_kernel_call(
1036 		&txm_call,
1037 		signature_addr,
1038 		signature_size,
1039 		signature_path);
1040 
1041 	if (ret != KERN_SUCCESS) {
1042 		goto exit;
1043 	}
1044 
1045 	*sig_obj = (void*)txm_call.return_words[0];
1046 	*txm_signature_addr = txm_call.return_words[1];
1047 
1048 exit:
1049 	if ((ret != KERN_SUCCESS) && (signature_size > txm_managed_code_signature_size())) {
1050 		txm_reclaim_region(signature_addr, signature_size);
1051 	}
1052 
1053 	return ret;
1054 }
1055 
1056 kern_return_t
1057 txm_unregister_code_signature(
1058 	void *sig_obj)
1059 {
1060 	txm_call_t txm_call = {
1061 		.selector = kTXMKernelSelectorUnregisterCodeSignature,
1062 		.failure_fatal = true,
1063 		.num_input_args = 1,
1064 		.num_output_args = 2,
1065 	};
1066 	TXMCodeSignature_t *cs_obj = sig_obj;
1067 	vm_address_t signature_addr = 0;
1068 	vm_size_t signature_size = 0;
1069 	bool txm_managed = false;
1070 
1071 	/* Check if the signature memory is TXM managed */
1072 	txm_managed = cs_obj->sptmType != TXM_BULK_DATA;
1073 
1074 	/*
1075 	 * Take the unregistration sync lock.
1076 	 * For more information: rdar://99205627.
1077 	 */
1078 	lck_mtx_lock(&unregister_sync_lock);
1079 
1080 	/* Unregister the signature from TXM -- cannot fail */
1081 	txm_kernel_call(&txm_call, sig_obj);
1082 
1083 	/* Release the unregistration sync lock */
1084 	lck_mtx_unlock(&unregister_sync_lock);
1085 
1086 	signature_addr = txm_call.return_words[0];
1087 	signature_size = txm_call.return_words[1];
1088 
1089 	/* Reclaim the memory range if we need to */
1090 	if (txm_managed == false) {
1091 		txm_reclaim_region(signature_addr, signature_size);
1092 	}
1093 
1094 	return KERN_SUCCESS;
1095 }
1096 
1097 kern_return_t
1098 txm_verify_code_signature(
1099 	void *sig_obj)
1100 {
1101 	txm_call_t txm_call = {
1102 		.selector = kTXMKernelSelectorValidateCodeSignature,
1103 		.num_input_args = 1,
1104 	};
1105 
1106 	return txm_kernel_call(&txm_call, sig_obj);
1107 }
1108 
1109 kern_return_t
1110 txm_reconstitute_code_signature(
1111 	void *sig_obj,
1112 	vm_address_t *unneeded_addr,
1113 	vm_size_t *unneeded_size)
1114 {
1115 	txm_call_t txm_call = {
1116 		.selector = kTXMKernelSelectorReconstituteCodeSignature,
1117 		.failure_fatal = true,
1118 		.num_input_args = 1,
1119 		.num_output_args = 2,
1120 	};
1121 	vm_address_t return_addr = 0;
1122 	vm_size_t return_size = 0;
1123 
1124 	/* Reconstitute the code signature -- cannot fail */
1125 	txm_kernel_call(&txm_call, sig_obj);
1126 
1127 	return_addr = txm_call.return_words[0];
1128 	return_size = txm_call.return_words[1];
1129 
1130 	/* Reclaim the memory region if we need to */
1131 	if ((return_addr != 0) && (return_size != 0)) {
1132 		txm_reclaim_region(return_addr, return_size);
1133 	}
1134 
1135 	*unneeded_addr = return_addr;
1136 	*unneeded_size = return_size;
1137 
1138 	return KERN_SUCCESS;
1139 }
1140 
1141 #pragma mark Address Spaces
1142 
1143 kern_return_t
1144 txm_register_address_space(
1145 	pmap_t pmap,
1146 	uint16_t addr_space_id,
1147 	TXMAddressSpaceFlags_t flags)
1148 {
1149 	txm_call_t txm_call = {
1150 		.selector = kTXMKernelSelectorRegisterAddressSpace,
1151 		.failure_fatal = true,
1152 		.num_input_args = 2,
1153 		.num_output_args = 1,
1154 	};
1155 	TXMAddressSpace_t *txm_addr_space = NULL;
1156 
1157 	/* Register the address space -- cannot fail */
1158 	txm_kernel_call(&txm_call, addr_space_id, flags);
1159 
1160 	/* Set the address space object within the PMAP */
1161 	txm_addr_space = (TXMAddressSpace_t*)txm_call.return_words[0];
1162 	pmap_txm_set_addr_space(pmap, txm_addr_space);
1163 
1164 	return KERN_SUCCESS;
1165 }
1166 
1167 kern_return_t
1168 txm_unregister_address_space(
1169 	pmap_t pmap)
1170 {
1171 	txm_call_t txm_call = {
1172 		.selector = kTXMKernelSelectorUnregisterAddressSpace,
1173 		.failure_fatal = true,
1174 		.num_input_args = 1,
1175 	};
1176 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1177 
1178 	/*
1179 	 * Take the unregistration sync lock.
1180 	 * For more information: rdar://99205627.
1181 	 */
1182 	lck_mtx_lock(&unregister_sync_lock);
1183 
1184 	/* Unregister the address space -- cannot fail */
1185 	txm_kernel_call(&txm_call, txm_addr_space);
1186 
1187 	/* Release the unregistration sync lock */
1188 	lck_mtx_unlock(&unregister_sync_lock);
1189 
1190 	/* Remove the address space from the pmap */
1191 	pmap_txm_set_addr_space(pmap, NULL);
1192 
1193 	return KERN_SUCCESS;
1194 }
1195 
1196 kern_return_t
1197 txm_associate_code_signature(
1198 	pmap_t pmap,
1199 	void *sig_obj,
1200 	const vm_address_t region_addr,
1201 	const vm_size_t region_size,
1202 	const vm_offset_t region_offset)
1203 {
1204 	txm_call_t txm_call = {
1205 		.selector = kTXMKernelSelectorAssociateCodeSignature,
1206 		.num_input_args = 5,
1207 	};
1208 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1209 	kern_return_t ret = KERN_DENIED;
1210 
1211 	/*
1212 	 * Associating a code signature may require exclusive access to the TXM address
1213 	 * space lock within TXM.
1214 	 */
1215 	pmap_txm_acquire_exclusive_lock(pmap);
1216 
1217 	/*
1218 	 * If the address space in question is a nested address space, then all associations
1219 	 * need to go into the shared region base range. The VM layer is inconsistent with
1220 	 * how it makes associations with TXM vs. how it maps pages into the shared region.
1221 	 *
1222 	 * For TXM, the associations are made without taking the base range into account,
1223 	 * but when mappings are entered into the shared region, the base range is taken
1224 	 * into account. To normalize this, we add the base range address here.
1225 	 */
1226 	vm_address_t adjusted_region_addr = region_addr;
1227 	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeSharedRegion) {
1228 		adjusted_region_addr += SHARED_REGION_BASE;
1229 	}
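	/*
	 * For example, assuming an arm64 SHARED_REGION_BASE of 0x180000000 (an
	 * assumption here; see <mach/shared_region.h>), a region which the VM
	 * associates at offset 0x4000 within a nested address space is
	 * registered with TXM at 0x180004000.
	 */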
1230 
1231 	/*
1232 	 * The VM tries a bunch of weird mappings within launchd for some platform code
1233 	 * which isn't mapped contiguously. These mappings don't succeed, but the failure
1234 	 * is fairly harmless since everything seems to work. However, since the call to
1235 	 * TXM fails, we make a series of logs. Hence, for launchd, we suppress failure
1236 	 * logs.
1237 	 */
1238 	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeAddressSpace) {
1239 		/* TXMTODO: Scope this to launchd better */
1240 		txm_call.failure_code_silent = kTXMReturnPlatformCodeMapping;
1241 	}
1242 
1243 	/* Check if the main region has been set on the address space */
1244 	bool main_region_set = txm_addr_space->mainRegion != NULL;
1245 	bool main_region_set_after = false;
1246 
1247 	ret = txm_kernel_call(
1248 		&txm_call,
1249 		txm_addr_space,
1250 		sig_obj,
1251 		adjusted_region_addr,
1252 		region_size,
1253 		region_offset);
1254 
1255 	while (ret == KERN_OPERATION_TIMED_OUT) {
1256 		/*
1257 		 * There is no easy method to sleep in the kernel. This operation has the
1258 		 * potential to burn CPU cycles, but that is alright since we don't actually
1259 		 * ever expect to enter this case on legitimately operating systems.
1260 		 */
1261 		ret = txm_kernel_call(
1262 			&txm_call,
1263 			txm_addr_space,
1264 			sig_obj,
1265 			adjusted_region_addr,
1266 			region_size,
1267 			region_offset);
1268 	}
1269 
1270 	/*
1271 	 * If the main region wasn't set on the address space before hand, but this new
1272 	 * call into TXM was successful and sets the main region, it means this signature
1273 	 * object is associated with the main region on the address space. With this, we
1274 	 * can now set the appropriate trust level on the PMAP.
1275 	 */
1276 	if (ret == KERN_SUCCESS) {
1277 		main_region_set_after = txm_addr_space->mainRegion != NULL;
1278 	}
1279 
1280 	/* Unlock the TXM address space lock */
1281 	pmap_txm_release_exclusive_lock(pmap);
1282 
1283 	/* Check if we should set the trust level on the PMAP */
1284 	if (!main_region_set && main_region_set_after) {
1285 		const TXMCodeSignature_t *cs_obj = sig_obj;
1286 		const SignatureValidation_t *sig = &cs_obj->sig;
1287 
1288 		/*
1289 		 * This is gross, as we're dereferencing into a private data structure type.
1290 		 * There are 2 ways to clean this up in the future:
1291 		 * 1. Import libCodeSignature, so we can use "codeSignatureGetTrustLevel".
1292 		 * 2. Cache the trust level on the address space within TXM and then use it.
1293 		 */
1294 		pmap_txm_set_trust_level(pmap, sig->trustLevel);
1295 	}
1296 
1297 	return ret;
1298 }
1299 
1300 kern_return_t
1301 txm_allow_jit_region(
1302 	pmap_t pmap)
1303 {
1304 	txm_call_t txm_call = {
1305 		.selector = kTXMKernelSelectorAllowJITRegion,
1306 		.num_input_args = 1,
1307 	};
1308 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1309 	kern_return_t ret = KERN_DENIED;
1310 
1311 	pmap_txm_acquire_shared_lock(pmap);
1312 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1313 	pmap_txm_release_shared_lock(pmap);
1314 
1315 	return ret;
1316 }
1317 
1318 kern_return_t
1319 txm_associate_jit_region(
1320 	pmap_t pmap,
1321 	const vm_address_t region_addr,
1322 	const vm_size_t region_size)
1323 {
1324 	txm_call_t txm_call = {
1325 		.selector = kTXMKernelSelectorAssociateJITRegion,
1326 		.num_input_args = 3,
1327 	};
1328 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1329 	kern_return_t ret = KERN_DENIED;
1330 
1331 	/*
1332 	 * Associating a JIT region may require exclusive access to the TXM address
1333 	 * space lock within TXM.
1334 	 */
1335 	pmap_txm_acquire_exclusive_lock(pmap);
1336 
1337 	ret = txm_kernel_call(
1338 		&txm_call,
1339 		txm_addr_space,
1340 		region_addr,
1341 		region_size);
1342 
1343 	/* Unlock the TXM address space lock */
1344 	pmap_txm_release_exclusive_lock(pmap);
1345 
1346 	return ret;
1347 }
1348 
1349 kern_return_t
1350 txm_address_space_debugged(
1351 	pmap_t pmap)
1352 {
1353 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1354 	bool debug_regions_allowed = false;
1355 
1356 	/*
1357 	 * We do not actually need to trap into the monitor for this function for
1358 	 * now. It might be a tad bit more secure to actually trap into the monitor
1359 	 * as it implicitly verifies all of our pointers, but since this is a simple
1360 	 * state check against the address space, the real policy around it lies
1361 	 * within the kernel still, in which case entering the monitor doesn't
1362 	 * really provide much more security.
1363 	 */
1364 
1365 	pmap_txm_acquire_shared_lock(pmap);
1366 	debug_regions_allowed = os_atomic_load(&txm_addr_space->allowsInvalidCode, relaxed);
1367 	pmap_txm_release_shared_lock(pmap);
1368 
1369 	if (debug_regions_allowed == true) {
1370 		return KERN_SUCCESS;
1371 	}
1372 	return KERN_DENIED;
1373 }
1374 
1375 kern_return_t
1376 txm_associate_debug_region(
1377 	pmap_t pmap,
1378 	const vm_address_t region_addr,
1379 	const vm_size_t region_size)
1380 {
1381 	/*
1382 	 * This function is an interesting one. There is no need for us to make
1383 	 * a call into TXM for this one; instead, all we need to do here is
1384 	 * verify whether the TXM address space actually allows debug regions
1385 	 * to be mapped in.
1386 	 */
1387 	(void)region_addr;
1388 	(void)region_size;
1389 
1390 	kern_return_t ret = txm_address_space_debugged(pmap);
1391 	if (ret != KERN_SUCCESS) {
1392 		printf("address space does not allow creating debug regions\n");
1393 	}
1394 
1395 	return ret;
1396 }
1397 
1398 kern_return_t
1399 txm_allow_invalid_code(
1400 	pmap_t pmap)
1401 {
1402 	txm_call_t txm_call = {
1403 		.selector = kTXMKernelSelectorAllowInvalidCode,
1404 		.num_input_args = 1,
1405 	};
1406 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1407 	kern_return_t ret = KERN_DENIED;
1408 
1409 	/*
1410 	 * Allowing invalid code may require exclusive access to the TXM address
1411 	 * space lock within TXM.
1412 	 */
1413 
1414 	pmap_txm_acquire_exclusive_lock(pmap);
1415 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1416 	pmap_txm_release_exclusive_lock(pmap);
1417 
1418 	return ret;
1419 }
1420 
1421 kern_return_t
1422 txm_get_trust_level_kdp(
1423 	pmap_t pmap,
1424 	uint32_t *trust_level)
1425 {
1426 	CSTrust_t txm_trust_level = kCSTrustUntrusted;
1427 
1428 	kern_return_t ret = pmap_txm_get_trust_level_kdp(pmap, &txm_trust_level);
1429 	if (ret != KERN_SUCCESS) {
1430 		return ret;
1431 	}
1432 
1433 	if (trust_level != NULL) {
1434 		*trust_level = txm_trust_level;
1435 	}
1436 	return KERN_SUCCESS;
1437 }
1438 
1439 kern_return_t
1440 txm_get_jit_address_range_kdp(
1441 	pmap_t pmap,
1442 	uintptr_t *jit_region_start,
1443 	uintptr_t *jit_region_end)
1444 {
1445 	return pmap_txm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
1446 }
1447 
1448 kern_return_t
1449 txm_address_space_exempt(
1450 	const pmap_t pmap)
1451 {
1452 	if (pmap_performs_stage2_translations(pmap) == true) {
1453 		return KERN_SUCCESS;
1454 	}
1455 
1456 	return KERN_DENIED;
1457 }
1458 
1459 kern_return_t
1460 txm_fork_prepare(
1461 	pmap_t old_pmap,
1462 	pmap_t new_pmap)
1463 {
1464 	/*
1465 	 * We'll add support for this as the need for it becomes more important.
1466 	 * TXMTODO: Complete this implementation.
1467 	 */
1468 	(void)old_pmap;
1469 	(void)new_pmap;
1470 
1471 	return KERN_SUCCESS;
1472 }
1473 
1474 kern_return_t
1475 txm_acquire_signing_identifier(
1476 	const void *sig_obj,
1477 	const char **signing_id)
1478 {
1479 	txm_call_t txm_call = {
1480 		.selector = kTXMKernelSelectorAcquireSigningIdentifier,
1481 		.num_input_args = 1,
1482 		.num_output_args = 1,
1483 		.failure_fatal = true,
1484 	};
1485 
1486 	/* Get the signing ID -- should not fail */
1487 	txm_kernel_call(&txm_call, sig_obj);
1488 
1489 	if (signing_id != NULL) {
1490 		*signing_id = (const char*)txm_call.return_words[0];
1491 	}
1492 	return KERN_SUCCESS;
1493 }
1494 
1495 #pragma mark Entitlements
1496 
1497 kern_return_t
1498 txm_associate_kernel_entitlements(
1499 	void *sig_obj,
1500 	const void *kernel_entitlements)
1501 {
1502 	txm_call_t txm_call = {
1503 		.selector = kTXMKernelSelectorAssociateKernelEntitlements,
1504 		.num_input_args = 2,
1505 		.failure_fatal = true,
1506 	};
1507 
1508 	/* Associate the kernel entitlements -- should not fail */
1509 	txm_kernel_call(&txm_call, sig_obj, kernel_entitlements);
1510 
1511 	return KERN_SUCCESS;
1512 }
1513 
1514 kern_return_t
1515 txm_resolve_kernel_entitlements(
1516 	pmap_t pmap,
1517 	const void **kernel_entitlements)
1518 {
1519 	txm_call_t txm_call = {
1520 		.selector = kTXMKernelSelectorResolveKernelEntitlementsAddressSpace,
1521 		.skip_logs = true,
1522 		.num_input_args = 1,
1523 		.num_output_args = 1,
1524 		.failure_silent = true,
1525 	};
1526 	TXMAddressSpace_t *txm_addr_space = NULL;
1527 	kern_return_t ret = KERN_DENIED;
1528 
1529 	if (pmap == pmap_txm_kernel_pmap()) {
1530 		return KERN_NOT_FOUND;
1531 	}
1532 	txm_addr_space = pmap_txm_addr_space(pmap);
1533 
1534 	pmap_txm_acquire_shared_lock(pmap);
1535 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1536 	pmap_txm_release_shared_lock(pmap);
1537 
1538 	if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
1539 		*kernel_entitlements = (const void*)txm_call.return_words[0];
1540 	}
1541 	return ret;
1542 }
1543 
1544 kern_return_t
1545 txm_accelerate_entitlements(
1546 	void *sig_obj,
1547 	CEQueryContext_t *ce_ctx)
1548 {
1549 	txm_call_t txm_call = {
1550 		.selector = kTXMKernelSelectorAccelerateEntitlements,
1551 		.num_input_args = 1,
1552 		.num_output_args = 1,
1553 	};
1554 	kern_return_t ret = KERN_DENIED;
1555 
1556 	ret = txm_kernel_call(&txm_call, sig_obj);
1557 	if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
1558 		*ce_ctx = (CEQueryContext_t)txm_call.return_words[0];
1559 	}
1560 
1561 	return ret;
1562 }
1563 
1564 #pragma mark Image4
1565 
1566 void*
1567 txm_image4_storage_data(
1568 	__unused size_t *allocated_size)
1569 {
1570 	/*
1571 	 * AppleImage4 builds a variant of its library which TXM links against
1572 	 * statically, thereby removing the need for the kernel to allocate this
1573 	 * data on behalf of the kernel extension.
1574 	 */
1575 	panic("unsupported AppleImage4 interface");
1576 }
1577 
1578 void
1579 txm_image4_set_nonce(
1580 	const img4_nonce_domain_index_t ndi,
1581 	const img4_nonce_t *nonce)
1582 {
1583 	txm_call_t txm_call = {
1584 		.selector = kTXMKernelSelectorImage4SetNonce,
1585 		.failure_fatal = true,
1586 		.num_input_args = 2,
1587 	};
1588 
1589 	txm_kernel_call(&txm_call, ndi, nonce);
1590 }
1591 
1592 void
1593 txm_image4_roll_nonce(
1594 	const img4_nonce_domain_index_t ndi)
1595 {
1596 	txm_call_t txm_call = {
1597 		.selector = kTXMKernelSelectorImage4RollNonce,
1598 		.failure_fatal = true,
1599 		.num_input_args = 1,
1600 	};
1601 
1602 	txm_kernel_call(&txm_call, ndi);
1603 }
1604 
1605 errno_t
1606 txm_image4_copy_nonce(
1607 	const img4_nonce_domain_index_t ndi,
1608 	img4_nonce_t *nonce_out)
1609 {
1610 	txm_call_t txm_call = {
1611 		.selector = kTXMKernelSelectorImage4GetNonce,
1612 		.num_input_args = 1,
1613 		.num_output_args = 1,
1614 	};
1615 	const img4_nonce_t *nonce = NULL;
1616 	TXMReturn_t txm_ret = {0};
1617 	kern_return_t ret = KERN_DENIED;
1618 
1619 	ret = txm_kernel_call(&txm_call, ndi);
1620 	if (ret != KERN_SUCCESS) {
1621 		txm_ret = txm_call.txm_ret;
1622 		if (txm_ret.returnCode != kTXMReturnCodeErrno) {
1623 			return EPERM;
1624 		}
1625 		return txm_ret.errnoRet;
1626 	}
1627 
1628 	/* Acquire a pointer to the nonce from TXM */
1629 	nonce = (const img4_nonce_t*)txm_call.return_words[0];
1630 
1631 	if (nonce_out) {
1632 		*nonce_out = *nonce;
1633 	}
1634 	return 0;
1635 }
1636 
1637 errno_t
1638 txm_image4_execute_object(
1639 	img4_runtime_object_spec_index_t obj_spec_index,
1640 	const img4_buff_t *payload,
1641 	const img4_buff_t *manifest)
1642 {
1643 	/* Not supported within TXM yet */
1644 	(void)obj_spec_index;
1645 	(void)payload;
1646 	(void)manifest;
1647 
1648 	printf("image4 object execution isn't supported by TXM\n");
1649 	return ENOSYS;
1650 }
1651 
1652 errno_t
1653 txm_image4_copy_object(
1654 	img4_runtime_object_spec_index_t obj_spec_index,
1655 	vm_address_t object_out,
1656 	size_t *object_length)
1657 {
1658 	/* Not supported within TXM yet */
1659 	(void)obj_spec_index;
1660 	(void)object_out;
1661 	(void)object_length;
1662 
1663 	printf("image4 object copying isn't supported by TXM\n");
1664 	return ENOSYS;
1665 }
1666 
1667 const void*
1668 txm_image4_get_monitor_exports(void)
1669 {
1670 	txm_call_t txm_call = {
1671 		.selector = kTXMKernelSelectorImage4GetExports,
1672 		.failure_fatal = true,
1673 		.num_output_args = 1,
1674 	};
1675 
1676 	txm_kernel_call(&txm_call);
1677 	return (const void*)txm_call.return_words[0];
1678 }
1679 
1680 errno_t
1681 txm_image4_set_release_type(
1682 	const char *release_type)
1683 {
1684 	txm_call_t txm_call = {
1685 		.selector = kTXMKernelSelectorImage4SetReleaseType,
1686 		.failure_fatal = true,
1687 		.num_input_args = 1,
1688 	};
1689 
1690 	/* Set the release type -- cannot fail */
1691 	txm_kernel_call(&txm_call, release_type);
1692 
1693 	return 0;
1694 }
1695 
1696 errno_t
1697 txm_image4_set_bnch_shadow(
1698 	const img4_nonce_domain_index_t ndi)
1699 {
1700 	txm_call_t txm_call = {
1701 		.selector = kTXMKernelSelectorImage4SetBootNonceShadow,
1702 		.failure_fatal = true,
1703 		.num_input_args = 1,
1704 	};
1705 
1706 	/* Set the boot nonce shadow -- cannot fail */
1707 	txm_kernel_call(&txm_call, ndi);
1708 
1709 	return 0;
1710 }
1711 
1712 #pragma mark Image4 - New
1713 
1714 static inline bool
1715 _txm_image4_monitor_trap_supported(
1716 	image4_cs_trap_t selector)
1717 {
1718 	switch (selector) {
1719 #if kTXMImage4APIVersion >= 1
1720 	case IMAGE4_CS_TRAP_KMOD_SET_RELEASE_TYPE:
1721 	case IMAGE4_CS_TRAP_NONCE_SET:
1722 	case IMAGE4_CS_TRAP_NONCE_ROLL:
1723 	case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1724 		return true;
1725 #endif
1726 
1727 	default:
1728 		return false;
1729 	}
1730 }
1731 
1732 kern_return_t
1733 txm_image4_transfer_region(
1734 	image4_cs_trap_t selector,
1735 	vm_address_t region_addr,
1736 	vm_size_t region_size)
1737 {
1738 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1739 		txm_transfer_region(region_addr, region_size);
1740 	}
1741 	return KERN_SUCCESS;
1742 }
1743 
1744 kern_return_t
1745 txm_image4_reclaim_region(
1746 	image4_cs_trap_t selector,
1747 	vm_address_t region_addr,
1748 	vm_size_t region_size)
1749 {
1750 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1751 		txm_reclaim_region(region_addr, region_size);
1752 	}
1753 	return KERN_SUCCESS;
1754 }
1755 
1756 errno_t
1757 txm_image4_monitor_trap(
1758 	image4_cs_trap_t selector,
1759 	const void *input_data,
1760 	size_t input_size)
1761 {
1762 	txm_call_t txm_call = {
1763 		.selector = kTXMKernelSelectorImage4Dispatch,
1764 		.num_input_args = 5,
1765 	};
1766 
1767 	kern_return_t ret = txm_kernel_call(
1768 		&txm_call, selector,
1769 		input_data, input_size,
1770 		NULL, NULL);
1771 
1772 	/* Return 0 for success */
1773 	if (ret == KERN_SUCCESS) {
1774 		return 0;
1775 	}
1776 
1777 	/* Check for an errno_t return */
1778 	if (txm_call.txm_ret.returnCode == kTXMReturnCodeErrno) {
1779 		if (txm_call.txm_ret.errnoRet == 0) {
1780 			panic("image4 dispatch: unexpected success errno_t: %llu", selector);
1781 		}
1782 		return txm_call.txm_ret.errnoRet;
1783 	}
1784 
1785 	/* Return a generic error */
1786 	return EPERM;
1787 }
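
/*
 * Illustrative call sequence (hypothetical caller, not part of the original
 * source): for selectors gated by _txm_image4_monitor_trap_supported(), the
 * input region is transferred to TXM before the trap and reclaimed after it;
 * for all other selectors the transfer/reclaim helpers are no-ops.
 */
#if 0 /* example only */
static errno_t
example_image4_dispatch(image4_cs_trap_t selector, void *data, size_t size)
{
	errno_t err;

	txm_image4_transfer_region(selector, (vm_address_t)data, size);
	err = txm_image4_monitor_trap(selector, data, size);
	txm_image4_reclaim_region(selector, (vm_address_t)data, size);

	return err;
}
#endif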
1788 
1789 
1790 #endif /* CONFIG_SPTM */
1791