1 /*
2 * Copyright (c) 2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern_xnu.h>
27 #include <kern/zalloc.h>
28 #include <kern/kalloc.h>
29 #include <kern/assert.h>
30 #include <kern/locks.h>
31 #include <kern/lock_rw.h>
32 #include <libkern/libkern.h>
33 #include <libkern/section_keywords.h>
34 #include <libkern/coretrust/coretrust.h>
35 #include <pexpert/pexpert.h>
36 #include <sys/vm.h>
37 #include <sys/proc.h>
38 #include <sys/codesign.h>
39 #include <sys/code_signing.h>
40 #include <uuid/uuid.h>
41 #include <IOKit/IOBSD.h>
42
43 #if PMAP_CS_PPL_MONITOR
44 /*
45 * The Page Protection Layer implements the PMAP_CS monitor environment, which
46 * provides code signing and memory isolation enforcement for data structures
47 * which are critical to ensuring that all code executed on the system is
48 * authorized to do so.
49 *
50 * Unless the data is managed by the PPL itself, XNU needs to page-align everything,
51 * and then reference the memory as read-only.
52 */
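/*
 * A minimal sketch (hypothetical, not part of this file) of the page-align-and-
 * hand-off pattern described above, mirroring what the wrappers below do before
 * calling into the PPL. Kept under "#if 0" since it is illustrative only.
 */
#if 0
/* Hypothetical helper: the name below is illustrative and not part of XNU */
static kern_return_t
ppl_example_hand_off(const void *data, size_t data_size)
{
	vm_address_t copy_addr = 0;
	vm_size_t copy_size = round_page(data_size);
	kern_return_t ret = KERN_DENIED;

	/* Fresh, zero-filled, page-aligned kernel memory which the PPL can lock down */
	ret = kmem_alloc(kernel_map, &copy_addr, copy_size,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	/* Stage the data into the aligned buffer before handing it to the PPL */
	memcpy((void*)copy_addr, data, data_size);

	/* ...call the relevant pmap_* entry point with copy_addr and copy_size... */

	/* A real caller would kmem_free() the buffer if the PPL rejected it */
	return KERN_SUCCESS;
}
#endif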
53
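/*
 * Physical aperture helpers: kvtophys_nofail() resolves a kernel virtual address
 * to its physical address, and phystokv() returns the kernel virtual alias of a
 * physical address within the physical aperture (physmap).
 */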
54 typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
55 extern vm_map_address_t phystokv(pmap_paddr_t pa);
56 extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
57
58 #pragma mark Initialization
59
60 void
61 code_signing_init()
62 {
63 /* Does nothing */
64 }
65
66 void
67 ppl_enter_lockdown_mode(void)
68 {
69 /*
70 * This function is expected to be called before read-only lockdown on the
71 * system. As a result, the PPL variable should be mutable. If not, then we
72 * will panic (as we should).
73 */
74 ppl_lockdown_mode_enabled = true;
75
76 printf("entered lockdown mode policy for the PPL");
77 }
78
79 kern_return_t
80 ppl_secure_channel_shared_page(
81 __unused uint64_t *secure_channel_phys,
82 __unused size_t *secure_channel_size)
83 {
84 return KERN_NOT_SUPPORTED;
85 }
86
87 #pragma mark Developer Mode
88
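/*
 * The kernel-visible developer mode flag aliases storage which is protected by
 * the PPL; the state itself can only be flipped by trapping into the monitor
 * through pmap_toggle_developer_mode() below.
 */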
89 SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = &ppl_developer_mode_storage;
90
91 void
92 ppl_toggle_developer_mode(
93 bool state)
94 {
95 pmap_toggle_developer_mode(state);
96 }
97
98 #pragma mark Restricted Execution Mode
99
100 kern_return_t
101 ppl_rem_enable(void)
102 {
103 return KERN_NOT_SUPPORTED;
104 }
105
106 kern_return_t
107 ppl_rem_state(void)
108 {
109 return KERN_NOT_SUPPORTED;
110 }
111
112 #pragma mark Device State
113
114 void
115 ppl_update_device_state(void)
116 {
117 /* Does nothing */
118 }
119
120 void
121 ppl_complete_security_boot_mode(
122 __unused uint32_t security_boot_mode)
123 {
124 /* Does nothing */
125 }
126
127 #pragma mark Code Signing and Provisioning Profiles
128
129 bool
130 ppl_code_signing_enabled(void)
131 {
132 return pmap_cs_enabled();
133 }
134
135 kern_return_t
136 ppl_register_provisioning_profile(
137 const void *profile_blob,
138 const size_t profile_blob_size,
139 void **profile_obj)
140 {
141 pmap_profile_payload_t *pmap_payload = NULL;
142 vm_address_t payload_addr = 0;
143 vm_size_t payload_size = 0;
144 vm_size_t payload_size_aligned = 0;
145 kern_return_t ret = KERN_DENIED;
146
147 if (os_add_overflow(sizeof(*pmap_payload), profile_blob_size, &payload_size)) {
148 panic("attempted to load a too-large profile: %lu bytes", profile_blob_size);
149 }
150 payload_size_aligned = round_page(payload_size);
151
152 ret = kmem_alloc(kernel_map, &payload_addr, payload_size_aligned,
153 KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
154 if (ret != KERN_SUCCESS) {
155 printf("unable to allocate memory for pmap profile payload: %d\n", ret);
156 goto exit;
157 }
158
159 /* We need to set up the payload before we send it to the PPL */
160 pmap_payload = (pmap_profile_payload_t*)payload_addr;
161
162 pmap_payload->profile_blob_size = profile_blob_size;
163 memcpy(pmap_payload->profile_blob, profile_blob, profile_blob_size);
164
165 ret = pmap_register_provisioning_profile(payload_addr, payload_size_aligned);
166 if (ret == KERN_SUCCESS) {
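		/*
		 * The profile object lives inside the payload that was just handed off;
		 * hand back its physical-aperture (physmap) alias so later references go
		 * through the physmap rather than the original kernel_map allocation.
		 */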
167 *profile_obj = &pmap_payload->profile_obj_storage;
168 *profile_obj = (pmap_cs_profile_t*)phystokv(kvtophys_nofail((vm_offset_t)*profile_obj));
169 }
170
171 exit:
172 if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
173 kmem_free(kernel_map, payload_addr, payload_size_aligned);
174 payload_addr = 0;
175 payload_size_aligned = 0;
176 }
177
178 return ret;
179 }
180
181 kern_return_t
182 ppl_trust_provisioning_profile(
183 __unused void *profile_obj,
184 __unused const void *sig_data,
185 __unused size_t sig_size)
186 {
187 /* PPL does not support profile trust */
188 return KERN_SUCCESS;
189 }
190
191 kern_return_t
192 ppl_unregister_provisioning_profile(
193 void *profile_obj)
194 {
195 pmap_cs_profile_t *ppl_profile_obj = profile_obj;
196 kern_return_t ret = KERN_DENIED;
197
198 ret = pmap_unregister_provisioning_profile(ppl_profile_obj);
199 if (ret != KERN_SUCCESS) {
200 return ret;
201 }
202
203 /* Get the original payload address */
204 const pmap_profile_payload_t *pmap_payload = ppl_profile_obj->original_payload;
205 const vm_address_t payload_addr = (const vm_address_t)pmap_payload;
206
207 /* Get the original payload size */
208 vm_size_t payload_size = pmap_payload->profile_blob_size + sizeof(*pmap_payload);
209 payload_size = round_page(payload_size);
210
211 /* Free the payload */
212 kmem_free(kernel_map, payload_addr, payload_size);
213 pmap_payload = NULL;
214
215 return KERN_SUCCESS;
216 }
217
218 kern_return_t
219 ppl_associate_provisioning_profile(
220 void *sig_obj,
221 void *profile_obj)
222 {
223 return pmap_associate_provisioning_profile(sig_obj, profile_obj);
224 }
225
226 kern_return_t
227 ppl_disassociate_provisioning_profile(
228 void *sig_obj)
229 {
230 return pmap_disassociate_provisioning_profile(sig_obj);
231 }
232
233 void
234 ppl_set_compilation_service_cdhash(
235 const uint8_t cdhash[CS_CDHASH_LEN])
236 {
237 pmap_set_compilation_service_cdhash(cdhash);
238 }
239
240 bool
241 ppl_match_compilation_service_cdhash(
242 const uint8_t cdhash[CS_CDHASH_LEN])
243 {
244 return pmap_match_compilation_service_cdhash(cdhash);
245 }
246
247 void
248 ppl_set_local_signing_public_key(
249 const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
250 {
251 return pmap_set_local_signing_public_key(public_key);
252 }
253
254 uint8_t*
255 ppl_get_local_signing_public_key(void)
256 {
257 return pmap_get_local_signing_public_key();
258 }
259
260 void
261 ppl_unrestrict_local_signing_cdhash(
262 const uint8_t cdhash[CS_CDHASH_LEN])
263 {
264 pmap_unrestrict_local_signing(cdhash);
265 }
266
267 vm_size_t
268 ppl_managed_code_signature_size(void)
269 {
270 return pmap_cs_blob_limit;
271 }
272
273 kern_return_t
274 ppl_register_code_signature(
275 const vm_address_t signature_addr,
276 const vm_size_t signature_size,
277 const vm_offset_t code_directory_offset,
278 const char *signature_path,
279 void **sig_obj,
280 vm_address_t *ppl_signature_addr)
281 {
282 pmap_cs_code_directory_t *cd_entry = NULL;
283
284 /* PPL doesn't care about the signature path */
285 (void)signature_path;
286
287 kern_return_t ret = pmap_cs_register_code_signature_blob(
288 signature_addr,
289 signature_size,
290 code_directory_offset,
291 (pmap_cs_code_directory_t**)sig_obj);
292
293 if (ret != KERN_SUCCESS) {
294 return ret;
295 }
296 cd_entry = *((pmap_cs_code_directory_t**)sig_obj);
297
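	/*
	 * cd_entry->superblob refers to the signature copy now managed by the PPL,
	 * so callers can use this address to reference the registered blob directly.
	 */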
298 if (ppl_signature_addr) {
299 *ppl_signature_addr = (vm_address_t)cd_entry->superblob;
300 }
301
302 return KERN_SUCCESS;
303 }
304
305 kern_return_t
306 ppl_unregister_code_signature(
307 void *sig_obj)
308 {
309 return pmap_cs_unregister_code_signature_blob(sig_obj);
310 }
311
312 kern_return_t
313 ppl_verify_code_signature(
314 void *sig_obj)
315 {
316 return pmap_cs_verify_code_signature_blob(sig_obj);
317 }
318
319 kern_return_t
320 ppl_reconstitute_code_signature(
321 void *sig_obj,
322 vm_address_t *unneeded_addr,
323 vm_size_t *unneeded_size)
324 {
325 return pmap_cs_unlock_unneeded_code_signature(
326 sig_obj,
327 unneeded_addr,
328 unneeded_size);
329 }
330
331 #pragma mark Address Spaces
332
333 kern_return_t
334 ppl_associate_code_signature(
335 pmap_t pmap,
336 void *sig_obj,
337 const vm_address_t region_addr,
338 const vm_size_t region_size,
339 const vm_offset_t region_offset)
340 {
341 return pmap_cs_associate(
342 pmap,
343 sig_obj,
344 region_addr,
345 region_size,
346 region_offset);
347 }
348
349 kern_return_t
350 ppl_allow_jit_region(
351 __unused pmap_t pmap)
352 {
353 /* PPL does not support this API */
354 return KERN_NOT_SUPPORTED;
355 }
356
357 kern_return_t
358 ppl_associate_jit_region(
359 pmap_t pmap,
360 const vm_address_t region_addr,
361 const vm_size_t region_size)
362 {
363 return pmap_cs_associate(
364 pmap,
365 PMAP_CS_ASSOCIATE_JIT,
366 region_addr,
367 region_size,
368 0);
369 }
370
371 kern_return_t
372 ppl_associate_debug_region(
373 pmap_t pmap,
374 const vm_address_t region_addr,
375 const vm_size_t region_size)
376 {
377 return pmap_cs_associate(
378 pmap,
379 PMAP_CS_ASSOCIATE_COW,
380 region_addr,
381 region_size,
382 0);
383 }
384
385 kern_return_t
386 ppl_address_space_debugged(
387 pmap_t pmap)
388 {
389 /*
390 * ppl_associate_debug_region is idempotent: it simply checks whether the
391 * address space is already being debugged and returns a value based on that.
392 * The memory region itself is never inserted into the address space, so we
393 * can pass an arbitrary region here. The only caveat is that the region
394 * needs to be page-aligned and cannot be NULL.
395 */
396 return ppl_associate_debug_region(pmap, PAGE_SIZE, PAGE_SIZE);
397 }
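/*
 * Hypothetical usage sketch (not part of this file): a caller only needs the
 * return value to learn whether an address space is currently being debugged.
 */
#if 0
static bool
example_pmap_is_debugged(pmap_t pmap)
{
	/* KERN_SUCCESS indicates the address space already has debug approval */
	return ppl_address_space_debugged(pmap) == KERN_SUCCESS;
}
#endif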
398
399 kern_return_t
400 ppl_allow_invalid_code(
401 pmap_t pmap)
402 {
403 return pmap_cs_allow_invalid(pmap);
404 }
405
406 kern_return_t
407 ppl_get_trust_level_kdp(
408 pmap_t pmap,
409 uint32_t *trust_level)
410 {
411 return pmap_get_trust_level_kdp(pmap, trust_level);
412 }
413
414 kern_return_t
415 ppl_get_jit_address_range_kdp(
416 pmap_t pmap,
417 uintptr_t *jit_region_start,
418 uintptr_t *jit_region_end)
419 {
420 return pmap_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
421 }
422
423 kern_return_t
424 ppl_address_space_exempt(
425 const pmap_t pmap)
426 {
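	/* Only address spaces which undergo stage-2 translations (e.g. guest VMs) are exempt */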
427 if (pmap_performs_stage2_translations(pmap) == true) {
428 return KERN_SUCCESS;
429 }
430
431 return KERN_DENIED;
432 }
433
434 kern_return_t
435 ppl_fork_prepare(
436 pmap_t old_pmap,
437 pmap_t new_pmap)
438 {
439 return pmap_cs_fork_prepare(old_pmap, new_pmap);
440 }
441
442 kern_return_t
443 ppl_acquire_signing_identifier(
444 const void *sig_obj,
445 const char **signing_id)
446 {
447 const pmap_cs_code_directory_t *cd_entry = sig_obj;
448
449 /* If we reach here, the identifier must have been set up */
450 assert(cd_entry->identifier != NULL);
451
452 if (signing_id) {
453 *signing_id = cd_entry->identifier;
454 }
455
456 return KERN_SUCCESS;
457 }
458
459 #pragma mark Entitlements
460
461 kern_return_t
462 ppl_associate_kernel_entitlements(
463 void *sig_obj,
464 const void *kernel_entitlements)
465 {
466 pmap_cs_code_directory_t *cd_entry = sig_obj;
467 return pmap_associate_kernel_entitlements(cd_entry, kernel_entitlements);
468 }
469
470 kern_return_t
471 ppl_resolve_kernel_entitlements(
472 pmap_t pmap,
473 const void **kernel_entitlements)
474 {
475 kern_return_t ret = KERN_DENIED;
476 const void *entitlements = NULL;
477
478 ret = pmap_resolve_kernel_entitlements(pmap, &entitlements);
479 if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
480 *kernel_entitlements = entitlements;
481 }
482
483 return ret;
484 }
485
486 kern_return_t
487 ppl_accelerate_entitlements(
488 void *sig_obj,
489 CEQueryContext_t *ce_ctx)
490 {
491 pmap_cs_code_directory_t *cd_entry = sig_obj;
492 kern_return_t ret = KERN_DENIED;
493
494 ret = pmap_accelerate_entitlements(cd_entry);
495
496 /*
497 * We only ever get KERN_ABORTED when the entitlements cannot be accelerated
498 * because doing so would consume too much memory. In this case, we still want
499 * to return the ce_ctx since we don't want the system to fall back to non-PPL
500 * locked-down memory, so we treat this as a success case.
501 */
502 if (ret == KERN_ABORTED) {
503 ret = KERN_SUCCESS;
504 }
505
506 /* Return the accelerated context to the caller */
507 if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
508 *ce_ctx = cd_entry->ce_ctx;
509 }
510
511 return ret;
512 }
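/*
 * Hypothetical usage sketch (not part of this file): the caller hands in the
 * signature object and, on success, receives the CEQueryContext_t it should use
 * for subsequent entitlement queries.
 */
#if 0
static bool
example_acquire_query_context(void *sig_obj, CEQueryContext_t *ce_ctx_out)
{
	/* The accelerated context is only valid when the wrapper reports success */
	return ppl_accelerate_entitlements(sig_obj, ce_ctx_out) == KERN_SUCCESS;
}
#endif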
513
514 #pragma mark Image4
515
516 void*
517 ppl_image4_storage_data(
518 size_t *allocated_size)
519 {
520 return pmap_image4_pmap_data(allocated_size);
521 }
522
523 void
524 ppl_image4_set_nonce(
525 const img4_nonce_domain_index_t ndi,
526 const img4_nonce_t *nonce)
527 {
528 return pmap_image4_set_nonce(ndi, nonce);
529 }
530
531 void
532 ppl_image4_roll_nonce(
533 const img4_nonce_domain_index_t ndi)
534 {
535 return pmap_image4_roll_nonce(ndi);
536 }
537
538 errno_t
539 ppl_image4_copy_nonce(
540 const img4_nonce_domain_index_t ndi,
541 img4_nonce_t *nonce_out)
542 {
543 return pmap_image4_copy_nonce(ndi, nonce_out);
544 }
545
546 errno_t
547 ppl_image4_execute_object(
548 img4_runtime_object_spec_index_t obj_spec_index,
549 const img4_buff_t *payload,
550 const img4_buff_t *manifest)
551 {
552 errno_t err = EINVAL;
553 kern_return_t kr = KERN_DENIED;
554 img4_buff_t payload_aligned = IMG4_BUFF_INIT;
555 img4_buff_t manifest_aligned = IMG4_BUFF_INIT;
556 vm_address_t payload_addr = 0;
557 vm_size_t payload_len_aligned = 0;
558 vm_address_t manifest_addr = 0;
559 vm_size_t manifest_len_aligned = 0;
560
561 if (payload == NULL) {
562 printf("invalid object execution request: no payload\n");
563 goto out;
564 }
565
566 /*
567 * The PPL will attempt to lock down both the payload and the manifest before executing
568 * the object. In order for that to happen, both artifacts need to be page-aligned.
569 */
570 payload_len_aligned = round_page(payload->i4b_len);
571 if (manifest != NULL) {
572 manifest_len_aligned = round_page(manifest->i4b_len);
573 }
574
575 kr = kmem_alloc(
576 kernel_map,
577 &payload_addr,
578 payload_len_aligned,
579 KMA_KOBJECT,
580 VM_KERN_MEMORY_SECURITY);
581
582 if (kr != KERN_SUCCESS) {
583 printf("unable to allocate memory for image4 payload: %d\n", kr);
584 err = ENOMEM;
585 goto out;
586 }
587
588 /* Copy in the payload */
589 memcpy((uint8_t*)payload_addr, payload->i4b_bytes, payload->i4b_len);
590
591 /* Construct the aligned payload buffer */
592 payload_aligned.i4b_bytes = (uint8_t*)payload_addr;
593 payload_aligned.i4b_len = payload->i4b_len;
594
595 if (manifest != NULL) {
596 kr = kmem_alloc(
597 kernel_map,
598 &manifest_addr,
599 manifest_len_aligned,
600 KMA_KOBJECT,
601 VM_KERN_MEMORY_SECURITY);
602
603 if (kr != KERN_SUCCESS) {
604 printf("unable to allocate memory for image4 manifest: %d\n", kr);
605 err = ENOMEM;
606 goto out;
607 }
608
609 /* Construct the aligned manifest buffer */
610 manifest_aligned.i4b_bytes = (uint8_t*)manifest_addr;
611 manifest_aligned.i4b_len = manifest->i4b_len;
612
613 /* Copy in the manifest */
614 memcpy((uint8_t*)manifest_addr, manifest->i4b_bytes, manifest->i4b_len);
615 }
616
617 err = pmap_image4_execute_object(obj_spec_index, &payload_aligned, &manifest_aligned);
618 if (err != 0) {
619 printf("unable to execute image4 object: %d\n", err);
620 goto out;
621 }
622
623 out:
624 /* We always free the manifest as it isn't required anymore */
625 if (manifest_addr != 0) {
626 kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
627 manifest_addr = 0;
628 manifest_len_aligned = 0;
629 }
630
631 /* If we encountered an error -- free the allocated payload */
632 if ((err != 0) && (payload_addr != 0)) {
633 kmem_free(kernel_map, payload_addr, payload_len_aligned);
634 payload_addr = 0;
635 payload_len_aligned = 0;
636 }
637
638 return err;
639 }
640
641 errno_t
642 ppl_image4_copy_object(
643 img4_runtime_object_spec_index_t obj_spec_index,
644 vm_address_t object_out,
645 size_t *object_length)
646 {
647 errno_t err = EINVAL;
648 kern_return_t kr = KERN_DENIED;
649 vm_address_t object_addr = 0;
650 vm_size_t object_len_aligned = 0;
651
652 if (object_out == 0) {
653 printf("invalid object copy request: no object input buffer\n");
654 goto out;
655 } else if (object_length == NULL) {
656 printf("invalid object copy request: no object input length\n");
657 goto out;
658 }
659
660 /*
661 * The PPL will attempt to pin the input buffer in order to ensure that the kernel
662 * didn't pass in PPL-owned buffers. The PPL cannot pin the same page more than once,
663 * and attempting to do so will panic the system. Hence, we allocate fresh pages
664 * for the PPL to pin.
665 *
666 * We can send in the address for the length pointer since that is allocated on the
667 * stack, so the PPL can pin our stack for the duration of the call as no other
668 * thread can be using our stack, meaning the PPL will never attempt to double-pin
669 * the page.
670 */
671 object_len_aligned = round_page(*object_length);
672
673 kr = kmem_alloc(
674 kernel_map,
675 &object_addr,
676 object_len_aligned,
677 KMA_KOBJECT,
678 VM_KERN_MEMORY_SECURITY);
679
680 if (kr != KERN_SUCCESS) {
681 printf("unable to allocate memory for image4 object: %d\n", kr);
682 err = ENOMEM;
683 goto out;
684 }
685
686 err = pmap_image4_copy_object(obj_spec_index, object_addr, object_length);
687 if (err != 0) {
688 printf("unable to copy image4 object: %d\n", err);
689 goto out;
690 }
691
692 /* Copy the data back into the caller passed buffer */
693 memcpy((void*)object_out, (void*)object_addr, *object_length);
694
695 out:
696 /* We don't ever need to keep around our page-aligned buffer */
697 if (object_addr != 0) {
698 kmem_free(kernel_map, object_addr, object_len_aligned);
699 object_addr = 0;
700 object_len_aligned = 0;
701 }
702
703 return err;
704 }
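/*
 * Hypothetical usage sketch (not part of this file): the caller provides its own
 * buffer along with the buffer's length; on success the length reflects the
 * number of bytes actually copied back out.
 */
#if 0
static errno_t
example_copy_image4_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	void *buffer,
	size_t buffer_size,
	size_t *copied_size)
{
	/* In: capacity of the caller's buffer. Out: size of the copied object. */
	size_t object_length = buffer_size;
	errno_t err = ppl_image4_copy_object(obj_spec_index, (vm_address_t)buffer, &object_length);

	if ((err == 0) && (copied_size != NULL)) {
		*copied_size = object_length;
	}
	return err;
}
#endif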
705
706 const void*
707 ppl_image4_get_monitor_exports(void)
708 {
709 /*
710 * AppleImage4 can query the PMAP_CS runtime on its own since the PMAP_CS
711 * runtime is compiled within the kernel extension itself. As a result, we
712 * never expect this KPI to be called when the system uses the PPL monitor.
713 */
714
715 printf("explicit monitor-exports-get not required for the PPL\n");
716 return NULL;
717 }
718
719 errno_t
720 ppl_image4_set_release_type(
721 __unused const char *release_type)
722 {
723 /*
724 * AppleImage4 stores the release type in the CTRR protected memory region
725 * of its kernel extension. This is accessible by the PMAP_CS runtime as the
726 * runtime is compiled alongside the kernel extension. As a result, we never
727 * expect this KPI to be called when the system uses the PPL monitor.
728 */
729
730 printf("explicit release-type-set set not required for the PPL\n");
731 return ENOTSUP;
732 }
733
734 errno_t
735 ppl_image4_set_bnch_shadow(
736 __unused const img4_nonce_domain_index_t ndi)
737 {
738 /*
739 * AppleImage4 stores the BNCH shadow in the CTRR protected memory region
740 * of its kernel extension. This is accessible by the PMAP_CS runtime as the
741 * runtime is compiled alongside the kernel extension. As a result, we never
742 * expect this KPI to be called when the system uses the PPL monitor.
743 */
744
745 printf("explicit BNCH-shadow-set not required for the PPL\n");
746 return ENOTSUP;
747 }
748
749 #pragma mark Image4 - New
750
751 kern_return_t
752 ppl_image4_transfer_region(
753 __unused image4_cs_trap_t selector,
754 __unused vm_address_t region_addr,
755 __unused vm_size_t region_size)
756 {
757 /* All region transfers happen internally within the PPL */
758 return KERN_SUCCESS;
759 }
760
761 kern_return_t
762 ppl_image4_reclaim_region(
763 __unused image4_cs_trap_t selector,
764 __unused vm_address_t region_addr,
765 __unused vm_size_t region_size)
766 {
767 /* All region transfers happen internally within the PPL */
768 return KERN_SUCCESS;
769 }
770
771 errno_t
772 ppl_image4_monitor_trap(
773 image4_cs_trap_t selector,
774 const void *input_data,
775 size_t input_size)
776 {
777 return pmap_image4_monitor_trap(selector, input_data, input_size);
778 }
779
780 #endif /* PMAP_CS_PPL_MONITOR */
781