xref: /xnu-11215/bsd/kern/ubc_subr.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  *	File:	ubc_subr.c
30  *	Author:	Umesh Vaishampayan [[email protected]]
31  *		05-Aug-1999	umeshv	Created.
32  *
33  *	Functions related to Unified Buffer cache.
34  *
35  * Caller of UBC functions MUST have a valid reference on the vnode.
36  *
37  */
38 
39 #include <sys/types.h>
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/lock.h>
43 #include <sys/mman.h>
44 #include <sys/mount_internal.h>
45 #include <sys/vnode_internal.h>
46 #include <sys/ubc_internal.h>
47 #include <sys/ucred.h>
48 #include <sys/proc_internal.h>
49 #include <sys/kauth.h>
50 #include <sys/buf.h>
51 #include <sys/user.h>
52 #include <sys/codesign.h>
53 #include <sys/codedir_internal.h>
54 #include <sys/fsevents.h>
55 #include <sys/fcntl.h>
56 #include <sys/reboot.h>
57 #include <sys/code_signing.h>
58 
59 #include <mach/mach_types.h>
60 #include <mach/memory_object_types.h>
61 #include <mach/memory_object_control.h>
62 #include <mach/vm_map.h>
63 #include <mach/mach_vm.h>
64 #include <mach/upl.h>
65 
66 #include <kern/kern_types.h>
67 #include <kern/kalloc.h>
68 #include <kern/zalloc.h>
69 #include <kern/thread.h>
70 #include <vm/pmap.h>
71 #include <vm/vm_pageout.h>
72 #include <vm/vm_map.h>
73 #include <vm/vm_upl.h>
74 #include <vm/vm_kern_xnu.h>
75 #include <vm/vm_protos.h> /* last */
76 #include <vm/vm_ubc.h>
77 
78 #include <libkern/crypto/sha1.h>
79 #include <libkern/crypto/sha2.h>
80 #include <libkern/libkern.h>
81 
82 #include <security/mac_framework.h>
83 #include <stdbool.h>
84 #include <stdatomic.h>
85 #include <libkern/amfi/amfi.h>
86 
87 extern void Debugger(const char *message);
88 
89 #if DIAGNOSTIC
90 #if defined(assert)
91 #undef assert
92 #endif
93 #define assert(cond)    \
94     ((void) ((cond) ? 0 : panic("Assert failed: %s", # cond)))
95 #else
96 #include <kern/assert.h>
97 #endif /* DIAGNOSTIC */
98 
99 static int ubc_info_init_internal(struct vnode *vp, int withfsize, off_t filesize);
100 static int ubc_umcallback(vnode_t, void *);
101 static int ubc_msync_internal(vnode_t, off_t, off_t, off_t *, int, int *);
102 static void ubc_cs_free(struct ubc_info *uip);
103 
104 static boolean_t ubc_cs_supports_multilevel_hash(struct cs_blob *blob);
105 static kern_return_t ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob);
106 
107 ZONE_DEFINE_TYPE(ubc_info_zone, "ubc_info zone", struct ubc_info,
108     ZC_ZFREE_CLEARMEM);
109 static uint32_t cs_blob_generation_count = 1;
110 
111 /*
112  * CODESIGNING
113  * Routines to navigate code signing data structures in the kernel...
114  */
115 
116 ZONE_DEFINE_ID(ZONE_ID_CS_BLOB, "cs_blob zone", struct cs_blob,
117     ZC_READONLY | ZC_ZFREE_CLEARMEM);
118 
119 extern int cs_debug;
120 
121 #define PAGE_SHIFT_4K           (12)
122 
123 static boolean_t
124 cs_valid_range(
125 	const void *start,
126 	const void *end,
127 	const void *lower_bound,
128 	const void *upper_bound)
129 {
130 	if (upper_bound < lower_bound ||
131 	    end < start) {
132 		return FALSE;
133 	}
134 
135 	if (start < lower_bound ||
136 	    end > upper_bound) {
137 		return FALSE;
138 	}
139 
140 	return TRUE;
141 }
142 
143 typedef void (*cs_md_init)(void *ctx);
144 typedef void (*cs_md_update)(void *ctx, const void *data, size_t size);
145 typedef void (*cs_md_final)(void *hash, void *ctx);
146 
147 struct cs_hash {
148 	uint8_t             cs_type;    /* type code as per code signing */
149 	size_t              cs_size;    /* size of effective hash (may be truncated) */
150 	size_t              cs_digest_size;/* size of native hash */
151 	cs_md_init          cs_init;
152 	cs_md_update        cs_update;
153 	cs_md_final         cs_final;
154 };
155 
156 uint8_t
157 cs_hash_type(
158 	struct cs_hash const * const cs_hash)
159 {
160 	return cs_hash->cs_type;
161 }
162 
163 static const struct cs_hash cs_hash_sha1 = {
164 	.cs_type = CS_HASHTYPE_SHA1,
165 	.cs_size = CS_SHA1_LEN,
166 	.cs_digest_size = SHA_DIGEST_LENGTH,
167 	.cs_init = (cs_md_init)SHA1Init,
168 	.cs_update = (cs_md_update)SHA1Update,
169 	.cs_final = (cs_md_final)SHA1Final,
170 };
171 #if CRYPTO_SHA2
172 static const struct cs_hash cs_hash_sha256 = {
173 	.cs_type = CS_HASHTYPE_SHA256,
174 	.cs_size = SHA256_DIGEST_LENGTH,
175 	.cs_digest_size = SHA256_DIGEST_LENGTH,
176 	.cs_init = (cs_md_init)SHA256_Init,
177 	.cs_update = (cs_md_update)SHA256_Update,
178 	.cs_final = (cs_md_final)SHA256_Final,
179 };
180 static const struct cs_hash cs_hash_sha256_truncate = {
181 	.cs_type = CS_HASHTYPE_SHA256_TRUNCATED,
182 	.cs_size = CS_SHA256_TRUNCATED_LEN,
183 	.cs_digest_size = SHA256_DIGEST_LENGTH,
184 	.cs_init = (cs_md_init)SHA256_Init,
185 	.cs_update = (cs_md_update)SHA256_Update,
186 	.cs_final = (cs_md_final)SHA256_Final,
187 };
188 static const struct cs_hash cs_hash_sha384 = {
189 	.cs_type = CS_HASHTYPE_SHA384,
190 	.cs_size = SHA384_DIGEST_LENGTH,
191 	.cs_digest_size = SHA384_DIGEST_LENGTH,
192 	.cs_init = (cs_md_init)SHA384_Init,
193 	.cs_update = (cs_md_update)SHA384_Update,
194 	.cs_final = (cs_md_final)SHA384_Final,
195 };
196 #endif
197 
198 static struct cs_hash const *
199 cs_find_md(uint8_t type)
200 {
201 	if (type == CS_HASHTYPE_SHA1) {
202 		return &cs_hash_sha1;
203 #if CRYPTO_SHA2
204 	} else if (type == CS_HASHTYPE_SHA256) {
205 		return &cs_hash_sha256;
206 	} else if (type == CS_HASHTYPE_SHA256_TRUNCATED) {
207 		return &cs_hash_sha256_truncate;
208 	} else if (type == CS_HASHTYPE_SHA384) {
209 		return &cs_hash_sha384;
210 #endif
211 	}
212 	return NULL;
213 }
214 
215 union cs_hash_union {
216 	SHA1_CTX                sha1ctxt;
217 	SHA256_CTX              sha256ctx;
218 	SHA384_CTX              sha384ctx;
219 };
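
/*
 * Illustrative sketch (editorial addition, wrapped in #if 0 and not part of
 * the original source): how a cs_hash descriptor and a cs_hash_union context
 * cooperate to digest a buffer. This is the same pattern
 * csblob_find_special_slot_blob() uses below; "hashtype", "data" and "size"
 * are hypothetical inputs.
 */
#if 0
static void
cs_hash_example(struct cs_hash const *hashtype, const void *data, size_t size)
{
	union cs_hash_union context;
	uint8_t digest[CS_HASH_MAX_SIZE];

	hashtype->cs_init(&context);
	hashtype->cs_update(&context, data, size);
	hashtype->cs_final(digest, &context);
	/* only the first hashtype->cs_size bytes of digest are compared */
}
#endif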
220 
221 
222 /*
223  * Choose among different hash algorithms.
224  * Higher is better, 0 => don't use at all.
225  */
226 static const uint32_t hashPriorities[] = {
227 	CS_HASHTYPE_SHA1,
228 	CS_HASHTYPE_SHA256_TRUNCATED,
229 	CS_HASHTYPE_SHA256,
230 	CS_HASHTYPE_SHA384,
231 };
232 
233 static unsigned int
234 hash_rank(const CS_CodeDirectory *cd)
235 {
236 	uint32_t type = cd->hashType;
237 	unsigned int n;
238 
239 	for (n = 0; n < sizeof(hashPriorities) / sizeof(hashPriorities[0]); ++n) {
240 		if (hashPriorities[n] == type) {
241 			return n + 1;
242 		}
243 	}
244 	return 0;       /* not supported */
245 }
246 
247 
248 /*
249  * Locating a page hash
250  */
251 static const unsigned char *
252 hashes(
253 	const CS_CodeDirectory *cd,
254 	uint32_t page,
255 	size_t hash_len,
256 	const char *lower_bound,
257 	const char *upper_bound)
258 {
259 	const unsigned char *base, *top, *hash;
260 	uint32_t nCodeSlots = ntohl(cd->nCodeSlots);
261 
262 	assert(cs_valid_range(cd, cd + 1, lower_bound, upper_bound));
263 
264 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
265 		/* Get first scatter struct */
266 		const SC_Scatter *scatter = (const SC_Scatter*)
267 		    ((const char*)cd + ntohl(cd->scatterOffset));
268 		uint32_t hashindex = 0, scount, sbase = 0;
269 		/* iterate all scatter structs */
270 		do {
271 			if ((const char*)scatter > (const char*)cd + ntohl(cd->length)) {
272 				if (cs_debug) {
273 					printf("CODE SIGNING: Scatter extends past Code Directory\n");
274 				}
275 				return NULL;
276 			}
277 
278 			scount = ntohl(scatter->count);
279 			uint32_t new_base = ntohl(scatter->base);
280 
281 			/* last scatter? */
282 			if (scount == 0) {
283 				return NULL;
284 			}
285 
286 			if ((hashindex > 0) && (new_base <= sbase)) {
287 				if (cs_debug) {
288 					printf("CODE SIGNING: unordered Scatter, prev base %d, cur base %d\n",
289 					    sbase, new_base);
290 				}
291 				return NULL;    /* unordered scatter array */
292 			}
293 			sbase = new_base;
294 
295 			/* this scatter beyond page we're looking for? */
296 			if (sbase > page) {
297 				return NULL;
298 			}
299 
300 			if (sbase + scount >= page) {
301 				/* Found the scatter struct that is
302 				 * referencing our page */
303 
304 				/* base = address of first hash covered by scatter */
305 				base = (const unsigned char *)cd + ntohl(cd->hashOffset) +
306 				    hashindex * hash_len;
307 				/* top = address of first hash after this scatter */
308 				top = base + scount * hash_len;
309 				if (!cs_valid_range(base, top, lower_bound,
310 				    upper_bound) ||
311 				    hashindex > nCodeSlots) {
312 					return NULL;
313 				}
314 
315 				break;
316 			}
317 
318 			/* this scatter struct is before the page we're looking
319 			 * for. Iterate. */
320 			hashindex += scount;
321 			scatter++;
322 		} while (1);
323 
324 		hash = base + (page - sbase) * hash_len;
325 	} else {
326 		base = (const unsigned char *)cd + ntohl(cd->hashOffset);
327 		top = base + nCodeSlots * hash_len;
328 		if (!cs_valid_range(base, top, lower_bound, upper_bound) ||
329 		    page > nCodeSlots) {
330 			return NULL;
331 		}
332 		assert(page < nCodeSlots);
333 
334 		hash = base + page * hash_len;
335 	}
336 
337 	if (!cs_valid_range(hash, hash + hash_len,
338 	    lower_bound, upper_bound)) {
339 		hash = NULL;
340 	}
341 
342 	return hash;
343 }
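
/*
 * Sketch (editorial addition, #if 0, not compiled): verifying a single page
 * against the code directory using cs_find_md() and hashes() above. "cd",
 * "lower"/"upper" and "page_data"/"page_size" are hypothetical inputs
 * supplied by a caller that has already validated the blob.
 */
#if 0
static bool
cs_verify_page_example(const CS_CodeDirectory *cd, uint32_t page,
    const char *lower, const char *upper,
    const void *page_data, size_t page_size)
{
	struct cs_hash const *hashtype = cs_find_md(cd->hashType);
	union cs_hash_union context;
	uint8_t actual[CS_HASH_MAX_SIZE];
	const unsigned char *expected;

	if (hashtype == NULL) {
		return false;
	}
	/* expected hash for this page, honoring any scatter structs */
	expected = hashes(cd, page, hashtype->cs_size, lower, upper);
	if (expected == NULL) {
		return false;
	}
	hashtype->cs_init(&context);
	hashtype->cs_update(&context, page_data, page_size);
	hashtype->cs_final(actual, &context);
	return memcmp(actual, expected, hashtype->cs_size) == 0;
}
#endif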
344 
345 /*
346  * cs_validate_codedirectory
347  *
348  * Validate the pointers inside the code directory to make sure that
349  * all offsets and lengths are constrained within the buffer.
350  *
351  * Parameters:	cd			Pointer to code directory buffer
352  *		length			Length of buffer
353  *
354  * Returns:	0			Success
355  *		EBADEXEC		Invalid code signature
356  */
357 
358 static int
359 cs_validate_codedirectory(const CS_CodeDirectory *cd, size_t length)
360 {
361 	struct cs_hash const *hashtype;
362 
363 	if (length < sizeof(*cd)) {
364 		return EBADEXEC;
365 	}
366 	if (ntohl(cd->magic) != CSMAGIC_CODEDIRECTORY) {
367 		return EBADEXEC;
368 	}
369 	if ((cd->pageSize != PAGE_SHIFT_4K) && (cd->pageSize != PAGE_SHIFT)) {
370 		printf("disallowing unsupported code signature page shift: %u\n", cd->pageSize);
371 		return EBADEXEC;
372 	}
373 	hashtype = cs_find_md(cd->hashType);
374 	if (hashtype == NULL) {
375 		return EBADEXEC;
376 	}
377 
378 	if (cd->hashSize != hashtype->cs_size) {
379 		return EBADEXEC;
380 	}
381 
382 	if (length < ntohl(cd->hashOffset)) {
383 		return EBADEXEC;
384 	}
385 
386 	/* check that nSpecialSlots fits in the buffer in front of hashOffset */
387 	if (ntohl(cd->hashOffset) / hashtype->cs_size < ntohl(cd->nSpecialSlots)) {
388 		return EBADEXEC;
389 	}
390 
391 	/* check that codeslots fits in the buffer */
392 	if ((length - ntohl(cd->hashOffset)) / hashtype->cs_size < ntohl(cd->nCodeSlots)) {
393 		return EBADEXEC;
394 	}
395 
396 	if (ntohl(cd->version) >= CS_SUPPORTSSCATTER && cd->scatterOffset) {
397 		if (length < ntohl(cd->scatterOffset)) {
398 			return EBADEXEC;
399 		}
400 
401 		const SC_Scatter *scatter = (const SC_Scatter *)
402 		    (((const uint8_t *)cd) + ntohl(cd->scatterOffset));
403 		uint32_t nPages = 0;
404 
405 		/*
406 		 * Check each scatter buffer: since we don't know the
407 		 * length of the scatter buffer array, we have to
408 		 * check each entry.
409 		 */
410 		while (1) {
411 			/* check that the end of each scatter buffer is within the length */
412 			if (((const uint8_t *)scatter) + sizeof(scatter[0]) > (const uint8_t *)cd + length) {
413 				return EBADEXEC;
414 			}
415 			uint32_t scount = ntohl(scatter->count);
416 			if (scount == 0) {
417 				break;
418 			}
419 			if (nPages + scount < nPages) {
420 				return EBADEXEC;
421 			}
422 			nPages += scount;
423 			scatter++;
424 
425 			/* XXX check that bases don't overlap */
426 			/* XXX check that targetOffset doesn't overlap */
427 		}
428 #if 0 /* rdar://12579439 */
429 		if (nPages != ntohl(cd->nCodeSlots)) {
430 			return EBADEXEC;
431 		}
432 #endif
433 	}
434 
435 	if (length < ntohl(cd->identOffset)) {
436 		return EBADEXEC;
437 	}
438 
439 	/* identifier is a NUL-terminated string */
440 	if (cd->identOffset) {
441 		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->identOffset);
442 		if (memchr(ptr, 0, length - ntohl(cd->identOffset)) == NULL) {
443 			return EBADEXEC;
444 		}
445 	}
446 
447 	/* team identifier is a NUL-terminated string */
448 	if (ntohl(cd->version) >= CS_SUPPORTSTEAMID && ntohl(cd->teamOffset)) {
449 		if (length < ntohl(cd->teamOffset)) {
450 			return EBADEXEC;
451 		}
452 
453 		const uint8_t *ptr = (const uint8_t *)cd + ntohl(cd->teamOffset);
454 		if (memchr(ptr, 0, length - ntohl(cd->teamOffset)) == NULL) {
455 			return EBADEXEC;
456 		}
457 	}
458 
459 	/* linkage is variable length binary data */
460 	if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0) {
461 		const uintptr_t ptr = (uintptr_t)cd + ntohl(cd->linkageOffset);
462 		const uintptr_t ptr_end = ptr + ntohl(cd->linkageSize);
463 
464 		if (ptr_end < ptr || ptr < (uintptr_t)cd || ptr_end > (uintptr_t)cd + length) {
465 			return EBADEXEC;
466 		}
467 	}
468 
469 
470 	return 0;
471 }
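
/*
 * Note on the bounds checks above (editorial addition): the slot-count
 * comparisons are written as divisions (e.g. "(length - hashOffset) /
 * cs_size < nCodeSlots") rather than multiplications ("hashOffset +
 * nCodeSlots * cs_size > length") so that attacker-controlled 32-bit
 * counts cannot overflow the arithmetic.
 */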
472 
473 /*
474  *
475  */
476 
477 static int
478 cs_validate_blob(const CS_GenericBlob *blob, size_t length)
479 {
480 	if (length < sizeof(CS_GenericBlob) || length < ntohl(blob->length)) {
481 		return EBADEXEC;
482 	}
483 	return 0;
484 }
485 
486 /*
487  * cs_validate_csblob
488  *
489  * Validate the superblob/embedded code directory to make sure that
490  * all internal pointers are valid.
491  *
492  * Will validate both a superblob csblob and a "raw" code directory.
493  *
494  *
495  * Parameters:	buffer			Pointer to code signature
496  *		length			Length of buffer
497  *		rcd			returns pointer to code directory
498  *
499  * Returns:	0			Success
500  *		EBADEXEC		Invalid code signature
501  */
502 
503 static int
504 cs_validate_csblob(
505 	const uint8_t *addr,
506 	const size_t blob_size,
507 	const CS_CodeDirectory **rcd,
508 	const CS_GenericBlob **rentitlements,
509 	const CS_GenericBlob **rder_entitlements)
510 {
511 	const CS_GenericBlob *blob;
512 	int error;
513 	size_t length;
514 	const CS_GenericBlob *self_constraint = NULL;
515 	const CS_GenericBlob *parent_constraint = NULL;
516 	const CS_GenericBlob *responsible_proc_constraint = NULL;
517 	const CS_GenericBlob *library_constraint = NULL;
518 
519 	*rcd = NULL;
520 	*rentitlements = NULL;
521 	*rder_entitlements = NULL;
522 
523 	blob = (const CS_GenericBlob *)(const void *)addr;
524 
525 	length = blob_size;
526 	error = cs_validate_blob(blob, length);
527 	if (error) {
528 		return error;
529 	}
530 	length = ntohl(blob->length);
531 
532 	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
533 		const CS_SuperBlob *sb;
534 		uint32_t n, count;
535 		const CS_CodeDirectory *best_cd = NULL;
536 		unsigned int best_rank = 0;
537 #if XNU_PLATFORM_WatchOS
538 		const CS_CodeDirectory *sha1_cd = NULL;
539 #endif
540 
541 		if (length < sizeof(CS_SuperBlob)) {
542 			return EBADEXEC;
543 		}
544 
545 		sb = (const CS_SuperBlob *)blob;
546 		count = ntohl(sb->count);
547 
548 		/* check that the array of BlobIndex fits in the rest of the data */
549 		if ((length - sizeof(CS_SuperBlob)) / sizeof(CS_BlobIndex) < count) {
550 			return EBADEXEC;
551 		}
552 
553 		/* now check each BlobIndex */
554 		for (n = 0; n < count; n++) {
555 			const CS_BlobIndex *blobIndex = &sb->index[n];
556 			uint32_t type = ntohl(blobIndex->type);
557 			uint32_t offset = ntohl(blobIndex->offset);
558 			if (length < offset) {
559 				return EBADEXEC;
560 			}
561 
562 			const CS_GenericBlob *subBlob =
563 			    (const CS_GenericBlob *)(const void *)(addr + offset);
564 
565 			size_t subLength = length - offset;
566 
567 			if ((error = cs_validate_blob(subBlob, subLength)) != 0) {
568 				return error;
569 			}
570 			subLength = ntohl(subBlob->length);
571 
572 			/* extra validation for CDs, that is also returned */
573 			if (type == CSSLOT_CODEDIRECTORY || (type >= CSSLOT_ALTERNATE_CODEDIRECTORIES && type < CSSLOT_ALTERNATE_CODEDIRECTORY_LIMIT)) {
574 				const CS_CodeDirectory *candidate = (const CS_CodeDirectory *)subBlob;
575 				if ((error = cs_validate_codedirectory(candidate, subLength)) != 0) {
576 					return error;
577 				}
578 				unsigned int rank = hash_rank(candidate);
579 				if (cs_debug > 3) {
580 					printf("CodeDirectory type %d rank %d at slot 0x%x index %d\n", candidate->hashType, (int)rank, (int)type, (int)n);
581 				}
582 				if (best_cd == NULL || rank > best_rank) {
583 					best_cd = candidate;
584 					best_rank = rank;
585 
586 					if (cs_debug > 2) {
587 						printf("using CodeDirectory type %d (rank %d)\n", (int)best_cd->hashType, best_rank);
588 					}
589 					*rcd = best_cd;
590 				} else if (best_cd != NULL && rank == best_rank) {
591 					/* repeat of a hash type (1:1 mapped to ranks), illegal and suspicious */
592 					printf("multiple hash=%d CodeDirectories in signature; rejecting\n", best_cd->hashType);
593 					return EBADEXEC;
594 				}
595 #if XNU_PLATFORM_WatchOS
596 				if (candidate->hashType == CS_HASHTYPE_SHA1) {
597 					if (sha1_cd != NULL) {
598 						printf("multiple sha1 CodeDirectories in signature; rejecting\n");
599 						return EBADEXEC;
600 					}
601 					sha1_cd = candidate;
602 				}
603 #endif
604 			} else if (type == CSSLOT_ENTITLEMENTS) {
605 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_ENTITLEMENTS) {
606 					return EBADEXEC;
607 				}
608 				if (*rentitlements != NULL) {
609 					printf("multiple entitlements blobs\n");
610 					return EBADEXEC;
611 				}
612 				*rentitlements = subBlob;
613 			} else if (type == CSSLOT_DER_ENTITLEMENTS) {
614 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_DER_ENTITLEMENTS) {
615 					return EBADEXEC;
616 				}
617 				if (*rder_entitlements != NULL) {
618 					printf("multiple der entitlements blobs\n");
619 					return EBADEXEC;
620 				}
621 				*rder_entitlements = subBlob;
622 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_SELF) {
623 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
624 					return EBADEXEC;
625 				}
626 				if (self_constraint != NULL) {
627 					printf("multiple self constraint blobs\n");
628 					return EBADEXEC;
629 				}
630 				self_constraint = subBlob;
631 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_PARENT) {
632 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
633 					return EBADEXEC;
634 				}
635 				if (parent_constraint != NULL) {
636 					printf("multiple parent constraint blobs\n");
637 					return EBADEXEC;
638 				}
639 				parent_constraint = subBlob;
640 			} else if (type == CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE) {
641 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
642 					return EBADEXEC;
643 				}
644 				if (responsible_proc_constraint != NULL) {
645 					printf("multiple responsible process constraint blobs\n");
646 					return EBADEXEC;
647 				}
648 				responsible_proc_constraint = subBlob;
649 			} else if (type == CSSLOT_LIBRARY_CONSTRAINT) {
650 				if (ntohl(subBlob->magic) != CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT) {
651 					return EBADEXEC;
652 				}
653 				if (library_constraint != NULL) {
654 					printf("multiple library constraint blobs\n");
655 					return EBADEXEC;
656 				}
657 				library_constraint = subBlob;
658 			}
659 		}
660 
661 #if XNU_PLATFORM_WatchOS
662 		/* To keep watchOS fast enough, we have to resort to sha1 for
663 		 * some code.
664 		 *
665 		 * At the time of writing this comment, known sha1 attacks are
666 		 * collision attacks (not preimage or second preimage
667 		 * attacks), which do not apply to platform binaries since
668 		 * they have a fixed hash in the trust cache.  Given this
669 		 * property, we only prefer sha1 code directories for adhoc
670 		 * signatures, which always have to be in a trust cache to be
671 		 * valid (can-load-cdhash does not exist for watchOS). Those
672 		 * are, incidentally, also the platform binaries, for which we
673 		 * care about the performance hit that sha256 would bring us.
674 		 *
675 		 * Platform binaries may still contain a (not chosen) sha256
676 		 * code directory, which keeps software updates that switch to
677 		 * sha256-only small.
678 		 */
679 
680 		if (*rcd != NULL && sha1_cd != NULL && (ntohl(sha1_cd->flags) & CS_ADHOC)) {
681 			if (sha1_cd->flags != (*rcd)->flags) {
682 				printf("mismatched flags between hash %d (flags: %#x) and sha1 (flags: %#x) cd.\n",
683 				    (int)(*rcd)->hashType, (*rcd)->flags, sha1_cd->flags);
684 				*rcd = NULL;
685 				return EBADEXEC;
686 			}
687 
688 			*rcd = sha1_cd;
689 		}
690 #endif
691 	} else if (ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY) {
692 		if ((error = cs_validate_codedirectory((const CS_CodeDirectory *)(const void *)addr, length)) != 0) {
693 			return error;
694 		}
695 		*rcd = (const CS_CodeDirectory *)blob;
696 	} else {
697 		return EBADEXEC;
698 	}
699 
700 	if (*rcd == NULL) {
701 		return EBADEXEC;
702 	}
703 
704 	return 0;
705 }
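
/*
 * Example usage (editorial sketch, #if 0): validating a signature buffer
 * and picking up the chosen code directory and entitlements blobs. "addr"
 * and "size" are hypothetical; on success, cd points at the highest-ranked
 * CodeDirectory found in the superblob.
 */
#if 0
const CS_CodeDirectory *cd = NULL;
const CS_GenericBlob *ents = NULL;
const CS_GenericBlob *der_ents = NULL;

if (cs_validate_csblob(addr, size, &cd, &ents, &der_ents) == 0) {
	/* cd, ents and der_ents now point into the validated buffer */
}
#endif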
706 
707 /*
708  * cs_find_blob_bytes
709  *
710  * Find a blob in the superblob/code directory. The blob must have
711  * been validated by cs_validate_csblob() before calling
712  * this. Use csblob_find_blob() instead.
713  *
714  * Will also find a "raw" code directory if one is stored, in addition
715  * to searching the superblob.
716  *
717  * Parameters:	buffer			Pointer to code signature
718  *		length			Length of buffer
719  *		type			type of blob to find
720  *		magic			the magic number for that blob
721  *
722  * Returns:	pointer			Success
723  *		NULL			Buffer not found
724  */
725 
726 const CS_GenericBlob *
727 csblob_find_blob_bytes(const uint8_t *addr, size_t length, uint32_t type, uint32_t magic)
728 {
729 	const CS_GenericBlob *blob = (const CS_GenericBlob *)(const void *)addr;
730 
731 	if ((addr + length) < addr) {
732 		panic("CODE SIGNING: CS Blob length overflow for addr: %p", addr);
733 	}
734 
735 	if (ntohl(blob->magic) == CSMAGIC_EMBEDDED_SIGNATURE) {
736 		const CS_SuperBlob *sb = (const CS_SuperBlob *)blob;
737 		size_t n, count = ntohl(sb->count);
738 
739 		for (n = 0; n < count; n++) {
740 			if (ntohl(sb->index[n].type) != type) {
741 				continue;
742 			}
743 			uint32_t offset = ntohl(sb->index[n].offset);
744 			if (length - sizeof(const CS_GenericBlob) < offset) {
745 				return NULL;
746 			}
747 			blob = (const CS_GenericBlob *)(const void *)(addr + offset);
748 			if (ntohl(blob->magic) != magic) {
749 				continue;
750 			}
751 			if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
752 				panic("CODE SIGNING: CS Blob length overflow for blob at: %p", blob);
753 			} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
754 				continue;
755 			}
756 			return blob;
757 		}
758 	} else if (type == CSSLOT_CODEDIRECTORY && ntohl(blob->magic) == CSMAGIC_CODEDIRECTORY
759 	    && magic == CSMAGIC_CODEDIRECTORY) {
760 		if (((vm_address_t)blob + ntohl(blob->length)) < (vm_address_t)blob) {
761 			panic("CODE SIGNING: CS Blob length overflow for code directory blob at: %p", blob);
762 		} else if (((vm_address_t)blob + ntohl(blob->length)) > (vm_address_t)(addr + length)) {
763 			return NULL;
764 		}
765 		return blob;
766 	}
767 	return NULL;
768 }
769 
770 
771 const CS_GenericBlob *
772 csblob_find_blob(struct cs_blob *csblob, uint32_t type, uint32_t magic)
773 {
774 	if ((csblob->csb_flags & CS_VALID) == 0) {
775 		return NULL;
776 	}
777 	return csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, type, magic);
778 }
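
/*
 * Example usage (editorial sketch, #if 0): fetching the entitlements blob
 * from a cs_blob that already passed validation. The slot/magic pair
 * mirrors the checks in cs_validate_csblob() above.
 */
#if 0
const CS_GenericBlob *ents = csblob_find_blob(csblob,
    CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
if (ents != NULL) {
	size_t len = ntohl(ents->length);
	/* ... parse the entitlements payload of "len" bytes ... */
}
#endif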
779 
780 static const uint8_t *
781 find_special_slot(const CS_CodeDirectory *cd, size_t slotsize, uint32_t slot)
782 {
783 	/* there is no zero special slot since that is the first code slot */
784 	if (ntohl(cd->nSpecialSlots) < slot || slot == 0) {
785 		return NULL;
786 	}
787 
788 	return (const uint8_t *)cd + ntohl(cd->hashOffset) - (slotsize * slot);
789 }
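
/*
 * Layout note (editorial addition): special slot hashes are stored
 * immediately before hashOffset and grow downward, so special slot n lives
 * at hashOffset - n * slotsize, while code slot hashes start at hashOffset
 * and grow upward. Slot 0 is the first code page, hence there is no
 * special slot 0.
 */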
790 
791 static uint8_t cshash_zero[CS_HASH_MAX_SIZE] = { 0 };
792 
793 static int
794 csblob_find_special_slot_blob(struct cs_blob* csblob, uint32_t slot, uint32_t magic, const CS_GenericBlob **out_start, size_t *out_length)
795 {
796 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
797 	const CS_GenericBlob *blob;
798 	const CS_CodeDirectory *code_dir;
799 	const uint8_t *embedded_hash;
800 	union cs_hash_union context;
801 
802 	if (out_start) {
803 		*out_start = NULL;
804 	}
805 	if (out_length) {
806 		*out_length = 0;
807 	}
808 
809 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
810 		return EBADEXEC;
811 	}
812 
813 	code_dir = csblob->csb_cd;
814 
815 	blob = csblob_find_blob_bytes((const uint8_t *)csblob->csb_mem_kaddr, csblob->csb_mem_size, slot, magic);
816 
817 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, slot);
818 
819 	if (embedded_hash == NULL) {
820 		if (blob) {
821 			return EBADEXEC;
822 		}
823 		return 0;
824 	} else if (blob == NULL) {
825 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
826 			return EBADEXEC;
827 		} else {
828 			return 0;
829 		}
830 	}
831 
832 	csblob->csb_hashtype->cs_init(&context);
833 	csblob->csb_hashtype->cs_update(&context, blob, ntohl(blob->length));
834 	csblob->csb_hashtype->cs_final(computed_hash, &context);
835 
836 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
837 		return EBADEXEC;
838 	}
839 	if (out_start) {
840 		*out_start = blob;
841 	}
842 	if (out_length) {
843 		*out_length = ntohl(blob->length);
844 	}
845 
846 	return 0;
847 }
848 
849 int
850 csblob_get_entitlements(struct cs_blob *csblob, void **out_start, size_t *out_length)
851 {
852 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
853 	const CS_GenericBlob *entitlements;
854 	const CS_CodeDirectory *code_dir;
855 	const uint8_t *embedded_hash;
856 	union cs_hash_union context;
857 
858 	*out_start = NULL;
859 	*out_length = 0;
860 
861 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
862 		return EBADEXEC;
863 	}
864 
865 	code_dir = csblob->csb_cd;
866 
867 	if ((csblob->csb_flags & CS_VALID) == 0) {
868 		entitlements = NULL;
869 	} else {
870 		entitlements = csblob->csb_entitlements_blob;
871 	}
872 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_ENTITLEMENTS);
873 
874 	if (embedded_hash == NULL) {
875 		if (entitlements) {
876 			return EBADEXEC;
877 		}
878 		return 0;
879 	} else if (entitlements == NULL) {
880 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
881 			return EBADEXEC;
882 		} else {
883 			return 0;
884 		}
885 	}
886 
887 	csblob->csb_hashtype->cs_init(&context);
888 	csblob->csb_hashtype->cs_update(&context, entitlements, ntohl(entitlements->length));
889 	csblob->csb_hashtype->cs_final(computed_hash, &context);
890 
891 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
892 		return EBADEXEC;
893 	}
894 
895 	*out_start = __DECONST(void *, entitlements);
896 	*out_length = ntohl(entitlements->length);
897 
898 	return 0;
899 }
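
/*
 * Example usage (editorial sketch, #if 0): the returned pointer aliases
 * the signature blob held by the csblob; callers must not free it or use
 * it beyond the life of the csblob.
 */
#if 0
void *ents_start = NULL;
size_t ents_len = 0;

if (csblob_get_entitlements(csblob, &ents_start, &ents_len) == 0 &&
    ents_start != NULL) {
	/* ents_start is a CS_GenericBlob verified against the CD hash */
}
#endif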
900 
901 const CS_GenericBlob*
902 csblob_get_der_entitlements_unsafe(struct cs_blob * csblob)
903 {
904 	if ((csblob->csb_flags & CS_VALID) == 0) {
905 		return NULL;
906 	}
907 
908 	return csblob->csb_der_entitlements_blob;
909 }
910 
911 int
912 csblob_get_der_entitlements(struct cs_blob *csblob, const CS_GenericBlob **out_start, size_t *out_length)
913 {
914 	uint8_t computed_hash[CS_HASH_MAX_SIZE];
915 	const CS_GenericBlob *der_entitlements;
916 	const CS_CodeDirectory *code_dir;
917 	const uint8_t *embedded_hash;
918 	union cs_hash_union context;
919 
920 	*out_start = NULL;
921 	*out_length = 0;
922 
923 	if (csblob->csb_hashtype == NULL || csblob->csb_hashtype->cs_digest_size > sizeof(computed_hash)) {
924 		return EBADEXEC;
925 	}
926 
927 	code_dir = csblob->csb_cd;
928 
929 	if ((csblob->csb_flags & CS_VALID) == 0) {
930 		der_entitlements = NULL;
931 	} else {
932 		der_entitlements = csblob->csb_der_entitlements_blob;
933 	}
934 	embedded_hash = find_special_slot(code_dir, csblob->csb_hashtype->cs_size, CSSLOT_DER_ENTITLEMENTS);
935 
936 	if (embedded_hash == NULL) {
937 		if (der_entitlements) {
938 			return EBADEXEC;
939 		}
940 		return 0;
941 	} else if (der_entitlements == NULL) {
942 		if (memcmp(embedded_hash, cshash_zero, csblob->csb_hashtype->cs_size) != 0) {
943 			return EBADEXEC;
944 		} else {
945 			return 0;
946 		}
947 	}
948 
949 	csblob->csb_hashtype->cs_init(&context);
950 	csblob->csb_hashtype->cs_update(&context, der_entitlements, ntohl(der_entitlements->length));
951 	csblob->csb_hashtype->cs_final(computed_hash, &context);
952 
953 	if (memcmp(computed_hash, embedded_hash, csblob->csb_hashtype->cs_size) != 0) {
954 		return EBADEXEC;
955 	}
956 
957 	*out_start = der_entitlements;
958 	*out_length = ntohl(der_entitlements->length);
959 
960 	return 0;
961 }
962 
963 static bool
964 ubc_cs_blob_pagewise_allocate(
965 	__unused vm_size_t size)
966 {
967 #if CODE_SIGNING_MONITOR
968 	/* If the monitor isn't enabled, then we don't need to page-align */
969 	if (csm_enabled() == false) {
970 		return false;
971 	}
972 
973 	/*
974 	 * Small allocations can be managed by the monitor itself. We only need to
975 	 * allocate page-wise when the allocation is large enough that the monitor
976 	 * cannot manage it on its own.
977 	 */
978 	if (size <= csm_signature_size_limit()) {
979 		return false;
980 	}
981 
982 	return true;
983 #else
984 	/* Without a monitor, we never need to page align */
985 	return false;
986 #endif /* CODE_SIGNING_MONITOR */
987 }
988 
989 int
990 csblob_register_profile(
991 	__unused struct cs_blob *csblob,
992 	__unused cs_profile_register_t *profile)
993 {
994 #if CODE_SIGNING_MONITOR
995 	/* Profiles only need to be registered for monitor environments */
996 	assert(profile->data != NULL);
997 	assert(profile->size != 0);
998 	assert(csblob != NULL);
999 
1000 	kern_return_t kr = csm_register_provisioning_profile(
1001 		profile->uuid,
1002 		profile->data, profile->size);
1003 
1004 	if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
1005 		if (kr == KERN_NOT_SUPPORTED) {
1006 			return 0;
1007 		}
1008 		return EPERM;
1009 	}
1010 
1011 	/* Attempt to trust the profile */
1012 	kr = csm_trust_provisioning_profile(
1013 		profile->uuid,
1014 		profile->sig_data, profile->sig_size);
1015 
1016 	if (kr != KERN_SUCCESS) {
1017 		return EPERM;
1018 	}
1019 
1020 	/* Associate the profile with the monitor's signature object */
1021 	kr = csm_associate_provisioning_profile(
1022 		csblob->csb_csm_obj,
1023 		profile->uuid);
1024 
1025 	if (kr != KERN_SUCCESS) {
1026 		return EPERM;
1027 	}
1028 
1029 	return 0;
1030 #else
1031 	return 0;
1032 #endif /* CODE_SIGNING_MONITOR */
1033 }
1034 
1035 int
1036 csblob_register_profile_uuid(
1037 	struct cs_blob *csblob,
1038 	const uuid_t profile_uuid,
1039 	void *profile_addr,
1040 	vm_size_t profile_size)
1041 {
1042 	cs_profile_register_t profile = {
1043 		.sig_data = NULL,
1044 		.sig_size = 0,
1045 		.data = profile_addr,
1046 		.size = profile_size
1047 	};
1048 
1049 	/* Copy the provided UUID */
1050 	memcpy(profile.uuid, profile_uuid, sizeof(profile.uuid));
1051 
1052 	return csblob_register_profile(csblob, &profile);
1053 }
1054 
1055 /*
1056  * CODESIGNING
1057  * End of routines to navigate code signing data structures in the kernel.
1058  */
1059 
1060 
1061 
1062 /*
1063  * ubc_info_init
1064  *
1065  * Allocate and attach an empty ubc_info structure to a vnode
1066  *
1067  * Parameters:	vp			Pointer to the vnode
1068  *
1069  * Returns:	0			Success
1070  *	vnode_size:ENOMEM		Not enough space
1071  *	vnode_size:???			Other error from vnode_getattr
1072  *
1073  */
1074 int
1075 ubc_info_init(struct vnode *vp)
1076 {
1077 	return ubc_info_init_internal(vp, 0, 0);
1078 }
1079 
1080 
1081 /*
1082  * ubc_info_init_withsize
1083  *
1084  * Allocate and attach a sized ubc_info structure to a vnode
1085  *
1086  * Parameters:	vp			Pointer to the vnode
1087  *		filesize		The size of the file
1088  *
1089  * Returns:	0			Success
1090  *	vnode_size:ENOMEM		Not enough space
1091  *	vnode_size:???			Other error from vnode_getattr
1092  */
1093 int
1094 ubc_info_init_withsize(struct vnode *vp, off_t filesize)
1095 {
1096 	return ubc_info_init_internal(vp, 1, filesize);
1097 }
1098 
1099 
1100 /*
1101  * ubc_info_init_internal
1102  *
1103  * Allocate and attach a ubc_info structure to a vnode
1104  *
1105  * Parameters:	vp			Pointer to the vnode
1106  *		withfsize{0,1}		Zero if the size should be obtained
1107  *					from the vnode; otherwise, use filesize
1108  *		filesize		The size of the file, if withfsize == 1
1109  *
1110  * Returns:	0			Success
1111  *	vnode_size:ENOMEM		Not enough space
1112  *	vnode_size:???			Other error from vnode_getattr
1113  *
1114  * Notes:	We call a blocking zalloc(), and the zone was created as an
1115  *		expandable and collectable zone, so if no memory is available,
1116  *		it is possible for zalloc() to block indefinitely.  zalloc()
1117  *		may also panic if the zone of zones is exhausted, since it's
1118  *		NOT expandable.
1119  *
1120  *		We unconditionally call vnode_pager_setup(), even if this is
1121  *		a reuse of a ubc_info; in that case, we should probably assert
1122  *		that it does not already have a pager association, but do not.
1123  *
1124  *		Since memory_object_create_named() can only fail from receiving
1125  *		an invalid pager argument, the explicit check and panic is
1126  *		merely precautionary.
1127  */
1128 static int
1129 ubc_info_init_internal(vnode_t vp, int withfsize, off_t filesize)
1130 {
1131 	struct ubc_info *uip;
1132 	void *  pager;
1133 	int error = 0;
1134 	kern_return_t kret;
1135 	memory_object_control_t control;
1136 
1137 	uip = vp->v_ubcinfo;
1138 
1139 	/*
1140 	 * If there is not already a ubc_info attached to the vnode, we
1141 	 * attach one; otherwise, we will reuse the one that's there.
1142 	 */
1143 	if (uip == UBC_INFO_NULL) {
1144 		uip = zalloc_flags(ubc_info_zone, Z_WAITOK | Z_ZERO);
1145 
1146 		uip->ui_vnode = vp;
1147 		uip->ui_flags = UI_INITED;
1148 		uip->ui_ucred = NOCRED;
1149 	}
1150 	assert(uip->ui_flags != UI_NONE);
1151 	assert(uip->ui_vnode == vp);
1152 
1153 	/* now set this ubc_info in the vnode */
1154 	vp->v_ubcinfo = uip;
1155 
1156 	/*
1157 	 * Allocate a pager object for this vnode
1158 	 *
1159 	 * XXX The value of the pager parameter is currently ignored.
1160 	 * XXX Presumably, this API changed to avoid the race between
1161 	 * XXX setting the pager and the UI_HASPAGER flag.
1162 	 */
1163 	pager = (void *)vnode_pager_setup(vp, uip->ui_pager);
1164 	assert(pager);
1165 
1166 	/*
1167 	 * Explicitly set the pager into the ubc_info, after setting the
1168 	 * UI_HASPAGER flag.
1169 	 */
1170 	SET(uip->ui_flags, UI_HASPAGER);
1171 	uip->ui_pager = pager;
1172 
1173 	/*
1174 	 * Note: We cannot use VNOP_GETATTR() to get an accurate
1175 	 * value of ui_size because this may be an NFS vnode, and
1176 	 * nfs_getattr() can call vinvalbuf(); if this happens,
1177 	 * ubc_info is not set up to deal with that event.
1178 	 * So use bogus size.
1179 	 */
1180 
1181 	/*
1182 	 * create a vnode - vm_object association
1183 	 * memory_object_create_named() creates a "named" reference on the
1184 	 * memory object; we hold this reference as long as the vnode is
1185 	 * "alive."  Since memory_object_create_named() took its own reference
1186 	 * on the vnode pager we passed it, we can drop the reference
1187 	 * vnode_pager_setup() returned here.
1188 	 */
1189 	kret = memory_object_create_named(pager,
1190 	    (memory_object_size_t)uip->ui_size, &control);
1191 	vnode_pager_deallocate(pager);
1192 	if (kret != KERN_SUCCESS) {
1193 		panic("ubc_info_init: memory_object_create_named returned %d", kret);
1194 	}
1195 
1196 	assert(control);
1197 	uip->ui_control = control;      /* cache the value of the mo control */
1198 	SET(uip->ui_flags, UI_HASOBJREF);       /* with a named reference */
1199 
1200 	if (withfsize == 0) {
1201 		/* initialize the size */
1202 		error = vnode_size(vp, &uip->ui_size, vfs_context_current());
1203 		if (error) {
1204 			uip->ui_size = 0;
1205 		}
1206 	} else {
1207 		uip->ui_size = filesize;
1208 	}
1209 	vp->v_lflag |= VNAMED_UBC;      /* vnode has a named ubc reference */
1210 
1211 	return error;
1212 }
1213 
1214 
1215 /*
1216  * ubc_info_free
1217  *
1218  * Free a ubc_info structure
1219  *
1220  * Parameters:	uip			A pointer to the ubc_info to free
1221  *
1222  * Returns:	(void)
1223  *
1224  * Notes:	If there is a credential that has subsequently been associated
1225  *		with the ubc_info, the reference to the credential is dropped.
1226  *
1227  *		It's actually impossible for a ubc_info.ui_control to take the
1228  *		value MEMORY_OBJECT_CONTROL_NULL.
1229  */
1230 static void
1231 ubc_info_free(struct ubc_info *uip)
1232 {
1233 	if (IS_VALID_CRED(uip->ui_ucred)) {
1234 		kauth_cred_unref(&uip->ui_ucred);
1235 	}
1236 
1237 	if (uip->ui_control != MEMORY_OBJECT_CONTROL_NULL) {
1238 		memory_object_control_deallocate(uip->ui_control);
1239 	}
1240 
1241 	cluster_release(uip);
1242 	ubc_cs_free(uip);
1243 
1244 	zfree(ubc_info_zone, uip);
1245 	return;
1246 }
1247 
1248 
1249 void
1250 ubc_info_deallocate(struct ubc_info *uip)
1251 {
1252 	ubc_info_free(uip);
1253 }
1254 
1255 /*
1256  * ubc_setsize_ex
1257  *
1258  * Tell the VM that the size of the file represented by the vnode has
1259  * changed
1260  *
1261  * Parameters:	vp	   The vp whose backing file size is
1262  *					   being changed
1263  *				nsize  The new size of the backing file
1264  *				opts   Options
1265  *
1266  * Returns:	EINVAL for new size < 0
1267  *			ENOENT if no UBC info exists
1268  *          EAGAIN if the UBC_SETSIZE_NO_FS_REENTRY option is set and new_size < old size
1269  *          Other errors (mapped to errno_t) returned by VM functions
1270  *
1271  * Notes:   This function will indicate success if the new size is the
1272  *		    same or larger than the old size (in this case, the
1273  *		    remainder of the file will require modification or use of
1274  *		    an existing upl to access successfully).
1275  *
1276  *		    This function will fail if the new file size is smaller
1277  *		    and the memory region being invalidated could not
1278  *		    actually be invalidated, or the last page could not be
1279  *		    flushed when the new size is not aligned to a page
1280  *		    boundary.  This is usually indicative of an I/O error.
1281  */
1282 errno_t
1283 ubc_setsize_ex(struct vnode *vp, off_t nsize, ubc_setsize_opts_t opts)
1284 {
1285 	off_t osize;    /* ui_size before change */
1286 	off_t lastpg, olastpgend, lastoff;
1287 	struct ubc_info *uip;
1288 	memory_object_control_t control;
1289 	kern_return_t kret = KERN_SUCCESS;
1290 
1291 	if (nsize < (off_t)0) {
1292 		return EINVAL;
1293 	}
1294 
1295 	if (!UBCINFOEXISTS(vp)) {
1296 		return ENOENT;
1297 	}
1298 
1299 	uip = vp->v_ubcinfo;
1300 	osize = uip->ui_size;
1301 
1302 	if (ISSET(opts, UBC_SETSIZE_NO_FS_REENTRY) && nsize < osize) {
1303 		return EAGAIN;
1304 	}
1305 
1306 	/*
1307 	 * Update the size before flushing the VM
1308 	 */
1309 	uip->ui_size = nsize;
1310 
1311 	if (nsize >= osize) {   /* Nothing more to do */
1312 		if (nsize > osize) {
1313 			lock_vnode_and_post(vp, NOTE_EXTEND);
1314 		}
1315 
1316 		return 0;
1317 	}
1318 
1319 	/*
1320 	 * When the file shrinks, invalidate the pages beyond the
1321 	 * new size. Also get rid of garbage beyond nsize on the
1322 	 * last page. The ui_size already has the nsize, so any
1323 	 * subsequent page-in will zero-fill the tail properly
1324 	 */
1325 	lastpg = trunc_page_64(nsize);
1326 	olastpgend = round_page_64(osize);
1327 	control = uip->ui_control;
1328 	assert(control);
1329 	lastoff = (nsize & PAGE_MASK_64);
1330 
1331 	if (lastoff) {
1332 		upl_t           upl;
1333 		upl_page_info_t *pl;
1334 
1335 		/*
1336 		 * new EOF ends up in the middle of a page
1337 		 * zero the tail of this page if it's currently
1338 		 * present in the cache
1339 		 */
1340 		kret = ubc_create_upl_kernel(vp, lastpg, PAGE_SIZE, &upl, &pl, UPL_SET_LITE | UPL_WILL_MODIFY, VM_KERN_MEMORY_FILE);
1341 
1342 		if (kret != KERN_SUCCESS) {
1343 			panic("ubc_setsize: ubc_create_upl (error = %d)", kret);
1344 		}
1345 
1346 		if (upl_valid_page(pl, 0)) {
1347 			cluster_zero(upl, (uint32_t)lastoff, PAGE_SIZE - (uint32_t)lastoff, NULL);
1348 		}
1349 
1350 		ubc_upl_abort_range(upl, 0, PAGE_SIZE, UPL_ABORT_FREE_ON_EMPTY);
1351 
1352 		lastpg += PAGE_SIZE_64;
1353 	}
1354 	if (olastpgend > lastpg) {
1355 		int     flags;
1356 
1357 		if (lastpg == 0) {
1358 			flags = MEMORY_OBJECT_DATA_FLUSH_ALL;
1359 		} else {
1360 			flags = MEMORY_OBJECT_DATA_FLUSH;
1361 		}
1362 		/*
1363 		 * invalidate the pages beyond the new EOF page
1364 		 *
1365 		 */
1366 		kret = memory_object_lock_request(control,
1367 		    (memory_object_offset_t)lastpg,
1368 		    (memory_object_size_t)(olastpgend - lastpg), NULL, NULL,
1369 		    MEMORY_OBJECT_RETURN_NONE, flags, VM_PROT_NO_CHANGE);
1370 		if (kret != KERN_SUCCESS) {
1371 			printf("ubc_setsize: invalidate failed (error = %d)\n", kret);
1372 		}
1373 	}
1374 	return mach_to_bsd_errno(kret);
1375 }
1376 
1377 // Returns true for success
1378 int
1379 ubc_setsize(vnode_t vp, off_t nsize)
1380 {
1381 	return ubc_setsize_ex(vp, nsize, 0) == 0;
1382 }
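
/*
 * Example usage (editorial sketch, #if 0): a filesystem truncating a file
 * to a hypothetical "new_size" after updating its on-disk metadata.
 * Passing 0 for opts requests the default behavior;
 * UBC_SETSIZE_NO_FS_REENTRY is the only option checked above.
 */
#if 0
errno_t err = ubc_setsize_ex(vp, new_size, 0);
if (err != 0) {
	/* shrink failed: pages beyond new_size could not be invalidated */
}
#endif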
1383 
1384 /*
1385  * ubc_getsize
1386  *
1387  * Get the size of the file associated with the specified vnode
1388  *
1389  * Parameters:	vp			The vnode whose size is of interest
1390  *
1391  * Returns:	0			There is no ubc_info associated with
1392  *					this vnode, or the size is zero
1393  *		!0			The size of the file
1394  *
1395  * Notes:	Using this routine, it is not possible for a caller to
1396  *		successfully distinguish between a vnode associated with a zero
1397  *		length file, and a vnode with no associated ubc_info.  The
1398  *		caller therefore needs to not care, or needs to ensure that
1399  *		they have previously successfully called ubc_info_init() or
1400  *		ubc_info_init_withsize().
1401  */
1402 off_t
1403 ubc_getsize(struct vnode *vp)
1404 {
1405 	/* people depend on the side effect of this working this way,
1406 	 * as they call this for directories
1407 	 */
1408 	if (!UBCINFOEXISTS(vp)) {
1409 		return (off_t)0;
1410 	}
1411 	return vp->v_ubcinfo->ui_size;
1412 }
1413 
1414 
1415 /*
1416  * ubc_umount
1417  *
1418  * Call ubc_msync(vp, 0, EOF, NULL, UBC_PUSHALL) on all the vnodes for this
1419  * mount point
1420  *
1421  * Parameters:	mp			The mount point
1422  *
1423  * Returns:	0			Success
1424  *
1425  * Notes:	There is no failure indication for this function.
1426  *
1427  *		This function is used in the unmount path; since it may block
1428  *		I/O indefinitely, it should not be used in the forced unmount
1429  *		path, since a device unavailability could also block that
1430  *		indefinitely.
1431  *
1432  *		Because there is no device ejection interlock on USB, FireWire,
1433  *		or similar devices, it's possible that an ejection that begins
1434  *		subsequent to the vnode_iterate() completing, either on one of
1435  *		those devices, or a network mount for which the server quits
1436  *		responding, etc., may cause the caller to block indefinitely.
1437  */
1438 __private_extern__ int
1439 ubc_umount(struct mount *mp)
1440 {
1441 	vnode_iterate(mp, 0, ubc_umcallback, 0);
1442 	return 0;
1443 }
1444 
1445 
1446 /*
1447  * ubc_umcallback
1448  *
1449  * Used by ubc_umount() as an internal implementation detail; see ubc_umount()
1450  * and vnode_iterate() for details of implementation.
1451  */
1452 static int
1453 ubc_umcallback(vnode_t vp, __unused void * args)
1454 {
1455 	if (UBCINFOEXISTS(vp)) {
1456 		(void) ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL);
1457 	}
1458 	return VNODE_RETURNED;
1459 }
1460 
1461 
1462 /*
1463  * ubc_getcred
1464  *
1465  * Get the credentials currently active for the ubc_info associated with the
1466  * vnode.
1467  *
1468  * Parameters:	vp			The vnode whose ubc_info credentials
1469  *					are to be retrieved
1470  *
1471  * Returns:	!NOCRED			The credentials
1472  *		NOCRED			If there is no ubc_info for the vnode,
1473  *					or if there is one, but it has not had
1474  *					any credentials associated with it.
1475  */
1476 kauth_cred_t
1477 ubc_getcred(struct vnode *vp)
1478 {
1479 	if (UBCINFOEXISTS(vp)) {
1480 		return vp->v_ubcinfo->ui_ucred;
1481 	}
1482 
1483 	return NOCRED;
1484 }
1485 
1486 
1487 /*
1488  * ubc_setthreadcred
1489  *
1490  * If they are not already set, set the credentials of the ubc_info structure
1491  * associated with the vnode to those of the supplied thread; otherwise leave
1492  * them alone.
1493  *
1494  * Parameters:	vp			The vnode whose ubc_info creds are to
1495  *					be set
1496  *		p			The process whose credentials are to
1497  *					be used, if not running on an assumed
1498  *					credential
1499  *		thread			The thread whose credentials are to
1500  *					be used
1501  *
1502  * Returns:	1			This vnode has no associated ubc_info
1503  *		0			Success
1504  *
1505  * Notes:	This function is generally used only in the following cases:
1506  *
1507  *		o	a memory mapped file via the mmap() system call
1508  *		o	a swap store backing file
1509  *		o	subsequent to a successful write via vn_write()
1510  *
1511  *		The information is then used by the NFS client in order to
1512  *		cons up a wire message in either the page-in or page-out path.
1513  *
1514  *		There are two potential problems with the use of this API:
1515  *
1516  *		o	Because the write path only sets it on a successful
1517  *			write, there is a race window between setting the
1518  *			credential and its use to evict the pages to the
1519  *			remote file server
1520  *
1521  *		o	Because a page-in may occur prior to a write, the
1522  *			credential may not be set at this time, if the page-in
1523  *			is not the result of a mapping established via mmap().
1524  *
1525  *		In both these cases, this will be triggered from the paging
1526  *		path, which will instead use the credential of the current
1527  *		process, which in this case is either the dynamic_pager or
1528  *		the kernel task, both of which utilize "root" credentials.
1529  *
1530  *		This may potentially permit operations to occur which should
1531  *		be denied, or it may cause to be denied operations which
1532  *		should be permitted, depending on the configuration of the NFS
1533  *		server.
1534  */
1535 int
1536 ubc_setthreadcred(struct vnode *vp, proc_t p, thread_t thread)
1537 {
1538 #pragma unused(p, thread)
1539 	assert(p == current_proc());
1540 	assert(thread == current_thread());
1541 
1542 	return ubc_setcred(vp, kauth_cred_get());
1543 }
1544 
1545 
1546 /*
1547  * ubc_setcred
1548  *
1549  * If they are not already set, set the credentials of the ubc_info structure
1550  * associated with the vnode to those specified; otherwise leave them
1551  * alone.
1552  *
1553  * Parameters:	vp			The vnode whose ubc_info creds are to
1554  *					be set
1555  *		ucred			The credentials to use
1556  *
1557  * Returns:	0			This vnode has no associated ubc_info
1558  *		1			Success
1559  *
1560  * Notes:	The return values for this function are inverted from nearly
1561  *		all other uses in the kernel.
1562  *
1563  *		See also ubc_setthreadcred(), above.
1564  */
1565 int
1566 ubc_setcred(struct vnode *vp, kauth_cred_t ucred)
1567 {
1568 	struct ubc_info *uip;
1569 
1570 	/* If there is no ubc_info, deny the operation */
1571 	if (!UBCINFOEXISTS(vp)) {
1572 		return 0;
1573 	}
1574 
1575 	/*
1576 	 * Check to see if there is already a credential reference in the
1577 	 * ubc_info; if there is not, take one on the supplied credential.
1578 	 */
1579 	vnode_lock(vp);
1580 	uip = vp->v_ubcinfo;
1581 	if (!IS_VALID_CRED(uip->ui_ucred)) {
1582 		kauth_cred_ref(ucred);
1583 		uip->ui_ucred = ucred;
1584 	}
1585 	vnode_unlock(vp);
1586 
1587 	return 1;
1588 }
1589 
1590 /*
1591  * ubc_getpager
1592  *
1593  * Get the pager associated with the ubc_info associated with the vnode.
1594  *
1595  * Parameters:	vp			The vnode to obtain the pager from
1596  *
1597  * Returns:	!VNODE_PAGER_NULL	The memory_object_t for the pager
1598  *		VNODE_PAGER_NULL	There is no ubc_info for this vnode
1599  *
1600  * Notes:	For each vnode that has a ubc_info associated with it, that
1601  *		ubc_info SHALL have a pager associated with it, so in the
1602  *		normal case, it's impossible to return VNODE_PAGER_NULL for
1603  *		a vnode with an associated ubc_info.
1604  */
1605 __private_extern__ memory_object_t
1606 ubc_getpager(struct vnode *vp)
1607 {
1608 	if (UBCINFOEXISTS(vp)) {
1609 		return vp->v_ubcinfo->ui_pager;
1610 	}
1611 
1612 	return 0;
1613 }
1614 
1615 
1616 /*
1617  * ubc_getobject
1618  *
1619  * Get the memory object control associated with the ubc_info associated with
1620  * the vnode
1621  *
1622  * Parameters:	vp			The vnode to obtain the memory object
1623  *					from
1624  *		flags			DEPRECATED
1625  *
1626  * Returns:	!MEMORY_OBJECT_CONTROL_NULL
1627  *		MEMORY_OBJECT_CONTROL_NULL
1628  *
1629  * Notes:	Historically, if the flags were not "do not reactivate", this
1630  *		function would look up the memory object using the pager if
1631  *		it did not exist (this could be the case if the vnode had
1632  *		been previously reactivated).  The flags would also permit a
1633  *		hold to be requested, which would have created an object
1634  *		reference, if one had not already existed.  This usage is
1635  *		deprecated, as it would permit a race between finding and
1636  *		taking the reference vs. a single reference being dropped in
1637  *		another thread.
1638  */
1639 memory_object_control_t
1640 ubc_getobject(struct vnode *vp, __unused int flags)
1641 {
1642 	if (UBCINFOEXISTS(vp)) {
1643 		return vp->v_ubcinfo->ui_control;
1644 	}
1645 
1646 	return MEMORY_OBJECT_CONTROL_NULL;
1647 }
1648 
1649 /*
1650  * ubc_blktooff
1651  *
1652  * Convert a given block number to a memory backing object (file) offset for a
1653  * given vnode
1654  *
1655  * Parameters:	vp			The vnode in which the block is located
1656  *		blkno			The block number to convert
1657  *
1658  * Returns:	!-1			The offset into the backing object
1659  *		-1			There is no ubc_info associated with
1660  *					the vnode
1661  *		-1			An error occurred in the underlying VFS
1662  *					while translating the block to an
1663  *					offset; the most likely cause is that
1664  *					the caller specified a block past the
1665  *					end of the file, but this could also be
1666  *					any other error from VNOP_BLKTOOFF().
1667  *
1668  * Note:	Representing the error in band loses some information, but does
1669  *		not occlude a valid offset, since an off_t of -1 is normally
1670  *		used to represent EOF.  If we had a more reliable constant in
1671  *		our header files for it (i.e. explicitly cast to an off_t), we
1672  *		would use it here instead.
1673  */
1674 off_t
1675 ubc_blktooff(vnode_t vp, daddr64_t blkno)
1676 {
1677 	off_t file_offset = -1;
1678 	int error;
1679 
1680 	if (UBCINFOEXISTS(vp)) {
1681 		error = VNOP_BLKTOOFF(vp, blkno, &file_offset);
1682 		if (error) {
1683 			file_offset = -1;
1684 		}
1685 	}
1686 
1687 	return file_offset;
1688 }
1689 
1690 
1691 /*
1692  * ubc_offtoblk
1693  *
1694  * Convert a given offset in a memory backing object into a block number for a
1695  * given vnode
1696  *
1697  * Parameters:	vp			The vnode in which the offset is
1698  *					located
1699  *		offset			The offset into the backing object
1700  *
1701  * Returns:	!-1			The returned block number
1702  *		-1			There is no ubc_info associated with
1703  *					the vnode
1704  *		-1			An error occurred in the underlying VFS
1705  *					while translating the offset to a
1706  *					block; the most likely cause is that
1707  *					the caller specified an offset past the
1708  *					end of the file, but this could also be
1709  *					any other error from VNOP_OFFTOBLK().
1710  *
1711  * Note:	Representing the error in band loses some information, but does
1712  *		not occlude a valid block number, since block numbers exceed
1713  *		the valid range for offsets, due to their relative sizes.  If
1714  *		we had a more reliable constant than -1 in our header files
1715  *		for it (i.e. explicitly cast to an daddr64_t), we would use it
1716  *		here instead.
1717  */
1718 daddr64_t
1719 ubc_offtoblk(vnode_t vp, off_t offset)
1720 {
1721 	daddr64_t blkno = -1;
1722 	int error = 0;
1723 
1724 	if (UBCINFOEXISTS(vp)) {
1725 		error = VNOP_OFFTOBLK(vp, offset, &blkno);
1726 		if (error) {
1727 			blkno = -1;
1728 		}
1729 	}
1730 
1731 	return blkno;
1732 }
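
/*
 * Example usage (editorial sketch, #if 0): both translations report errors
 * in band with -1, as the notes above describe, so callers must check the
 * sentinel rather than an errno.
 */
#if 0
off_t file_offset = ubc_blktooff(vp, blkno);
daddr64_t block = ubc_offtoblk(vp, file_offset);
if (file_offset == -1 || block == -1) {
	/* no ubc_info, or the underlying VNOP translation failed */
}
#endif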
1733 
1734 
1735 /*
1736  * ubc_pages_resident
1737  *
1738  * Determine whether or not a given vnode has pages resident via the memory
1739  * object control associated with the ubc_info associated with the vnode
1740  *
1741  * Parameters:	vp			The vnode we want to know about
1742  *
1743  * Returns:	1			Yes
1744  *		0			No
1745  */
1746 int
1747 ubc_pages_resident(vnode_t vp)
1748 {
1749 	kern_return_t           kret;
1750 	boolean_t                       has_pages_resident;
1751 
1752 	if (!UBCINFOEXISTS(vp)) {
1753 		return 0;
1754 	}
1755 
1756 	/*
1757 	 * The following call may fail if an invalid ui_control is specified,
1758 	 * or if there is no VM object associated with the control object.  In
1759 	 * either case, reacting to it as if there were no pages resident will
1760 	 * result in correct behavior.
1761 	 */
1762 	kret = memory_object_pages_resident(vp->v_ubcinfo->ui_control, &has_pages_resident);
1763 
1764 	if (kret != KERN_SUCCESS) {
1765 		return 0;
1766 	}
1767 
1768 	if (has_pages_resident == TRUE) {
1769 		return 1;
1770 	}
1771 
1772 	return 0;
1773 }
1774 
1775 /*
1776  * ubc_msync
1777  *
1778  * Clean and/or invalidate a range in the memory object that backs this vnode
1779  *
1780  * Parameters:	vp			The vnode whose associated ubc_info's
1781  *					associated memory object is to have a
1782  *					range invalidated within it
1783  *		beg_off			The start of the range, as an offset
1784  *		end_off			The end of the range, as an offset
1785  *		resid_off		The address of an off_t supplied by the
1786  *					caller; may be set to NULL to ignore
1787  *		flags			See ubc_msync_internal()
1788  *
1789  * Returns:	0			Success
1790  *		!0			Failure; an errno is returned
1791  *
1792  * Implicit Returns:
1793  *		*resid_off, modified	If non-NULL, the contents are ALWAYS
1794  *					modified; they are initialized to the
1795  *					beg_off, and in case of an I/O error,
1796  *					the difference between beg_off and the
1797  *					current value will reflect what was
1798  *					able to be written before the error
1799  *					occurred.  If no error is returned, the
1800  *					value of the resid_off is undefined; do
1801  *					NOT use it in place of end_off if you
1802  *					intend to increment from the end of the
1803  *					last call and call iteratively.
1804  *
1805  * Notes:	see ubc_msync_internal() for more detailed information.
1806  *
1807  */
1808 errno_t
1809 ubc_msync(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags)
1810 {
1811 	int retval;
1812 	int io_errno = 0;
1813 
1814 	if (resid_off) {
1815 		*resid_off = beg_off;
1816 	}
1817 
1818 	retval = ubc_msync_internal(vp, beg_off, end_off, resid_off, flags, &io_errno);
1819 
1820 	if (retval == 0 && io_errno == 0) {
1821 		return EINVAL;
1822 	}
1823 	return io_errno;
1824 }
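
/*
 * Hypothetical caller sketch: push all dirty pages of the file and wait
 * for the I/O to complete.  ubc_getsize() is assumed to be the usual
 * accessor for the ubc file size; resid is a local as described above.
 *
 *	off_t resid = 0;
 *	int error = ubc_msync(vp, (off_t)0, ubc_getsize(vp), &resid,
 *	    UBC_PUSHDIRTY | UBC_SYNC);
 *	if (error) {
 *		// (resid - 0) bytes were pushed before the error; per the
 *		// notes above, resid is only meaningful on failure
 *	}
 */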
1825 
1826 
1827 /*
1828  * ubc_msync_internal
1829  *
1830  * Clean and/or invalidate a range in the memory object that backs this vnode
1831  *
1832  * Parameters:	vp			The vnode whose associated ubc_info's
1833  *					associated memory object is to have a
1834  *					range invalidated within it
1835  *		beg_off			The start of the range, as an offset
1836  *		end_off			The end of the range, as an offset
1837  *		resid_off		The address of an off_t supplied by the
1838  *					caller; may be set to NULL to ignore
1839  *		flags			MUST contain at least one of the flags
1840  *					UBC_INVALIDATE, UBC_PUSHDIRTY, or
1841  *					UBC_PUSHALL; if UBC_PUSHDIRTY is used,
1842  *					UBC_SYNC may also be specified to cause
1843  *					this function to block until the
1844  *					operation is complete.  The behavior
1845  *					of UBC_SYNC is otherwise undefined.
1846  *		io_errno		The address of an int to contain the
1847  *					errno from a failed I/O operation, if
1848  *					one occurs; may be set to NULL to
1849  *					ignore
1850  *
1851  * Returns:	1			Success
1852  *		0			Failure
1853  *
1854  * Implicit Returns:
1855  *		*resid_off, modified	The contents of this offset MAY be
1856  *					modified; in case of an I/O error, the
1857  *					difference between beg_off and the
1858  *					current value will reflect what was
1859  *					able to be written before the error
1860  *					occurred.
1861  *		*io_errno, modified	The contents of this offset are set to
1862  *					an errno, if an error occurs; if the
1863  *					caller supplies an io_errno parameter,
1864  *					they should be careful to initialize it
1865  *					to 0 before calling this function to
1866  *					enable them to distinguish an error
1867  *					with a valid *resid_off from an invalid
1868  *					one, and to avoid potentially falsely
1869  *					reporting an error, depending on use.
1870  *
1871  * Notes:	If there is no ubc_info associated with the vnode supplied,
1872  *		this function immediately returns success.
1873  *
1874  *		If the value of end_off is less than or equal to beg_off, this
1875  *		function immediately returns success; that is, end_off is NOT
1876  *		inclusive.
1877  *
1878  *		IMPORTANT: one of the flags UBC_INVALIDATE, UBC_PUSHDIRTY, or
1879  *		UBC_PUSHALL MUST be specified; that is, it is NOT possible to
1880  *		attempt to block on in-progress I/O by calling this function
1881  *		with UBC_PUSHDIRTY, and then later call it with just UBC_SYNC
1882  *		in order to block pending on the I/O already in progress.
1883  *
1884  *		The start offset is truncated to the page boundary and the
1885  *		size is adjusted to include the last page in the range; that
1886  *		is, end_off on exactly a page boundary will not change if it
1887  *		is rounded, and the range of bytes written will be from the
1888  *		truncated beg_off to the rounded (end_off - 1).
1889  */
1890 static int
1891 ubc_msync_internal(vnode_t vp, off_t beg_off, off_t end_off, off_t *resid_off, int flags, int *io_errno)
1892 {
1893 	memory_object_size_t    tsize;
1894 	kern_return_t           kret;
1895 	int request_flags = 0;
1896 	int flush_flags   = MEMORY_OBJECT_RETURN_NONE;
1897 
1898 	if (!UBCINFOEXISTS(vp)) {
1899 		return 0;
1900 	}
1901 	if ((flags & (UBC_INVALIDATE | UBC_PUSHDIRTY | UBC_PUSHALL)) == 0) {
1902 		return 0;
1903 	}
1904 	if (end_off <= beg_off) {
1905 		return 1;
1906 	}
1907 
1908 	if (flags & UBC_INVALIDATE) {
1909 		/*
1910 		 * discard the resident pages
1911 		 */
1912 		request_flags = (MEMORY_OBJECT_DATA_FLUSH | MEMORY_OBJECT_DATA_NO_CHANGE);
1913 	}
1914 
1915 	if (flags & UBC_SYNC) {
1916 		/*
1917 		 * wait for all the I/O to complete before returning
1918 		 */
1919 		request_flags |= MEMORY_OBJECT_IO_SYNC;
1920 	}
1921 
1922 	if (flags & UBC_PUSHDIRTY) {
1923 		/*
1924 		 * we only return the dirty pages in the range
1925 		 */
1926 		flush_flags = MEMORY_OBJECT_RETURN_DIRTY;
1927 	}
1928 
1929 	if (flags & UBC_PUSHALL) {
1930 		/*
1931 		 * then return all the interesting pages in the range (both
1932 		 * dirty and precious) to the pager
1933 		 */
1934 		flush_flags = MEMORY_OBJECT_RETURN_ALL;
1935 	}
1936 
1937 	beg_off = trunc_page_64(beg_off);
1938 	end_off = round_page_64(end_off);
1939 	tsize   = (memory_object_size_t)end_off - beg_off;
1940 
1941 	/* flush and/or invalidate pages in the range requested */
1942 	kret = memory_object_lock_request(vp->v_ubcinfo->ui_control,
1943 	    beg_off, tsize,
1944 	    (memory_object_offset_t *)resid_off,
1945 	    io_errno, flush_flags, request_flags,
1946 	    VM_PROT_NO_CHANGE);
1947 
1948 	return (kret == KERN_SUCCESS) ? 1 : 0;
1949 }
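
/*
 * Worked example of the trunc/round arithmetic above, assuming a 4 KiB
 * page size: beg_off = 5000 becomes trunc_page_64(5000) = 4096, and
 * end_off = 8192 stays round_page_64(8192) = 8192, so tsize = 4096 and
 * exactly one page is handed to memory_object_lock_request().
 */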
1950 
1951 
1952 /*
1953  * ubc_map
1954  *
1955  * Explicitly map a vnode that has an associated ubc_info, and add a reference
1956  * to it for the ubc system, if there isn't one already, so it will not be
1957  * recycled while it's in use, and set flags on the ubc_info to indicate that
1958  * we have done this
1959  *
1960  * Parameters:	vp			The vnode to map
1961  *		flags			The mapping flags for the vnode; this
1962  *					will be a combination of one or more of
1963  *					PROT_READ, PROT_WRITE, and PROT_EXEC
1964  *
1965  * Returns:	0			Success
1966  *		EPERM			Permission was denied
1967  *
1968  * Notes:	An I/O reference on the vnode must already be held on entry
1969  *
1970  *		If there is no ubc_info associated with the vnode, this function
1971  *		will return success.
1972  *
1973  *		If a permission error occurs, this function will return
1974  *		failure; all other failures will cause this function to return
1975  *		success.
1976  *
1977  *		IMPORTANT: This is an internal use function, and its symbols
1978  *		are not exported, hence its error checking is not very robust.
1979  *		It is primarily used by:
1980  *
1981  *		o	mmap(), when mapping a file
1982  *		o	When mapping a shared file (a shared library in the
1983  *			shared segment region)
1984  *		o	When loading a program image during the exec process
1985  *
1986  *		...all of these uses ignore the return code, and any fault that
1987  *		results later because of a failure is handled in the fix-up path
1988  *		of the fault handler.  The interface exists primarily as a
1989  *		performance hint.
1990  *
1991  *		Given that third party implementation of the type of interfaces
1992  *		that would use this function, such as alternative executable
1993  *		formats, etc., are unsupported, this function is not exported
1994  *		for general use.
1995  *
1996  *		The extra reference is held until the VM system unmaps the
1997  *		vnode from its own context to maintain a vnode reference in
1998  *		cases like open()/mmap()/close(), which leave the backing
1999  *		object referenced by a mapped memory region in a process
2000  *		address space.
2001  */
2002 __private_extern__ int
2003 ubc_map(vnode_t vp, int flags)
2004 {
2005 	struct ubc_info *uip;
2006 	int error = 0;
2007 	int need_ref = 0;
2008 	int need_wakeup = 0;
2009 
2010 	if (UBCINFOEXISTS(vp)) {
2011 		vnode_lock(vp);
2012 		uip = vp->v_ubcinfo;
2013 
2014 		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2015 			SET(uip->ui_flags, UI_MAPWAITING);
2016 			(void) msleep(&uip->ui_flags, &vp->v_lock,
2017 			    PRIBIO, "ubc_map", NULL);
2018 		}
2019 		SET(uip->ui_flags, UI_MAPBUSY);
2020 		vnode_unlock(vp);
2021 
2022 		error = VNOP_MMAP(vp, flags, vfs_context_current());
2023 
2024 		/*
2025 		 * rdar://problem/22587101 required that we stop propagating
2026 		 * EPERM up the stack. Otherwise, we would have to funnel up
2027 		 * the error at all the call sites for memory_object_map().
2028 		 * The risk is in having to undo the map/object/entry state at
2029 		 * all these call sites. It would also affect more than just mmap()
2030 		 * e.g. vm_remap().
2031 		 *
2032 		 *	if (error != EPERM)
2033 		 *              error = 0;
2034 		 */
2035 
2036 		error = 0;
2037 
2038 		vnode_lock_spin(vp);
2039 
2040 		if (error == 0) {
2041 			if (!ISSET(uip->ui_flags, UI_ISMAPPED)) {
2042 				need_ref = 1;
2043 			}
2044 			SET(uip->ui_flags, (UI_WASMAPPED | UI_ISMAPPED));
2045 			if (flags & PROT_WRITE) {
2046 				SET(uip->ui_flags, (UI_WASMAPPEDWRITE | UI_MAPPEDWRITE));
2047 			}
2048 		}
2049 		CLR(uip->ui_flags, UI_MAPBUSY);
2050 
2051 		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2052 			CLR(uip->ui_flags, UI_MAPWAITING);
2053 			need_wakeup = 1;
2054 		}
2055 		vnode_unlock(vp);
2056 
2057 		if (need_wakeup) {
2058 			wakeup(&uip->ui_flags);
2059 		}
2060 
2061 		if (need_ref) {
2062 			/*
2063 			 * Make sure we get a ref as we can't unwind from here
2064 			 */
2065 			if (vnode_ref_ext(vp, 0, VNODE_REF_FORCE)) {
2066 				panic("%s : VNODE_REF_FORCE failed", __FUNCTION__);
2067 			}
2068 			/*
2069 			 * Vnodes that are on "unreliable" media (like disk
2070 			 * images, network filesystems, 3rd-party filesystems,
2071 			 * and possibly external devices) could see their
2072 			 * contents be changed via the backing store without
2073 			 * triggering copy-on-write, so we can't fully rely
2074 			 * on copy-on-write and might have to resort to
2075 			 * copy-on-read to protect "privileged" processes and
2076 			 * prevent privilege escalation.
2077 			 *
2078 			 * The root filesystem is considered "reliable" because
2079 			 * there's not much point in trying to protect
2080 			 * ourselves from such a vulnerability and the extra
2081 			 * cost of copy-on-read (CPU time and memory pressure)
2082 			 * could result in some serious regressions.
2083 			 */
2084 			if (vp->v_mount != NULL &&
2085 			    ((vp->v_mount->mnt_flag & MNT_ROOTFS) ||
2086 			    vnode_on_reliable_media(vp))) {
2087 				/*
2088 				 * This vnode is deemed "reliable" so mark
2089 				 * its VM object as "trusted".
2090 				 */
2091 				memory_object_mark_trusted(uip->ui_control);
2092 			} else {
2093 //				printf("BUGGYCOW: %s:%d vp %p \"%s\" in mnt %p \"%s\" is untrusted\n", __FUNCTION__, __LINE__, vp, vp->v_name, vp->v_mount, vp->v_mount->mnt_vnodecovered->v_name);
2094 			}
2095 		}
2096 	}
2097 	return error;
2098 }
2099 
2100 
2101 /*
2102  * ubc_destroy_named
2103  *
2104  * Destroy the named memory object associated with the ubc_info control object
2105  * associated with the designated vnode, if there is a ubc_info associated
2106  * with the vnode, and a control object is associated with it
2107  *
2108  * Parameters:	vp			The designated vnode
2109  *
2110  * Returns:	(void)
2111  *
2112  * Notes:	This function is called on vnode termination for all vnodes,
2113  *		and must therefore not assume that there is a ubc_info that is
2114  *		associated with the vnode, nor that there is a control object
2115  *		associated with the ubc_info.
2116  *
2117  *		If all the conditions necessary are present, this function
2118  *		calls memory_object_destroy(), which will in turn end up
2119  *		calling ubc_unmap() to release any vnode references that were
2120  *		established via ubc_map().
2121  *
2122  *		IMPORTANT: This is an internal use function that is used
2123  *		exclusively by the internal use function vclean().
2124  */
2125 __private_extern__ void
2126 ubc_destroy_named(vnode_t vp, vm_object_destroy_reason_t reason)
2127 {
2128 	memory_object_control_t control;
2129 	struct ubc_info *uip;
2130 	kern_return_t kret;
2131 
2132 	if (UBCINFOEXISTS(vp)) {
2133 		uip = vp->v_ubcinfo;
2134 
2135 		/* Terminate the memory object  */
2136 		control = ubc_getobject(vp, UBC_HOLDOBJECT);
2137 		if (control != MEMORY_OBJECT_CONTROL_NULL) {
2138 			kret = memory_object_destroy(control, reason);
2139 			if (kret != KERN_SUCCESS) {
2140 				panic("ubc_destroy_named: memory_object_destroy failed");
2141 			}
2142 		}
2143 	}
2144 }
2145 
2146 
2147 /*
2148  * ubc_isinuse
2149  *
2150  * Determine whether or not a vnode is currently in use by ubc at a level in
2151  * excess of the requested busycount
2152  *
2153  * Parameters:	vp			The vnode to check
2154  *		busycount		The threshold busy count, used to bias
2155  *					the count usually already held by the
2156  *					caller to avoid races
2157  *
2158  * Returns:	1			The vnode is in use over the threshold
2159  *		0			The vnode is not in use over the
2160  *					threshold
2161  *
2162  * Notes:	Because the vnode is only held locked while actually querying
2163  *		the use count, this function only represents a snapshot of the
2164  *		current state of the vnode.  If more accurate information is
2165  *		required, an additional busycount should be held by the caller
2166  *		and a non-zero busycount used.
2167  *
2168  *		If there is no ubc_info associated with the vnode, this
2169  *		function will report that the vnode is not in use by ubc.
2170  */
2171 int
2172 ubc_isinuse(struct vnode *vp, int busycount)
2173 {
2174 	if (!UBCINFOEXISTS(vp)) {
2175 		return 0;
2176 	}
2177 	return ubc_isinuse_locked(vp, busycount, 0);
2178 }
2179 
2180 
2181 /*
2182  * ubc_isinuse_locked
2183  *
2184  * Determine whether or not a vnode is currently in use by ubc at a level in
2185  * excess of the requested busycount
2186  *
2187  * Parameters:	vp			The vnode to check
2188  *		busycount		The threshold busy count, used to bias
2189  *					the count usually already held by the
2190  *					caller to avoid races
2191  *		locked			True if the vnode is already locked by
2192  *					the caller
2193  *
2194  * Returns:	1			The vnode is in use over the threshold
2195  *		0			The vnode is not in use over the
2196  *					threshold
2197  *
2198  * Notes:	If the vnode is not locked on entry, it is locked while
2199  *		actually querying the use count.  If this is the case, this
2200  *		function only represents a snapshot of the current state of
2201  *		the vnode.  If more accurate information is required, the
2202  *		vnode lock should be held by the caller, otherwise an
2203  *		additional busycount should be held by the caller and a
2204  *		non-zero busycount used.
2205  *
2206  *		If there is no ubc_info associated with the vnode, this
2207  *		function will report that the vnode is not in use by ubc.
2208  */
2209 int
2210 ubc_isinuse_locked(struct vnode *vp, int busycount, int locked)
2211 {
2212 	int retval = 0;
2213 
2214 
2215 	if (!locked) {
2216 		vnode_lock_spin(vp);
2217 	}
2218 
2219 	if ((vp->v_usecount - vp->v_kusecount) > busycount) {
2220 		retval = 1;
2221 	}
2222 
2223 	if (!locked) {
2224 		vnode_unlock(vp);
2225 	}
2226 	return retval;
2227 }
2228 
2229 
2230 /*
2231  * ubc_unmap
2232  *
2233  * Reverse the effects of a ubc_map() call for a given vnode
2234  *
2235  * Parameters:	vp			vnode to unmap from ubc
2236  *
2237  * Returns:	(void)
2238  *
2239  * Notes:	This is an internal use function used by vnode_pager_unmap().
2240  *		It will attempt to obtain a reference on the supplied vnode,
2241  *		and if it can do so, and there is an associated ubc_info, and
2242  *		the flags indicate that it was mapped via ubc_map(), then the
2243  *		flag is cleared, the mapping removed, and the reference taken
2244  *		by ubc_map() is released.
2245  *
2246  *		IMPORTANT: This MUST only be called by the VM
2247  *		to prevent race conditions.
2248  */
2249 __private_extern__ void
2250 ubc_unmap(struct vnode *vp)
2251 {
2252 	struct ubc_info *uip;
2253 	int     need_rele = 0;
2254 	int     need_wakeup = 0;
2255 
2256 	if (vnode_getwithref(vp)) {
2257 		return;
2258 	}
2259 
2260 	if (UBCINFOEXISTS(vp)) {
2261 		bool want_fsevent = false;
2262 
2263 		vnode_lock(vp);
2264 		uip = vp->v_ubcinfo;
2265 
2266 		while (ISSET(uip->ui_flags, UI_MAPBUSY)) {
2267 			SET(uip->ui_flags, UI_MAPWAITING);
2268 			(void) msleep(&uip->ui_flags, &vp->v_lock,
2269 			    PRIBIO, "ubc_unmap", NULL);
2270 		}
2271 		SET(uip->ui_flags, UI_MAPBUSY);
2272 
2273 		if (ISSET(uip->ui_flags, UI_ISMAPPED)) {
2274 			if (ISSET(uip->ui_flags, UI_MAPPEDWRITE)) {
2275 				want_fsevent = true;
2276 			}
2277 
2278 			need_rele = 1;
2279 
2280 			/*
2281 			 * We want to clear the mapped flags after we've called
2282 			 * VNOP_MNOMAP to avoid certain races and allow
2283 			 * VNOP_MNOMAP to call ubc_is_mapped_writable.
2284 			 */
2285 		}
2286 		vnode_unlock(vp);
2287 
2288 		if (need_rele) {
2289 			vfs_context_t ctx = vfs_context_current();
2290 
2291 			(void)VNOP_MNOMAP(vp, ctx);
2292 
2293 #if CONFIG_FSE
2294 			/*
2295 			 * Why do we want an fsevent here?  Normally the
2296 			 * content modified fsevent is posted when a file is
2297 			 * closed and only if it's written to via conventional
2298 			 * means.  It's perfectly legal to close a file and
2299 			 * keep your mappings and we don't currently track
2300 			 * whether it was written to via a mapping.
2301 			 * Therefore, we need to post an fsevent here if the
2302 			 * file was mapped writable.  This may result in false
2303 			 * events, i.e. we post a notification when nothing
2304 			 * has really changed.
2305 			 */
2306 			if (want_fsevent && need_fsevent(FSE_CONTENT_MODIFIED, vp)) {
2307 				add_fsevent(FSE_CONTENT_MODIFIED_NO_HLINK, ctx,
2308 				    FSE_ARG_VNODE, vp,
2309 				    FSE_ARG_DONE);
2310 			}
2311 #endif
2312 
2313 			vnode_rele(vp);
2314 		}
2315 
2316 		vnode_lock_spin(vp);
2317 
2318 		if (need_rele) {
2319 			CLR(uip->ui_flags, UI_ISMAPPED | UI_MAPPEDWRITE);
2320 		}
2321 
2322 		CLR(uip->ui_flags, UI_MAPBUSY);
2323 
2324 		if (ISSET(uip->ui_flags, UI_MAPWAITING)) {
2325 			CLR(uip->ui_flags, UI_MAPWAITING);
2326 			need_wakeup = 1;
2327 		}
2328 		vnode_unlock(vp);
2329 
2330 		if (need_wakeup) {
2331 			wakeup(&uip->ui_flags);
2332 		}
2333 	}
2334 	/*
2335 	 * the drop of the vnode ref will cleanup
2336 	 */
2337 	vnode_put(vp);
2338 }
2339 
2340 
2341 /*
2342  * ubc_page_op
2343  *
2344  * Manipulate individual page state for a vnode with an associated ubc_info
2345  * with an associated memory object control.
2346  *
2347  * Parameters:	vp			The vnode backing the page
2348  *		f_offset		A file offset interior to the page
2349  *		ops			The operations to perform, as a bitmap
2350  *					(see below for more information)
2351  *		phys_entryp		The address of a ppnum_t; may be NULL
2352  *					to ignore
2353  *		flagsp			A pointer to an int to contain flags;
2354  *					may be NULL to ignore
2355  *
2356  * Returns:	KERN_SUCCESS		Success
2357  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2358  *					object associated
2359  *		KERN_INVALID_OBJECT	If UPL_POP_PHYSICAL and the object is
2360  *					not physically contiguous
2361  *		KERN_INVALID_OBJECT	If !UPL_POP_PHYSICAL and the object is
2362  *					physically contiguous
2363  *		KERN_FAILURE		If the page cannot be looked up
2364  *
2365  * Implicit Returns:
2366  *		*phys_entryp (modified)	If phys_entryp is non-NULL and
2367  *					UPL_POP_PHYSICAL
2368  *		*flagsp (modified)	If flagsp is non-NULL and there was
2369  *					!UPL_POP_PHYSICAL and a KERN_SUCCESS
2370  *
2371  * Notes:	For object boundaries, it is considerably more efficient to
2372  *		ensure that f_offset is in fact on a page boundary, as this
2373  *		will avoid internal use of the hash table to identify the
2374  *		page, and would therefore skip a number of early optimizations.
2375  *		Since this is a page operation anyway, the caller should try
2376  *		to pass only a page aligned offset because of this.
2377  *
2378  *		*flagsp may be modified even if this function fails.  If it is
2379  *		modified, it will contain the condition of the page before the
2380  *		requested operation was attempted; these will only include the
2381  *		bitmap flags, and not the UPL_POP_PHYSICAL, UPL_POP_DUMP,
2382  *		UPL_POP_SET, or UPL_POP_CLR bits.
2383  *
2384  *		The flags field may contain a specific operation, such as
2385  *		UPL_POP_PHYSICAL or UPL_POP_DUMP:
2386  *
2387  *		o	UPL_POP_PHYSICAL	Fail if not contiguous; if
2388  *						*phys_entryp and successful, set
2389  *						*phys_entryp
2390  *		o	UPL_POP_DUMP		Dump the specified page
2391  *
2392  *		Otherwise, it is treated as a bitmap of one or more page
2393  *		operations to perform on the final memory object; allowable
2394  *		bit values are:
2395  *
2396  *		o	UPL_POP_DIRTY		The page is dirty
2397  *		o	UPL_POP_PAGEOUT		The page is paged out
2398  *		o	UPL_POP_PRECIOUS	The page is precious
2399  *		o	UPL_POP_ABSENT		The page is absent
2400  *		o	UPL_POP_BUSY		The page is busy
2401  *
2402  *		If the page status is only being queried and not modified, then
2403  *		no other bits should be specified.  However, if it is being
2404  *		modified, exactly ONE of the following bits should be set:
2405  *
2406  *		o	UPL_POP_SET		Set the current bitmap bits
2407  *		o	UPL_POP_CLR		Clear the current bitmap bits
2408  *
2409  *		Thus to effect a combination of setting and clearing, it may be
2410  *		necessary to call this function twice.  If this is done, the
2411  *		set should be used before the clear, since clearing may trigger
2412  *		a wakeup on the destination page, and if the page is backed by
2413  *		an encrypted swap file, setting will trigger the decryption
2414  *		needed before the wakeup occurs.
2415  */
2416 kern_return_t
2417 ubc_page_op(
2418 	struct vnode    *vp,
2419 	off_t           f_offset,
2420 	int             ops,
2421 	ppnum_t *phys_entryp,
2422 	int             *flagsp)
2423 {
2424 	memory_object_control_t         control;
2425 
2426 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2427 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2428 		return KERN_INVALID_ARGUMENT;
2429 	}
2430 
2431 	return memory_object_page_op(control,
2432 	           (memory_object_offset_t)f_offset,
2433 	           ops,
2434 	           phys_entryp,
2435 	           flagsp);
2436 }
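
/*
 * Hypothetical caller sketch, following the query/modify rules above: a
 * query passes no UPL_POP_SET/UPL_POP_CLR bit, while a modification
 * passes exactly one of them.
 *
 *	int flags = 0;
 *
 *	// query: what state was the page in?
 *	if (ubc_page_op(vp, f_offset, 0, NULL, &flags) == KERN_SUCCESS) {
 *		boolean_t was_dirty = (flags & UPL_POP_DIRTY) != 0;
 *	}
 *
 *	// modify: mark the page dirty
 *	(void) ubc_page_op(vp, f_offset, UPL_POP_SET | UPL_POP_DIRTY,
 *	    NULL, NULL);
 */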
2437 
2438 
2439 /*
2440  * ubc_range_op
2441  *
2442  * Manipulate page state for a range of memory for a vnode with an associated
2443  * ubc_info with an associated memory object control, when page level state is
2444  * not required to be returned from the call (i.e. there are no phys_entryp or
2445  * flagsp parameters to this call, and it takes a range which may contain
2446  * multiple pages, rather than an offset interior to a single page).
2447  *
2448  * Parameters:	vp			The vnode backing the page
2449  *		f_offset_beg		A file offset interior to the start page
2450  *		f_offset_end		A file offset interior to the end page
2451  *		ops			The operations to perform, as a bitmap
2452  *					(see below for more information)
2453  *		range			The address of an int; may be NULL to
2454  *					ignore
2455  *
2456  * Returns:	KERN_SUCCESS		Success
2457  *		KERN_INVALID_ARGUMENT	If the memory object control has no VM
2458  *					object associated
2459  *		KERN_INVALID_OBJECT	If the object is physically contiguous
2460  *
2461  * Implicit Returns:
2462  *		*range (modified)	If range is non-NULL, its contents will
2463  *					be modified to contain the number of
2464  *					bytes successfully operated upon.
2465  *
2466  * Notes:	IMPORTANT: This function cannot be used on a range that
2467  *		consists of physically contiguous pages.
2468  *
2469  *		For object boundaries, it is considerably more efficient to
2470  *		ensure that f_offset_beg and f_offset_end are in fact on page
2471  *		boundaries, as this will avoid internal use of the hash table
2472  *		to identify the page, and would therefore skip a number of
2473  *		early optimizations.  Since this is an operation on a set of
2474  *		pages anyway, the caller should try to pass only a page aligned
2475  *		pages anyway, the caller should try to pass only page aligned
2476  *
2477  *		*range will be modified only if this function succeeds.
2478  *
2479  *		The flags field MUST contain a specific operation; allowable
2480  *		values are:
2481  *
2482  *		o	UPL_ROP_ABSENT	Returns the extent of the range
2483  *					presented which is absent, starting
2484  *					with the start address presented
2485  *
2486  *		o	UPL_ROP_PRESENT	Returns the extent of the range
2487  *					presented which is present (resident),
2488  *					starting with the start address
2489  *					presented
2490  *		o	UPL_ROP_DUMP	Dump the pages which are found in the
2491  *					target object for the target range.
2492  *
2493  *		IMPORTANT: For UPL_ROP_ABSENT and UPL_ROP_PRESENT; if there are
2494  *		multiple regions in the range, only the first matching region
2495  *		is returned.
2496  */
2497 kern_return_t
2498 ubc_range_op(
2499 	struct vnode    *vp,
2500 	off_t           f_offset_beg,
2501 	off_t           f_offset_end,
2502 	int             ops,
2503 	int             *range)
2504 {
2505 	memory_object_control_t         control;
2506 
2507 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2508 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2509 		return KERN_INVALID_ARGUMENT;
2510 	}
2511 
2512 	return memory_object_range_op(control,
2513 	           (memory_object_offset_t)f_offset_beg,
2514 	           (memory_object_offset_t)f_offset_end,
2515 	           ops,
2516 	           range);
2517 }
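
/*
 * Hypothetical caller sketch: measure how much of the leading part of a
 * range is resident.  start_off and len are assumed locals; on success,
 * range holds the byte length of the first present region.
 *
 *	int range = 0;
 *	kern_return_t kr = ubc_range_op(vp, start_off, start_off + len,
 *	    UPL_ROP_PRESENT, &range);
 *	if (kr == KERN_SUCCESS) {
 *		// pages in [start_off, start_off + range) are resident
 *	}
 */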
2518 
2519 
2520 /*
2521  * ubc_create_upl
2522  *
2523  * Given a vnode, cause the population of a portion of the vm_object; based on
2524  * the nature of the request, the pages returned may contain valid data, or
2525  * they may be uninitialized.
2526  *
2527  * Parameters:	vp			The vnode from which to create the upl
2528  *		f_offset		The start offset into the backing store
2529  *					represented by the vnode
2530  *		bufsize			The size of the upl to create
2531  *		uplp			Pointer to the upl_t to receive the
2532  *					created upl; MUST NOT be NULL
2533  *		plp			Pointer to receive the internal page
2534  *					list for the created upl; MAY be NULL
2535  *					to ignore
2536  *
2537  * Returns:	KERN_SUCCESS		The requested upl has been created
2538  *		KERN_INVALID_ARGUMENT	The bufsize argument is not an even
2539  *					multiple of the page size
2540  *		KERN_INVALID_ARGUMENT	There is no ubc_info associated with
2541  *					the vnode, or there is no memory object
2542  *					control associated with the ubc_info
2543  *	memory_object_upl_request:KERN_INVALID_VALUE
2544  *					The supplied upl_flags argument is
2545  *					invalid
2546  * Implicit Returns:
2547  *		*uplp (modified)
2548  *		*plp (modified)		If non-NULL, the value of *plp will be
2549  *					modified to point to the internal page
2550  *					list; this modification may occur even
2551  *					if this function is unsuccessful, in
2552  *					which case the contents may be invalid
2553  *
2554  * Note:	If successful, the returned *uplp MUST subsequently be freed
2555  *		via a call to ubc_upl_commit(), ubc_upl_commit_range(),
2556  *		ubc_upl_abort(), or ubc_upl_abort_range().
2557  */
2558 kern_return_t
2559 ubc_create_upl_external(
2560 	struct vnode    *vp,
2561 	off_t           f_offset,
2562 	int             bufsize,
2563 	upl_t           *uplp,
2564 	upl_page_info_t **plp,
2565 	int             uplflags)
2566 {
2567 	return ubc_create_upl_kernel(vp, f_offset, bufsize, uplp, plp, uplflags, vm_tag_bt());
2568 }
2569 
2570 kern_return_t
2571 ubc_create_upl_kernel(
2572 	struct vnode    *vp,
2573 	off_t           f_offset,
2574 	int             bufsize,
2575 	upl_t           *uplp,
2576 	upl_page_info_t **plp,
2577 	int             uplflags,
2578 	vm_tag_t tag)
2579 {
2580 	memory_object_control_t         control;
2581 	kern_return_t                   kr;
2582 
2583 	if (plp != NULL) {
2584 		*plp = NULL;
2585 	}
2586 	*uplp = NULL;
2587 
2588 	if (bufsize & 0xfff) {
2589 		return KERN_INVALID_ARGUMENT;
2590 	}
2591 
2592 	if (bufsize > MAX_UPL_SIZE_BYTES) {
2593 		return KERN_INVALID_ARGUMENT;
2594 	}
2595 
2596 	if (uplflags & (UPL_UBC_MSYNC | UPL_UBC_PAGEOUT | UPL_UBC_PAGEIN)) {
2597 		if (uplflags & UPL_UBC_MSYNC) {
2598 			uplflags &= UPL_RET_ONLY_DIRTY;
2599 
2600 			uplflags |= UPL_COPYOUT_FROM | UPL_CLEAN_IN_PLACE |
2601 			    UPL_SET_INTERNAL | UPL_SET_LITE;
2602 		} else if (uplflags & UPL_UBC_PAGEOUT) {
2603 			uplflags &= UPL_RET_ONLY_DIRTY;
2604 
2605 			if (uplflags & UPL_RET_ONLY_DIRTY) {
2606 				uplflags |= UPL_NOBLOCK;
2607 			}
2608 
2609 			uplflags |= UPL_FOR_PAGEOUT | UPL_CLEAN_IN_PLACE |
2610 			    UPL_COPYOUT_FROM | UPL_SET_INTERNAL | UPL_SET_LITE;
2611 		} else {
2612 			uplflags |= UPL_RET_ONLY_ABSENT |
2613 			    UPL_NO_SYNC | UPL_CLEAN_IN_PLACE |
2614 			    UPL_SET_INTERNAL | UPL_SET_LITE;
2615 
2616 			/*
2617 			 * if the requested size == PAGE_SIZE, we don't want to set
2618 			 * the UPL_NOBLOCK since we may be trying to recover from a
2619 			 * previous partial pagein I/O that occurred because we were low
2620 			 * on memory and bailed early in order to honor the UPL_NOBLOCK...
2621 			 * since we're only asking for a single page, we can block w/o fear
2622 			 * of tying up pages while waiting for more to become available
2623 			 */
2624 			if (bufsize > PAGE_SIZE) {
2625 				uplflags |= UPL_NOBLOCK;
2626 			}
2627 		}
2628 	} else {
2629 		uplflags &= ~UPL_FOR_PAGEOUT;
2630 
2631 		if (uplflags & UPL_WILL_BE_DUMPED) {
2632 			uplflags &= ~UPL_WILL_BE_DUMPED;
2633 			uplflags |= (UPL_NO_SYNC | UPL_SET_INTERNAL);
2634 		} else {
2635 			uplflags |= (UPL_NO_SYNC | UPL_CLEAN_IN_PLACE | UPL_SET_INTERNAL);
2636 		}
2637 	}
2638 	control = ubc_getobject(vp, UBC_FLAGS_NONE);
2639 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
2640 		return KERN_INVALID_ARGUMENT;
2641 	}
2642 
2643 	kr = memory_object_upl_request(control, f_offset, bufsize, uplp, NULL, NULL, uplflags, tag);
2644 	if (kr == KERN_SUCCESS && plp != NULL) {
2645 		*plp = UPL_GET_INTERNAL_PAGE_LIST(*uplp);
2646 	}
2647 	return kr;
2648 }
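
/*
 * Hypothetical caller sketch: create a single-page pagein UPL and
 * release it.  The tag is an assumed choice; per the note above, a
 * successful *uplp must be released through exactly one of the
 * commit/abort interfaces below.
 *
 *	upl_t upl = NULL;
 *	upl_page_info_t *pl = NULL;
 *	kern_return_t kr = ubc_create_upl_kernel(vp, f_offset, PAGE_SIZE,
 *	    &upl, &pl, UPL_UBC_PAGEIN, VM_KERN_MEMORY_FILE);
 *	if (kr == KERN_SUCCESS) {
 *		// ... perform the I/O described by the upl ...
 *		(void) ubc_upl_commit(upl);
 *	}
 */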
2649 
2650 
2651 /*
2652  * ubc_upl_maxbufsize
2653  *
2654  * Return the maximum bufsize ubc_create_upl( ) will take.
2655  *
2656  * Parameters:	none
2657  *
2658  * Returns:	maximum size buffer (in bytes) ubc_create_upl( ) will take.
2659  */
2660 upl_size_t
2661 ubc_upl_maxbufsize(
2662 	void)
2663 {
2664 	return MAX_UPL_SIZE_BYTES;
2665 }
2666 
2667 /*
2668  * ubc_upl_map
2669  *
2670  * Map the page list associated with the supplied upl into the kernel virtual
2671  * address space at the virtual address indicated by the dst_addr argument;
2672  * the entire upl is mapped
2673  *
2674  * Parameters:	upl			The upl to map
2675  *		dst_addr		The address at which to map the upl
2676  *
2677  * Returns:	KERN_SUCCESS		The upl has been mapped
2678  *		KERN_INVALID_ARGUMENT	The upl is UPL_NULL
2679  *		KERN_FAILURE		The upl is already mapped
2680  *	vm_map_enter:KERN_INVALID_ARGUMENT
2681  *					A failure code from vm_map_enter() due
2682  *					to an invalid argument
2683  */
2684 kern_return_t
2685 ubc_upl_map(
2686 	upl_t           upl,
2687 	vm_offset_t     *dst_addr)
2688 {
2689 	return vm_upl_map(kernel_map, upl, dst_addr);
2690 }
2691 
2692 /*
2693  * ubc_upl_map_range: similar to ubc_upl_map, but the focus is on a range
2694  * of the UPL. Takes an offset, size, and protection so that only a part
2695  * of the UPL can be mapped with the right protections.
2696  */
2697 kern_return_t
2698 ubc_upl_map_range(
2699 	upl_t           upl,
2700 	vm_offset_t     offset_to_map,
2701 	vm_size_t       size_to_map,
2702 	vm_prot_t       prot_to_map,
2703 	vm_offset_t     *dst_addr)
2704 {
2705 	return vm_upl_map_range(kernel_map, upl, offset_to_map, size_to_map, prot_to_map, dst_addr);
2706 }
2707 
2708 
2709 /*
2710  * ubc_upl_unmap
2711  *
2712  * Unmap the page list associated with the supplied upl from the kernel virtual
2713  * address space; the entire upl is unmapped.
2714  *
2715  * Parameters:	upl			The upl to unmap
2716  *
2717  * Returns:	KERN_SUCCESS		The upl has been unmapped
2718  *		KERN_FAILURE		The upl is not currently mapped
2719  *		KERN_INVALID_ARGUMENT	If the upl is UPL_NULL
2720  */
2721 kern_return_t
2722 ubc_upl_unmap(
2723 	upl_t   upl)
2724 {
2725 	return vm_upl_unmap(kernel_map, upl);
2726 }
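
/*
 * Hypothetical caller sketch: temporarily map a UPL into the kernel map
 * to inspect its bytes, then unmap it.
 *
 *	vm_offset_t dst_addr = 0;
 *	if (ubc_upl_map(upl, &dst_addr) == KERN_SUCCESS) {
 *		// the upl's pages are now addressable at dst_addr
 *		(void) ubc_upl_unmap(upl);
 *	}
 */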
2727 
2728 /*
2729  * ubc_upl_unmap_range: similar to ubc_upl_unmap, but the focus is
2730  * on the part of the UPL that is mapped. The offset and size parameters
2731  * specify what part of the UPL needs to be unmapped.
2732  *
2733  * Note: Currently offset & size are unused as we always initiate the unmap from the
2734  * very beginning of the UPL's mapping and track the mapped size in the UPL. But we
2735  * might want to allow unmapping a UPL in the middle, for example, and we can use the
2736  * offset + size parameters for that purpose.
2737  */
2738 kern_return_t
2739 ubc_upl_unmap_range(
2740 	upl_t   upl,
2741 	vm_offset_t     offset_to_unmap,
2742 	vm_size_t       size_to_unmap)
2743 {
2744 	return vm_upl_unmap_range(kernel_map, upl, offset_to_unmap, size_to_unmap);
2745 }
2746 
2747 
2748 /*
2749  * ubc_upl_commit
2750  *
2751  * Commit the contents of the upl to the backing store
2752  *
2753  * Parameters:	upl			The upl to commit
2754  *
2755  * Returns:	KERN_SUCCESS		The upl has been committed
2756  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2757  *		KERN_FAILURE		The supplied upl does not represent
2758  *					device memory, and the offset plus the
2759  *					size would exceed the actual size of
2760  *					the upl
2761  *
2762  * Notes:	In practice, the only return value for this function should be
2763  *		KERN_SUCCESS, unless there has been data structure corruption;
2764  *		since the upl is deallocated regardless of success or failure,
2765  *		there's really nothing to do about this other than panic.
2766  *
2767  *		IMPORTANT: Use of this function should not be mixed with use of
2768  *		ubc_upl_commit_range(), due to the unconditional deallocation
2769  *		by this function.
2770  */
2771 kern_return_t
2772 ubc_upl_commit(
2773 	upl_t                   upl)
2774 {
2775 	upl_page_info_t *pl;
2776 	kern_return_t   kr;
2777 
2778 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2779 	kr = upl_commit(upl, pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT);
2780 	upl_deallocate(upl);
2781 	return kr;
2782 }
2783 
2784 
2785 /*
2786  * ubc_upl_commit_range
2787  *
2788  * Commit the contents of the specified range of the upl to the backing store
2789  *
2790  * Parameters:	upl			The upl to commit
2791  *		offset			The offset into the upl
2792  *		size			The size of the region to be committed,
2793  *					starting at the specified offset
2794  *		flags			commit type (see below)
2795  *
2796  * Returns:	KERN_SUCCESS		The range has been committed
2797  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2798  *		KERN_FAILURE		The supplied upl does not represent
2799  *					device memory, and the offset plus the
2800  *					size would exceed the actual size of
2801  *					the upl
2802  *
2803  * Notes:	IMPORTANT: If the commit is successful, and the object is now
2804  *		empty, the upl will be deallocated.  Since the caller cannot
2805  *		check that this is the case, the UPL_COMMIT_FREE_ON_EMPTY flag
2806  *		should generally only be used when the offset is 0 and the size
2807  *		is equal to the upl size.
2808  *
2809  *		The flags argument is a bitmap of flags on the range of pages in
2810  *		the upl to be committed; allowable flags are:
2811  *
2812  *		o	UPL_COMMIT_FREE_ON_EMPTY	Free the upl when it is
2813  *							both empty and has been
2814  *							successfully committed
2815  *		o	UPL_COMMIT_CLEAR_DIRTY		Clear each page's dirty
2816  *							bit; will prevent a
2817  *							later pageout
2818  *		o	UPL_COMMIT_SET_DIRTY		Set each page's dirty
2819  *							bit; will cause a later
2820  *							pageout
2821  *		o	UPL_COMMIT_INACTIVATE		Clear each page's
2822  *							reference bit; the page
2823  *							will not be accessed
2824  *		o	UPL_COMMIT_ALLOW_ACCESS		Unbusy each page; pages
2825  *							become busy when an
2826  *							IOMemoryDescriptor is
2827  *							mapped or redirected,
2828  *							and we have to wait for
2829  *							an IOKit driver
2830  *
2831  *		The flag UPL_COMMIT_NOTIFY_EMPTY is used internally, and should
2832  *		not be specified by the caller.
2833  *
2834  *		The UPL_COMMIT_CLEAR_DIRTY and UPL_COMMIT_SET_DIRTY flags are
2835  *		mutually exclusive, and should not be combined.
2836  */
2837 kern_return_t
2838 ubc_upl_commit_range(
2839 	upl_t                   upl,
2840 	upl_offset_t            offset,
2841 	upl_size_t              size,
2842 	int                             flags)
2843 {
2844 	upl_page_info_t *pl;
2845 	boolean_t               empty;
2846 	kern_return_t   kr;
2847 
2848 	if (flags & UPL_COMMIT_FREE_ON_EMPTY) {
2849 		flags |= UPL_COMMIT_NOTIFY_EMPTY;
2850 	}
2851 
2852 	if (flags & UPL_COMMIT_KERNEL_ONLY_FLAGS) {
2853 		return KERN_INVALID_ARGUMENT;
2854 	}
2855 
2856 	pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
2857 
2858 	kr = upl_commit_range(upl, offset, size, flags,
2859 	    pl, MAX_UPL_SIZE_BYTES >> PAGE_SHIFT, &empty);
2860 
2861 	if ((flags & UPL_COMMIT_FREE_ON_EMPTY) && empty) {
2862 		upl_deallocate(upl);
2863 	}
2864 
2865 	return kr;
2866 }
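
/*
 * Hypothetical caller sketch, per the notes above: commit the whole UPL
 * (offset 0, full size) with UPL_COMMIT_FREE_ON_EMPTY so it is
 * deallocated once drained.  upl_size is an assumed local holding the
 * size passed to ubc_create_upl*().
 *
 *	(void) ubc_upl_commit_range(upl, 0, upl_size,
 *	    UPL_COMMIT_FREE_ON_EMPTY | UPL_COMMIT_CLEAR_DIRTY);
 */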
2867 
2868 
2869 /*
2870  * ubc_upl_abort_range
2871  *
2872  * Abort the contents of the specified range of the specified upl
2873  *
2874  * Parameters:	upl			The upl to abort
2875  *		offset			The offset into the upl
2876  *		size			The size of the region to be aborted,
2877  *					starting at the specified offset
2878  *		abort_flags		abort type (see below)
2879  *
2880  * Returns:	KERN_SUCCESS		The range has been aborted
2881  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2882  *		KERN_FAILURE		The supplied upl does not represent
2883  *					device memory, and the offset plus the
2884  *					size would exceed the actual size of
2885  *					the upl
2886  *
2887  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2888  *		empty, the upl will be deallocated.  Since the caller cannot
2889  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2890  *		should generally only be used when the offset is 0 and the size
2891  *		is equal to the upl size.
2892  *
2893  *		The abort_flags argument is a bitmap of flags on the range of
2894  *		pages in the upl to be aborted; allowable flags are:
2895  *
2896  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2897  *						empty and has been successfully
2898  *						aborted
2899  *		o	UPL_ABORT_RESTART	The operation must be restarted
2900  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2901  *		o	UPL_ABORT_ERROR		An I/O error occurred
2902  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2903  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2904  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2905  *
2906  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2907  *		not be specified by the caller.  It is intended to fulfill the
2908  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2909  *		ubc_upl_commit_range(), but is never referenced internally.
2910  *
2911  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2912  *		referenced; do not use it.
2913  */
2914 kern_return_t
2915 ubc_upl_abort_range(
2916 	upl_t                   upl,
2917 	upl_offset_t            offset,
2918 	upl_size_t              size,
2919 	int                             abort_flags)
2920 {
2921 	kern_return_t   kr;
2922 	boolean_t               empty = FALSE;
2923 
2924 	if (abort_flags & UPL_ABORT_FREE_ON_EMPTY) {
2925 		abort_flags |= UPL_ABORT_NOTIFY_EMPTY;
2926 	}
2927 
2928 	kr = upl_abort_range(upl, offset, size, abort_flags, &empty);
2929 
2930 	if ((abort_flags & UPL_ABORT_FREE_ON_EMPTY) && empty) {
2931 		upl_deallocate(upl);
2932 	}
2933 
2934 	return kr;
2935 }
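
/*
 * Hypothetical caller sketch: the abort-side counterpart, typically
 * used after an I/O error so the pages are not left busy and the upl is
 * freed once empty.  upl_size is an assumed local, as above.
 *
 *	(void) ubc_upl_abort_range(upl, 0, upl_size,
 *	    UPL_ABORT_ERROR | UPL_ABORT_FREE_ON_EMPTY);
 */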
2936 
2937 
2938 /*
2939  * ubc_upl_abort
2940  *
2941  * Abort the contents of the specified upl
2942  *
2943  * Parameters:	upl			The upl to abort
2944  *		abort_type		abort type (see below)
2945  *
2946  * Returns:	KERN_SUCCESS		The range has been aborted
2947  *		KERN_INVALID_ARGUMENT	The supplied upl was UPL_NULL
2948  *		KERN_FAILURE		The supplied upl does not represent
2949  *					device memory, and the offset plus the
2950  *					size would exceed the actual size of
2951  *					the upl
2952  *
2953  * Notes:	IMPORTANT: If the abort is successful, and the object is now
2954  *		empty, the upl will be deallocated.  Since the caller cannot
2955  *		check that this is the case, the UPL_ABORT_FREE_ON_EMPTY flag
2956  *		should generally only be used when the offset is 0 and the size
2957  *		is equal to the upl size.
2958  *
2959  *		The abort_type is a bitmap of flags on the range of
2960  *		pages in the upl to be aborted; allowable flags are:
2961  *
2962  *		o	UPL_ABORT_FREE_ON_EMPTY	Free the upl when it is both
2963  *						empty and has been successfully
2964  *						aborted
2965  *		o	UPL_ABORT_RESTART	The operation must be restarted
2966  *		o	UPL_ABORT_UNAVAILABLE	The pages are unavailable
2967  *		o	UPL_ABORT_ERROR		An I/O error occurred
2968  *		o	UPL_ABORT_DUMP_PAGES	Just free the pages
2969  *		o	UPL_ABORT_NOTIFY_EMPTY	RESERVED
2970  *		o	UPL_ABORT_ALLOW_ACCESS	RESERVED
2971  *
2972  *		The UPL_ABORT_NOTIFY_EMPTY is an internal use flag and should
2973  *		not be specified by the caller.  It is intended to fulfill the
2974  *		same role as UPL_COMMIT_NOTIFY_EMPTY does in the function
2975  *		ubc_upl_commit_range(), but is never referenced internally.
2976  *
2977  *		The UPL_ABORT_ALLOW_ACCESS is defined, but neither set nor
2978  *		referenced; do not use it.
2979  */
2980 kern_return_t
2981 ubc_upl_abort(
2982 	upl_t                   upl,
2983 	int                             abort_type)
2984 {
2985 	kern_return_t   kr;
2986 
2987 	kr = upl_abort(upl, abort_type);
2988 	upl_deallocate(upl);
2989 	return kr;
2990 }
2991 
2992 
2993 /*
2994  * ubc_upl_pageinfo
2995  *
2996  *  Retrieve the internal page list for the specified upl
2997  *
2998  * Parameters:	upl			The upl to obtain the page list from
2999  *
3000  * Returns:	!NULL			The (upl_page_info_t *) for the page
3001  *					list internal to the upl
3002  *		NULL			Error/no page list associated
3003  *
3004  * Notes:	IMPORTANT: The function is only valid on internal objects
3005  *		where the list request was made with the UPL_INTERNAL flag.
3006  *
3007  *		This function is a utility helper function, since some callers
3008  *		may not have direct access to the header defining the macro,
3009  *		due to abstraction layering constraints.
3010  */
3011 upl_page_info_t *
3012 ubc_upl_pageinfo(
3013 	upl_t                   upl)
3014 {
3015 	return UPL_GET_INTERNAL_PAGE_LIST(upl);
3016 }
3017 
3018 
3019 int
3020 UBCINFOEXISTS(const struct vnode * vp)
3021 {
3022 	return (vp) && ((vp)->v_type == VREG) && ((vp)->v_ubcinfo != UBC_INFO_NULL);
3023 }
3024 
3025 
3026 void
3027 ubc_upl_range_needed(
3028 	upl_t           upl,
3029 	int             index,
3030 	int             count)
3031 {
3032 	upl_range_needed(upl, index, count);
3033 }
3034 
3035 boolean_t
3036 ubc_is_mapped(const struct vnode *vp, boolean_t *writable)
3037 {
3038 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_ISMAPPED)) {
3039 		return FALSE;
3040 	}
3041 	if (writable) {
3042 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_MAPPEDWRITE);
3043 	}
3044 	return TRUE;
3045 }
3046 
3047 boolean_t
3048 ubc_is_mapped_writable(const struct vnode *vp)
3049 {
3050 	boolean_t writable;
3051 	return ubc_is_mapped(vp, &writable) && writable;
3052 }
3053 
3054 boolean_t
3055 ubc_was_mapped(const struct vnode *vp, boolean_t *writable)
3056 {
3057 	if (!UBCINFOEXISTS(vp) || !ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPED)) {
3058 		return FALSE;
3059 	}
3060 	if (writable) {
3061 		*writable = ISSET(vp->v_ubcinfo->ui_flags, UI_WASMAPPEDWRITE);
3062 	}
3063 	return TRUE;
3064 }
3065 
3066 boolean_t
3067 ubc_was_mapped_writable(const struct vnode *vp)
3068 {
3069 	boolean_t writable;
3070 	return ubc_was_mapped(vp, &writable) && writable;
3071 }
3072 
3073 
3074 /*
3075  * CODE SIGNING
3076  */
3077 static atomic_size_t cs_blob_size = 0;
3078 static atomic_uint_fast32_t cs_blob_count = 0;
3079 static atomic_size_t cs_blob_size_peak = 0;
3080 static atomic_size_t cs_blob_size_max = 0;
3081 static atomic_uint_fast32_t cs_blob_count_peak = 0;
3082 
3083 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count, 0, "Current number of code signature blobs");
3084 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size, "Current size of all code signature blobs");
3085 SYSCTL_UINT(_vm, OID_AUTO, cs_blob_count_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_count_peak, 0, "Peak number of code signature blobs");
3086 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_peak, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_peak, "Peak size of code signature blobs");
3087 SYSCTL_ULONG(_vm, OID_AUTO, cs_blob_size_max, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_blob_size_max, "Size of biggest code signature blob");
3088 
3089 /*
3090  * Function: csblob_parse_teamid
3091  *
3092  * Description: This function returns a pointer to the team id
3093  *               stored within the codedirectory of the csblob.
3094  *               If the codedirectory predates team-ids, it returns
3095  *               NULL.
3096  *               This does not copy the name but returns a pointer to
3097  *               it within the CD. Consequently, the CD must remain
3098  *               available while the returned pointer is in use.
3099  */
3100 
3101 static const char *
3102 csblob_parse_teamid(struct cs_blob *csblob)
3103 {
3104 	const CS_CodeDirectory *cd;
3105 
3106 	cd = csblob->csb_cd;
3107 
3108 	if (ntohl(cd->version) < CS_SUPPORTSTEAMID) {
3109 		return NULL;
3110 	}
3111 
3112 	if (cd->teamOffset == 0) {
3113 		return NULL;
3114 	}
3115 
3116 	const char *name = ((const char *)cd) + ntohl(cd->teamOffset);
3117 	if (cs_debug > 1) {
3118 		printf("found team-id %s in cdblob\n", name);
3119 	}
3120 
3121 	return name;
3122 }
3123 
3124 kern_return_t
3125 ubc_cs_blob_allocate(
3126 	vm_offset_t     *blob_addr_p,
3127 	vm_size_t       *blob_size_p)
3128 {
3129 	kern_return_t   kr = KERN_FAILURE;
3130 	vm_size_t       allocation_size = 0;
3131 
3132 	if (!blob_addr_p || !blob_size_p) {
3133 		return KERN_INVALID_ARGUMENT;
3134 	}
3135 	allocation_size = *blob_size_p;
3136 
3137 	if (ubc_cs_blob_pagewise_allocate(allocation_size) == true) {
3138 		/* Round up to page size */
3139 		allocation_size = round_page(allocation_size);
3140 
3141 		/* Allocate page-wise */
3142 		kr = kmem_alloc(
3143 			kernel_map,
3144 			blob_addr_p,
3145 			allocation_size,
3146 			KMA_KOBJECT | KMA_DATA | KMA_ZERO,
3147 			VM_KERN_MEMORY_SECURITY);
3148 	} else {
3149 		*blob_addr_p = (vm_offset_t)kalloc_data_tag(
3150 			allocation_size,
3151 			Z_WAITOK | Z_ZERO,
3152 			VM_KERN_MEMORY_SECURITY);
3153 
3154 		assert(*blob_addr_p != 0);
3155 		kr = KERN_SUCCESS;
3156 	}
3157 
3158 	if (kr == KERN_SUCCESS) {
3159 		*blob_size_p = allocation_size;
3160 	}
3161 
3162 	return kr;
3163 }
3164 
3165 void
3166 ubc_cs_blob_deallocate(
3167 	vm_offset_t     blob_addr,
3168 	vm_size_t       blob_size)
3169 {
3170 	if (ubc_cs_blob_pagewise_allocate(blob_size) == true) {
3171 		kmem_free(kernel_map, blob_addr, blob_size);
3172 	} else {
3173 		kfree_data(blob_addr, blob_size);
3174 	}
3175 }
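
/*
 * Hypothetical caller sketch: the allocate/deallocate pair above is
 * used symmetrically.  The size written back by ubc_cs_blob_allocate()
 * (possibly rounded up to a page multiple) is the size that must be
 * passed back on free; signature_size is an assumed input.
 *
 *	vm_offset_t addr = 0;
 *	vm_size_t size = signature_size;
 *	if (ubc_cs_blob_allocate(&addr, &size) == KERN_SUCCESS) {
 *		// ... copy the signature blob into (void *)addr ...
 *		ubc_cs_blob_deallocate(addr, size);
 *	}
 */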
3176 
3177 /*
3178  * Some codesigned files use a lowest common denominator page size of
3179  * 4KiB, but can be used on systems that have a runtime page size of
3180  * 16KiB. Since faults will only occur on 16KiB ranges in
3181  * cs_validate_range(), we can convert the original Code Directory to
3182  * a multi-level scheme where groups of 4 hashes are combined to form
3183  * a new hash, which represents 16KiB in the on-disk file.  This can
3184  * reduce the wired memory requirement for the Code Directory by
3185  * 75%.
3186  */
3187 static boolean_t
3188 ubc_cs_supports_multilevel_hash(struct cs_blob *blob __unused)
3189 {
3190 	const CS_CodeDirectory *cd;
3191 
3192 #if CODE_SIGNING_MONITOR
3193 	// TODO: <rdar://problem/30954826>
3194 	if (csm_enabled() == true) {
3195 		return FALSE;
3196 	}
3197 #endif
3198 
3199 	/*
3200 	 * Only applies to binaries that ship as part of the OS,
3201 	 * primarily the shared cache.
3202 	 */
3203 	if (!blob->csb_platform_binary || blob->csb_teamid != NULL) {
3204 		return FALSE;
3205 	}
3206 
3207 	/*
3208 	 * If the runtime page size matches the code signing page
3209 	 * size, there is no work to do.
3210 	 */
3211 	if (PAGE_SHIFT <= blob->csb_hash_pageshift) {
3212 		return FALSE;
3213 	}
3214 
3215 	cd = blob->csb_cd;
3216 
3217 	/*
3218 	 * There must be a valid integral multiple of hashes
3219 	 */
3220 	if (ntohl(cd->nCodeSlots) & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3221 		return FALSE;
3222 	}
3223 
3224 	/*
3225 	 * Scatter lists must also have ranges that have an integral number of hashes
3226 	 */
3227 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3228 		const SC_Scatter *scatter = (const SC_Scatter*)
3229 		    ((const char*)cd + ntohl(cd->scatterOffset));
3230 		/* iterate all scatter structs to make sure they are all aligned */
3231 		do {
3232 			uint32_t sbase = ntohl(scatter->base);
3233 			uint32_t scount = ntohl(scatter->count);
3234 
3235 			/* last scatter? */
3236 			if (scount == 0) {
3237 				break;
3238 			}
3239 
3240 			if (sbase & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3241 				return FALSE;
3242 			}
3243 
3244 			if (scount & (PAGE_MASK >> blob->csb_hash_pageshift)) {
3245 				return FALSE;
3246 			}
3247 
3248 			scatter++;
3249 		} while (1);
3250 	}
3251 
3252 	/* Covered range must be a multiple of the new page size */
3253 	if (ntohl(cd->codeLimit) & PAGE_MASK) {
3254 		return FALSE;
3255 	}
3256 
3257 	/* All checks pass */
3258 	return TRUE;
3259 }
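
/*
 * Worked example of the alignment checks above, assuming a 16 KiB
 * runtime page size (PAGE_SHIFT = 14) and a 4 KiB code signing page
 * size (csb_hash_pageshift = 12): PAGE_MASK >> csb_hash_pageshift == 3,
 * so nCodeSlots, scatter bases, and scatter counts must all be
 * multiples of 4.  Collapsing each group of 4 hashes into one leaves a
 * quarter of the slots, i.e. the 75% reduction cited in the comment
 * preceding this function.
 */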
3260 
3261 /*
3262  * Reconstruct a cs_blob with the code signature fields. This helper function
3263  * is useful because a lot of things often change the base address of the code
3264  * signature blob, which requires reconstructing some of the other pointers
3265  * within.
3266  */
3267 static errno_t
3268 ubc_cs_blob_reconstruct(
3269 	struct cs_blob *cs_blob,
3270 	const vm_address_t signature_addr,
3271 	const vm_address_t signature_size,
3272 	const vm_offset_t code_directory_offset)
3273 {
3274 	const CS_CodeDirectory *code_directory = NULL;
3275 
3276 	/* Setup the signature blob address */
3277 	cs_blob->csb_mem_kaddr = (void*)signature_addr;
3278 	cs_blob->csb_mem_size = signature_size;
3279 
3280 	/* Setup the code directory in the blob */
3281 	code_directory = (const CS_CodeDirectory*)(signature_addr + code_directory_offset);
3282 	cs_blob->csb_cd = code_directory;
3283 
3284 	/* Setup the XML entitlements */
3285 	cs_blob->csb_entitlements_blob = csblob_find_blob_bytes(
3286 		(uint8_t*)signature_addr,
3287 		signature_size,
3288 		CSSLOT_ENTITLEMENTS,
3289 		CSMAGIC_EMBEDDED_ENTITLEMENTS);
3290 
3291 	/* Setup the DER entitlements */
3292 	cs_blob->csb_der_entitlements_blob = csblob_find_blob_bytes(
3293 		(uint8_t*)signature_addr,
3294 		signature_size,
3295 		CSSLOT_DER_ENTITLEMENTS,
3296 		CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3297 
3298 	return 0;
3299 }
3300 
3301 /*
3302  * Given a validated cs_blob, we reformat the structure to only include
3303  * the blobs which are required by the kernel for our current platform.
3304  * This saves significant memory with agile signatures.
3305  *
3306  * To support rewriting the code directory, potentially through
3307  * multilevel hashes, we provide a mechanism to allocate a code directory
3308  * of a specified size and zero it out --> caller can fill it in.
3309  *
3310  * We don't need to perform a lot of overflow checks as the assumption
3311  * here is that the cs_blob has already been validated.
3312  */
3313 static errno_t
3314 ubc_cs_reconstitute_code_signature(
3315 	const struct cs_blob * const blob,
3316 	vm_address_t * const ret_mem_kaddr,
3317 	vm_size_t * const ret_mem_size,
3318 	vm_size_t code_directory_size,
3319 	CS_CodeDirectory ** const code_directory
3320 	)
3321 {
3322 	vm_address_t new_blob_addr = 0;
3323 	vm_size_t new_blob_size = 0;
3324 	vm_size_t new_code_directory_size = 0;
3325 	const CS_GenericBlob *best_code_directory = NULL;
3326 	const CS_GenericBlob *first_code_directory = NULL;
3327 	const CS_GenericBlob *der_entitlements_blob = NULL;
3328 	const CS_GenericBlob *entitlements_blob = NULL;
3329 	const CS_GenericBlob *cms_blob = NULL;
3330 	const CS_GenericBlob *launch_constraint_self = NULL;
3331 	const CS_GenericBlob *launch_constraint_parent = NULL;
3332 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3333 	const CS_GenericBlob *library_constraint = NULL;
3334 	CS_SuperBlob *superblob = NULL;
3335 	uint32_t num_blobs = 0;
3336 	uint32_t blob_index = 0;
3337 	uint32_t blob_offset = 0;
3338 	kern_return_t ret;
3339 	int err;
3340 
3341 	if (!blob) {
3342 		if (cs_debug > 1) {
3343 			printf("CODE SIGNING: CS Blob passed in is NULL\n");
3344 		}
3345 		return EINVAL;
3346 	}
3347 
3348 	best_code_directory = (const CS_GenericBlob*)blob->csb_cd;
3349 	if (!best_code_directory) {
3350 		/* This case can never happen, and it is a sign of bad things */
3351 		panic("CODE SIGNING: Validated CS Blob has no code directory");
3352 	}
3353 
3354 	new_code_directory_size = code_directory_size;
3355 	if (new_code_directory_size == 0) {
3356 		new_code_directory_size = ntohl(best_code_directory->length);
3357 	}
3358 
3359 	/*
3360 	 * A code signature can contain multiple code directories, each of which contains hashes
3361 	 * of pages based on a hashing algorithm. The kernel selects which hashing algorithm is
3362 	 * the strongest, and consequently, marks one of these code directories as the best
3363 	 * matched one. More often than not, the best matched one is _not_ the first one.
3364 	 *
3365 	 * However, the CMS blob which cryptographically verifies the code signature is only
3366 	 * signed against the first code directory. Therefore, if the CMS blob is present, we also
3367 	 * need the first code directory to be able to verify it. Given this, we organize the
3368 	 * new cs_blob as following order:
3369 	 *
3370 	 * 1. best code directory
3371 	 * 2. DER encoded entitlements blob (if present)
3372 	 * 3. launch constraint self (if present)
3373 	 * 4. launch constraint parent (if present)
3374 	 * 5. launch constraint responsible (if present)
3375 	 * 6. library constraint (if present)
3376 	 * 7. entitlements blob (if present)
3377 	 * 8. cms blob (if present)
3378 	 * 9. first code directory (if not already the best match, and if cms blob is present)
3379 	 *
3380 	 * This order is chosen deliberately, as later on, we expect to get rid of the CMS blob
3381 	 * and the first code directory once their verification is complete.
3382 	 */
3383 
3384 	/* Storage for the super blob header */
3385 	new_blob_size += sizeof(CS_SuperBlob);
3386 
3387 	/* Guaranteed storage for the best code directory */
3388 	new_blob_size += sizeof(CS_BlobIndex);
3389 	new_blob_size += new_code_directory_size;
3390 	num_blobs += 1;
3391 
3392 	/* Conditional storage for the DER entitlements blob */
3393 	der_entitlements_blob = blob->csb_der_entitlements_blob;
3394 	if (der_entitlements_blob) {
3395 		new_blob_size += sizeof(CS_BlobIndex);
3396 		new_blob_size += ntohl(der_entitlements_blob->length);
3397 		num_blobs += 1;
3398 	}
3399 
3400 	/* Conditional storage for the launch constraints self blob */
3401 	launch_constraint_self = csblob_find_blob_bytes(
3402 		(const uint8_t *)blob->csb_mem_kaddr,
3403 		blob->csb_mem_size,
3404 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3405 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3406 	if (launch_constraint_self) {
3407 		new_blob_size += sizeof(CS_BlobIndex);
3408 		new_blob_size += ntohl(launch_constraint_self->length);
3409 		num_blobs += 1;
3410 	}
3411 
3412 	/* Conditional storage for the launch constraints parent blob */
3413 	launch_constraint_parent = csblob_find_blob_bytes(
3414 		(const uint8_t *)blob->csb_mem_kaddr,
3415 		blob->csb_mem_size,
3416 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3417 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3418 	if (launch_constraint_parent) {
3419 		new_blob_size += sizeof(CS_BlobIndex);
3420 		new_blob_size += ntohl(launch_constraint_parent->length);
3421 		num_blobs += 1;
3422 	}
3423 
3424 	/* Conditional storage for the launch constraints responsible blob */
3425 	launch_constraint_responsible = csblob_find_blob_bytes(
3426 		(const uint8_t *)blob->csb_mem_kaddr,
3427 		blob->csb_mem_size,
3428 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3429 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3430 	if (launch_constraint_responsible) {
3431 		new_blob_size += sizeof(CS_BlobIndex);
3432 		new_blob_size += ntohl(launch_constraint_responsible->length);
3433 		num_blobs += 1;
3434 	}
3435 
3436 	/* Conditional storage for the library constraints blob */
3437 	library_constraint = csblob_find_blob_bytes(
3438 		(const uint8_t *)blob->csb_mem_kaddr,
3439 		blob->csb_mem_size,
3440 		CSSLOT_LIBRARY_CONSTRAINT,
3441 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3442 	if (library_constraint) {
3443 		new_blob_size += sizeof(CS_BlobIndex);
3444 		new_blob_size += ntohl(library_constraint->length);
3445 		num_blobs += 1;
3446 	}
3447 
3448 	/* Conditional storage for the entitlements blob */
3449 	entitlements_blob = blob->csb_entitlements_blob;
3450 	if (entitlements_blob) {
3451 		new_blob_size += sizeof(CS_BlobIndex);
3452 		new_blob_size += ntohl(entitlements_blob->length);
3453 		num_blobs += 1;
3454 	}
3455 
3456 	/* Conditional storage for the CMS blob */
3457 	cms_blob = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_SIGNATURESLOT, CSMAGIC_BLOBWRAPPER);
3458 	if (cms_blob) {
3459 		new_blob_size += sizeof(CS_BlobIndex);
3460 		new_blob_size += ntohl(cms_blob->length);
3461 		num_blobs += 1;
3462 	}
3463 
3464 	/*
3465 	 * Conditional storage for the first code directory.
3466 	 * This is only needed if a CMS blob exists and the best code directory isn't already
3467 	 * the first one. It is an error if we find a CMS blob but do not find a first code directory.
3468 	 */
3469 	if (cms_blob) {
3470 		first_code_directory = csblob_find_blob_bytes((const uint8_t *)blob->csb_mem_kaddr, blob->csb_mem_size, CSSLOT_CODEDIRECTORY, CSMAGIC_CODEDIRECTORY);
3471 		if (first_code_directory == best_code_directory) {
3472 			/* We don't need the first code directory separately, since the best one is also the first */
3473 			first_code_directory = NULL;
3474 		} else if (first_code_directory) {
3475 			new_blob_size += sizeof(CS_BlobIndex);
3476 			new_blob_size += ntohl(first_code_directory->length);
3477 			num_blobs += 1;
3478 		} else {
3479 			printf("CODE SIGNING: Invalid CS Blob: found CMS blob but not a first code directory\n");
3480 			return EINVAL;
3481 		}
3482 	}
3483 
3484 	/*
3485 	 * The blob size could be rounded up to page size here, so we keep a copy
3486 	 * of the actual superblob length as well.
3487 	 */
3488 	vm_size_t new_blob_allocation_size = new_blob_size;
3489 	ret = ubc_cs_blob_allocate(&new_blob_addr, &new_blob_allocation_size);
3490 	if (ret != KERN_SUCCESS) {
3491 		printf("CODE SIGNING: Failed to allocate memory for new code signing blob: %d\n", ret);
3492 		return ENOMEM;
3493 	}
3494 
3495 	/*
3496 	 * Fill out the superblob header and then all the blobs in the order listed
3497 	 * above.
3498 	 */
3499 	superblob = (CS_SuperBlob*)new_blob_addr;
3500 	superblob->magic = htonl(CSMAGIC_EMBEDDED_SIGNATURE);
3501 	superblob->length = htonl((uint32_t)new_blob_size);
3502 	superblob->count = htonl(num_blobs);
3503 
3504 	blob_index = 0;
3505 	blob_offset = sizeof(CS_SuperBlob) + (num_blobs * sizeof(CS_BlobIndex));
3506 
3507 	/* Best code directory */
3508 	superblob->index[blob_index].offset = htonl(blob_offset);
3509 	if (first_code_directory) {
3510 		superblob->index[blob_index].type = htonl(CSSLOT_ALTERNATE_CODEDIRECTORIES);
3511 	} else {
3512 		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3513 	}
3514 
3515 	if (code_directory_size > 0) {
3516 		/* We zero out the code directory, as we expect the caller to fill it in */
3517 		memset((void*)(new_blob_addr + blob_offset), 0, new_code_directory_size);
3518 	} else {
3519 		memcpy((void*)(new_blob_addr + blob_offset), best_code_directory, new_code_directory_size);
3520 	}
3521 
3522 	if (code_directory) {
3523 		*code_directory = (CS_CodeDirectory*)(new_blob_addr + blob_offset);
3524 	}
3525 	blob_offset += new_code_directory_size;
3526 
3527 	/* DER entitlements blob */
3528 	if (der_entitlements_blob) {
3529 		blob_index += 1;
3530 		superblob->index[blob_index].offset = htonl(blob_offset);
3531 		superblob->index[blob_index].type = htonl(CSSLOT_DER_ENTITLEMENTS);
3532 
3533 		memcpy((void*)(new_blob_addr + blob_offset), der_entitlements_blob, ntohl(der_entitlements_blob->length));
3534 		blob_offset += ntohl(der_entitlements_blob->length);
3535 	}
3536 
3537 	/* Launch constraints self blob */
3538 	if (launch_constraint_self) {
3539 		blob_index += 1;
3540 		superblob->index[blob_index].offset = htonl(blob_offset);
3541 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_SELF);
3542 
3543 		memcpy(
3544 			(void*)(new_blob_addr + blob_offset),
3545 			launch_constraint_self,
3546 			ntohl(launch_constraint_self->length));
3547 
3548 		blob_offset += ntohl(launch_constraint_self->length);
3549 	}
3550 
3551 	/* Launch constraints parent blob */
3552 	if (launch_constraint_parent) {
3553 		blob_index += 1;
3554 		superblob->index[blob_index].offset = htonl(blob_offset);
3555 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_PARENT);
3556 
3557 		memcpy(
3558 			(void*)(new_blob_addr + blob_offset),
3559 			launch_constraint_parent,
3560 			ntohl(launch_constraint_parent->length));
3561 
3562 		blob_offset += ntohl(launch_constraint_parent->length);
3563 	}
3564 
3565 	/* Launch constraints responsible blob */
3566 	if (launch_constraint_responsible) {
3567 		blob_index += 1;
3568 		superblob->index[blob_index].offset = htonl(blob_offset);
3569 		superblob->index[blob_index].type = htonl(CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE);
3570 
3571 		memcpy(
3572 			(void*)(new_blob_addr + blob_offset),
3573 			launch_constraint_responsible,
3574 			ntohl(launch_constraint_responsible->length));
3575 
3576 		blob_offset += ntohl(launch_constraint_responsible->length);
3577 	}
3578 
3579 	/* library constraints blob */
3580 	if (library_constraint) {
3581 		blob_index += 1;
3582 		superblob->index[blob_index].offset = htonl(blob_offset);
3583 		superblob->index[blob_index].type = htonl(CSSLOT_LIBRARY_CONSTRAINT);
3584 
3585 		memcpy(
3586 			(void*)(new_blob_addr + blob_offset),
3587 			library_constraint,
3588 			ntohl(library_constraint->length));
3589 
3590 		blob_offset += ntohl(library_constraint->length);
3591 	}
3592 
3593 	/* Entitlements blob */
3594 	if (entitlements_blob) {
3595 		blob_index += 1;
3596 		superblob->index[blob_index].offset = htonl(blob_offset);
3597 		superblob->index[blob_index].type = htonl(CSSLOT_ENTITLEMENTS);
3598 
3599 		memcpy((void*)(new_blob_addr + blob_offset), entitlements_blob, ntohl(entitlements_blob->length));
3600 		blob_offset += ntohl(entitlements_blob->length);
3601 	}
3602 
3603 	/* CMS blob */
3604 	if (cms_blob) {
3605 		blob_index += 1;
3606 		superblob->index[blob_index].offset = htonl(blob_offset);
3607 		superblob->index[blob_index].type = htonl(CSSLOT_SIGNATURESLOT);
3608 		memcpy((void*)(new_blob_addr + blob_offset), cms_blob, ntohl(cms_blob->length));
3609 		blob_offset += ntohl(cms_blob->length);
3610 	}
3611 
3612 	/* First code directory */
3613 	if (first_code_directory) {
3614 		blob_index += 1;
3615 		superblob->index[blob_index].offset = htonl(blob_offset);
3616 		superblob->index[blob_index].type = htonl(CSSLOT_CODEDIRECTORY);
3617 		memcpy((void*)(new_blob_addr + blob_offset), first_code_directory, ntohl(first_code_directory->length));
3618 		blob_offset += ntohl(first_code_directory->length);
3619 	}
3620 
3621 	/*
3622 	 * We only validate the blob if we copied in the best code directory. If the
3623 	 * code directory size we were passed was non-zero, we zeroed the best code
3624 	 * directory above and expect the caller to fill it in. In the same spirit, we
3625 	 * expect the caller to validate the code signature after filling in the code
3626 	 * directory.
3627 	 */
3628 	if (code_directory_size == 0) {
3629 		const CS_CodeDirectory *validated_code_directory = NULL;
3630 		const CS_GenericBlob *validated_entitlements_blob = NULL;
3631 		const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3632 
3633 		ret = cs_validate_csblob(
3634 			(const uint8_t *)superblob,
3635 			new_blob_size,
3636 			&validated_code_directory,
3637 			&validated_entitlements_blob,
3638 			&validated_der_entitlements_blob);
3639 
3640 		if (ret) {
3641 			printf("unable to validate reconstituted cs_blob: %d\n", ret);
3642 			err = EINVAL;
3643 			goto fail;
3644 		}
3645 	}
3646 
3647 	if (ret_mem_kaddr) {
3648 		*ret_mem_kaddr = new_blob_addr;
3649 	}
3650 	if (ret_mem_size) {
3651 		*ret_mem_size = new_blob_allocation_size;
3652 	}
3653 
3654 	return 0;
3655 
3656 fail:
3657 	ubc_cs_blob_deallocate(new_blob_addr, new_blob_allocation_size);
3658 	return err;
3659 }
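
/*
 * Editor's note: an illustrative sketch (not part of the original source; the
 * helper name is hypothetical) of the sizing arithmetic used above. The
 * reconstituted superblob is the fixed header, one CS_BlobIndex per retained
 * blob, and the byte length of each retained blob.
 */
static inline vm_size_t
sketch_superblob_size(const CS_GenericBlob *blobs[], uint32_t num_blobs)
{
	vm_size_t size = sizeof(CS_SuperBlob);

	for (uint32_t i = 0; i < num_blobs; i++) {
		/* Each retained blob costs an index entry plus its own length */
		size += sizeof(CS_BlobIndex);
		size += ntohl(blobs[i]->length);
	}
	return size;
}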
3660 
3661 /*
3662  * We use this function to clear out unnecessary bits from the code signature
3663  * blob. We free those portions and give the memory back to
3664  * the kernel. This is needed since reconstitution includes extra data which is
3665  * needed only for verification and is not worth keeping afterwards.
3666  *
3667  * This results in significant memory reduction, especially for 3rd party apps
3668  * since we also get rid of the CMS blob.
3669  */
3670 static errno_t
3671 ubc_cs_reconstitute_code_signature_2nd_stage(
3672 	struct cs_blob *blob
3673 	)
3674 {
3675 	kern_return_t ret = KERN_FAILURE;
3676 	const CS_GenericBlob *launch_constraint_self = NULL;
3677 	const CS_GenericBlob *launch_constraint_parent = NULL;
3678 	const CS_GenericBlob *launch_constraint_responsible = NULL;
3679 	const CS_GenericBlob *library_constraint = NULL;
3680 	CS_SuperBlob *superblob = NULL;
3681 	uint32_t num_blobs = 0;
3682 	vm_size_t last_needed_blob_offset = 0;
3683 	vm_offset_t code_directory_offset = 0;
3684 
3685 	/*
3686 	 * Ordering of blobs we need to keep:
3687 	 * 1. Code directory
3688 	 * 2. DER encoded entitlements (if present)
3689 	 * 3. Launch constraints self (if present)
3690 	 * 4. Launch constraints parent (if present)
3691 	 * 5. Launch constraints responsible (if present)
3692 	 * 6. Library constraints (if present)
3693 	 *
3694 	 * We need to clear out the remaining page after these blobs end, and fix up
3695 	 * the superblob for the changes. Things get a little more complicated for
3696 	 * blobs which may not have been kmem_allocated. For those, we simply
3697 	 * allocate the new required space and copy into it.
3698 	 */
3699 
3700 	if (blob == NULL) {
3701 		printf("NULL blob passed in for 2nd stage reconstitution\n");
3702 		return EINVAL;
3703 	}
3704 	assert(blob->csb_reconstituted == true);
3705 
3706 	/* Ensure we're not page-wise allocated when in this function */
3707 	assert(ubc_cs_blob_pagewise_allocate(blob->csb_mem_size) == false);
3708 
3709 	if (!blob->csb_cd) {
3710 		/* This case can never happen, and it is a sign of bad things */
3711 		panic("validated cs_blob has no code directory");
3712 	}
3713 	superblob = (CS_SuperBlob*)blob->csb_mem_kaddr;
3714 
3715 	num_blobs = 1;
3716 	last_needed_blob_offset = ntohl(superblob->index[0].offset) + ntohl(blob->csb_cd->length);
3717 
3718 	/* Check for DER entitlements */
3719 	if (blob->csb_der_entitlements_blob) {
3720 		num_blobs += 1;
3721 		last_needed_blob_offset += ntohl(blob->csb_der_entitlements_blob->length);
3722 	}
3723 
3724 	/* Check for launch constraints self */
3725 	launch_constraint_self = csblob_find_blob_bytes(
3726 		(const uint8_t *)blob->csb_mem_kaddr,
3727 		blob->csb_mem_size,
3728 		CSSLOT_LAUNCH_CONSTRAINT_SELF,
3729 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3730 	if (launch_constraint_self) {
3731 		num_blobs += 1;
3732 		last_needed_blob_offset += ntohl(launch_constraint_self->length);
3733 	}
3734 
3735 	/* Check for launch constraints parent */
3736 	launch_constraint_parent = csblob_find_blob_bytes(
3737 		(const uint8_t *)blob->csb_mem_kaddr,
3738 		blob->csb_mem_size,
3739 		CSSLOT_LAUNCH_CONSTRAINT_PARENT,
3740 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3741 	if (launch_constraint_parent) {
3742 		num_blobs += 1;
3743 		last_needed_blob_offset += ntohl(launch_constraint_parent->length);
3744 	}
3745 
3746 	/* Check for launch constraints responsible */
3747 	launch_constraint_responsible = csblob_find_blob_bytes(
3748 		(const uint8_t *)blob->csb_mem_kaddr,
3749 		blob->csb_mem_size,
3750 		CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE,
3751 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3752 	if (launch_constraint_responsible) {
3753 		num_blobs += 1;
3754 		last_needed_blob_offset += ntohl(launch_constraint_responsible->length);
3755 	}
3756 
3757 	/* Check for library constraint */
3758 	library_constraint = csblob_find_blob_bytes(
3759 		(const uint8_t *)blob->csb_mem_kaddr,
3760 		blob->csb_mem_size,
3761 		CSSLOT_LIBRARY_CONSTRAINT,
3762 		CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT);
3763 	if (library_constraint) {
3764 		num_blobs += 1;
3765 		last_needed_blob_offset += ntohl(library_constraint->length);
3766 	}
3767 
3768 	superblob->count = htonl(num_blobs);
3769 	superblob->length = htonl((uint32_t)last_needed_blob_offset);
3770 
3771 	/*
3772 	 * There is a chance that the code directory is marked within the superblob as an
3773 	 * alternate code directory. This happens when the first code directory isn't the
3774 	 * best one chosen by the kernel, so to be able to access both the first and the best,
3775 	 * we save the best one as an alternate one. Since we're getting rid of the first one
3776 	 * here, we mark the best one as the first one.
3777 	 */
3778 	superblob->index[0].type = htonl(CSSLOT_CODEDIRECTORY);
3779 
3780 	vm_address_t new_superblob = 0;
3781 	vm_size_t new_superblob_size = last_needed_blob_offset;
3782 
3783 	ret = ubc_cs_blob_allocate(&new_superblob, &new_superblob_size);
3784 	if (ret != KERN_SUCCESS) {
3785 		printf("unable to allocate memory for 2nd stage reconstitution: %d\n", ret);
3786 		return ENOMEM;
3787 	}
3788 	assert(new_superblob_size == last_needed_blob_offset);
3789 
3790 	/* Calculate the code directory offset */
3791 	code_directory_offset = (vm_offset_t)blob->csb_cd - (vm_offset_t)blob->csb_mem_kaddr;
3792 
3793 	/* Copy in the updated superblob into the new memory */
3794 	memcpy((void*)new_superblob, superblob, new_superblob_size);
3795 
3796 	/* Free the old code signature and old memory */
3797 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3798 
3799 	/* Reconstruct critical fields in the blob object */
3800 	ubc_cs_blob_reconstruct(
3801 		blob,
3802 		new_superblob,
3803 		new_superblob_size,
3804 		code_directory_offset);
3805 
3806 	/* XML entitlements should've been removed */
3807 	assert(blob->csb_entitlements_blob == NULL);
3808 
3809 	const CS_CodeDirectory *validated_code_directory = NULL;
3810 	const CS_GenericBlob *validated_entitlements_blob = NULL;
3811 	const CS_GenericBlob *validated_der_entitlements_blob = NULL;
3812 
3813 	ret = cs_validate_csblob(
3814 		(const uint8_t*)blob->csb_mem_kaddr,
3815 		blob->csb_mem_size,
3816 		&validated_code_directory,
3817 		&validated_entitlements_blob,
3818 		&validated_der_entitlements_blob);
3819 	if (ret) {
3820 		printf("unable to validate code signature after 2nd stage reconstitution: %d\n", ret);
3821 		return EINVAL;
3822 	}
3823 
3824 	return 0;
3825 }
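
/*
 * Editor's note: a condensed sketch (not part of the original source; the
 * helper name is hypothetical) of the superblob header fixup performed above.
 * Dropping the CMS blob and the first code directory means shrinking the
 * advertised blob count and length, and promoting the best (possibly
 * alternate) code directory into the primary slot.
 */
static inline void
sketch_trim_superblob_header(CS_SuperBlob *sb, uint32_t kept_blobs, vm_size_t kept_length)
{
	sb->count = htonl(kept_blobs);                   /* blobs that survive the trim */
	sb->length = htonl((uint32_t)kept_length);       /* bytes up to the last kept blob */
	sb->index[0].type = htonl(CSSLOT_CODEDIRECTORY); /* the best CD becomes the first CD */
}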
3826 
3827 static int
3828 ubc_cs_convert_to_multilevel_hash(struct cs_blob *blob)
3829 {
3830 	const CS_CodeDirectory  *old_cd, *cd;
3831 	CS_CodeDirectory        *new_cd;
3832 	const CS_GenericBlob *entitlements;
3833 	const CS_GenericBlob *der_entitlements;
3834 	vm_offset_t     new_blob_addr;
3835 	vm_size_t       new_blob_size;
3836 	vm_size_t       new_cdsize;
3837 	int                             error;
3838 
3839 	uint32_t                hashes_per_new_hash_shift = (uint32_t)(PAGE_SHIFT - blob->csb_hash_pageshift);
3840 
3841 	if (cs_debug > 1) {
3842 		printf("CODE SIGNING: Attempting to convert Code Directory for %lu -> %lu page shift\n",
3843 		    (unsigned long)blob->csb_hash_pageshift, (unsigned long)PAGE_SHIFT);
3844 	}
3845 
3846 	old_cd = blob->csb_cd;
3847 
3848 	/* Up to the hashes, we can copy all data */
3849 	new_cdsize  = ntohl(old_cd->hashOffset);
3850 	new_cdsize += (ntohl(old_cd->nCodeSlots) >> hashes_per_new_hash_shift) * old_cd->hashSize;
3851 
3852 	error = ubc_cs_reconstitute_code_signature(blob, &new_blob_addr, &new_blob_size, new_cdsize, &new_cd);
3853 	if (error != 0) {
3854 		printf("CODE SIGNING: Failed to reconsitute code signature: %d\n", error);
3855 		return error;
3856 	}
3857 	entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_ENTITLEMENTS, CSMAGIC_EMBEDDED_ENTITLEMENTS);
3858 	der_entitlements = csblob_find_blob_bytes((uint8_t*)new_blob_addr, new_blob_size, CSSLOT_DER_ENTITLEMENTS, CSMAGIC_EMBEDDED_DER_ENTITLEMENTS);
3859 
3860 	memcpy(new_cd, old_cd, ntohl(old_cd->hashOffset));
3861 
3862 	/* Update fields in the Code Directory structure */
3863 	new_cd->length = htonl((uint32_t)new_cdsize);
3864 
3865 	uint32_t nCodeSlots = ntohl(new_cd->nCodeSlots);
3866 	nCodeSlots >>= hashes_per_new_hash_shift;
3867 	new_cd->nCodeSlots = htonl(nCodeSlots);
3868 
3869 	new_cd->pageSize = (uint8_t)PAGE_SHIFT; /* Not byte-swapped */
3870 
3871 	if ((ntohl(new_cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(new_cd->scatterOffset))) {
3872 		SC_Scatter *scatter = (SC_Scatter*)
3873 		    ((char *)new_cd + ntohl(new_cd->scatterOffset));
3874 		/* iterate all scatter structs to scale their counts */
3875 		do {
3876 			uint32_t scount = ntohl(scatter->count);
3877 			uint32_t sbase  = ntohl(scatter->base);
3878 
3879 			/* last scatter? */
3880 			if (scount == 0) {
3881 				break;
3882 			}
3883 
3884 			scount >>= hashes_per_new_hash_shift;
3885 			scatter->count = htonl(scount);
3886 
3887 			sbase >>= hashes_per_new_hash_shift;
3888 			scatter->base = htonl(sbase);
3889 
3890 			scatter++;
3891 		} while (1);
3892 	}
3893 
3894 	/* For each group of hashes, hash them together */
3895 	const unsigned char *src_base = (const unsigned char *)old_cd + ntohl(old_cd->hashOffset);
3896 	unsigned char *dst_base = (unsigned char *)new_cd + ntohl(new_cd->hashOffset);
3897 
3898 	uint32_t hash_index;
3899 	for (hash_index = 0; hash_index < nCodeSlots; hash_index++) {
3900 		union cs_hash_union     mdctx;
3901 
3902 		uint32_t source_hash_len = old_cd->hashSize << hashes_per_new_hash_shift;
3903 		const unsigned char *src = src_base + hash_index * source_hash_len;
3904 		unsigned char *dst = dst_base + hash_index * new_cd->hashSize;
3905 
3906 		blob->csb_hashtype->cs_init(&mdctx);
3907 		blob->csb_hashtype->cs_update(&mdctx, src, source_hash_len);
3908 		blob->csb_hashtype->cs_final(dst, &mdctx);
3909 	}
3910 
3911 	error = cs_validate_csblob((const uint8_t *)new_blob_addr, new_blob_size, &cd, &entitlements, &der_entitlements);
3912 	if (error != 0) {
3913 		printf("CODE SIGNING: Failed to validate new Code Signing Blob: %d\n",
3914 		    error);
3915 
3916 		ubc_cs_blob_deallocate(new_blob_addr, new_blob_size);
3917 		return error;
3918 	}
3919 
3920 	/* New Code Directory is ready for use, swap it out in the blob structure */
3921 	ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3922 
3923 	blob->csb_mem_size = new_blob_size;
3924 	blob->csb_mem_kaddr = (void *)new_blob_addr;
3925 	blob->csb_cd = cd;
3926 	blob->csb_entitlements_blob = NULL;
3927 
3928 	blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
3929 	blob->csb_reconstituted = true;
3930 
3931 	/* The blob has some cached attributes of the Code Directory, so update those */
3932 
3933 	blob->csb_hash_firstlevel_pageshift = blob->csb_hash_pageshift; /* Save the original page size */
3934 
3935 	blob->csb_hash_pageshift = PAGE_SHIFT;
3936 	blob->csb_end_offset = ntohl(cd->codeLimit);
3937 	if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
3938 		const SC_Scatter *scatter = (const SC_Scatter*)
3939 		    ((const char*)cd + ntohl(cd->scatterOffset));
3940 		blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * PAGE_SIZE;
3941 	} else {
3942 		blob->csb_start_offset = 0;
3943 	}
3944 
3945 	return 0;
3946 }
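
/*
 * Editor's note: an illustrative sketch (not part of the original source; the
 * helper name is hypothetical) of the slot arithmetic behind the loop above.
 * With hashes_per_new_hash_shift == 2 (4K -> 16K pages), new slot i digests
 * old slots 4i through 4i+3, which is why nCodeSlots and every scatter
 * base/count scale down by the same power of two.
 */
static inline uint32_t
sketch_first_old_slot_for(uint32_t new_slot, uint32_t hashes_per_new_hash_shift)
{
	/* e.g. new slot 5 with shift 2 covers old slots 20..23 */
	return new_slot << hashes_per_new_hash_shift;
}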
3947 
3948 static void
3949 cs_blob_cleanup(struct cs_blob *blob)
3950 {
3951 	if (blob->csb_entitlements != NULL) {
3952 		amfi->OSEntitlements_invalidate(blob->csb_entitlements);
3953 		osobject_release(blob->csb_entitlements);
3954 		blob->csb_entitlements = NULL;
3955 	}
3956 
3957 #if CODE_SIGNING_MONITOR
3958 	if (blob->csb_csm_obj != NULL) {
3959 		/* Unconditionally remove any profiles we may have associated */
3960 		csm_disassociate_provisioning_profile(blob->csb_csm_obj);
3961 
3962 		kern_return_t kr = csm_unregister_code_signature(blob->csb_csm_obj);
3963 		if (kr == KERN_SUCCESS) {
3964 			/*
3965 			 * If the code signature was monitor managed, the monitor will have freed it
3966 			 * itself in the unregistration call. This means we do not need to free the
3967 			 * data here.
3968 			 */
3969 			if (blob->csb_csm_managed) {
3970 				blob->csb_mem_kaddr = NULL;
3971 				blob->csb_mem_size = 0;
3972 			}
3973 		}
3974 	}
3975 
3976 	/* Unconditionally remove references to the monitor */
3977 	blob->csb_csm_obj = NULL;
3978 	blob->csb_csm_managed = false;
3979 #endif
3980 
3981 	if (blob->csb_mem_kaddr) {
3982 		ubc_cs_blob_deallocate((vm_offset_t)blob->csb_mem_kaddr, blob->csb_mem_size);
3983 	}
3984 	blob->csb_mem_kaddr = NULL;
3985 	blob->csb_mem_size = 0;
3986 }
3987 
3988 static void
3989 cs_blob_ro_free(struct cs_blob *blob)
3990 {
3991 	struct cs_blob tmp;
3992 
3993 	if (blob != NULL) {
3994 		/*
3995 		 * cs_blob_cleanup clears fields, so we need to pass it a
3996 		 * mutable copy.
3997 		 */
3998 		tmp = *blob;
3999 		cs_blob_cleanup(&tmp);
4000 
4001 		zfree_ro(ZONE_ID_CS_BLOB, blob);
4002 	}
4003 }
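
/*
 * Editor's note: a minimal sketch (not part of the original source; the helper
 * name is hypothetical) of the read-only zone discipline used throughout this
 * file. RO-allocated cs_blobs cannot be written directly, so mutations go
 * through a mutable copy and an explicit zalloc_ro_update_elem(), exactly as
 * cs_blob_ro_free() above passes a stack copy to cs_blob_cleanup().
 */
static inline void
sketch_update_ro_blob(struct cs_blob *ro_blob, struct cs_blob *new_contents)
{
	/* The RO allocator copies the new contents into the protected element */
	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, new_contents);
}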
4004 
4005 /*
4006  * Free a cs_blob previously created by cs_blob_create_validated.
4007  */
4008 void
4009 cs_blob_free(
4010 	struct cs_blob *blob)
4011 {
4012 	cs_blob_ro_free(blob);
4013 }
4014 
4015 static int
4016 cs_blob_init_validated(
4017 	vm_address_t * const addr,
4018 	vm_size_t size,
4019 	struct cs_blob *blob,
4020 	CS_CodeDirectory const ** const ret_cd)
4021 {
4022 	int error = EINVAL;
4023 	const CS_CodeDirectory *cd = NULL;
4024 	const CS_GenericBlob *entitlements = NULL;
4025 	const CS_GenericBlob *der_entitlements = NULL;
4026 	union cs_hash_union mdctx;
4027 	size_t length;
4028 
4029 	bzero(blob, sizeof(*blob));
4030 
4031 	/* fill in the new blob */
4032 	blob->csb_mem_size = size;
4033 	blob->csb_mem_offset = 0;
4034 	blob->csb_mem_kaddr = (void *)*addr;
4035 	blob->csb_flags = 0;
4036 	blob->csb_signer_type = CS_SIGNER_TYPE_UNKNOWN;
4037 	blob->csb_platform_binary = 0;
4038 	blob->csb_platform_path = 0;
4039 	blob->csb_teamid = NULL;
4040 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4041 	blob->csb_supplement_teamid = NULL;
4042 #endif
4043 	blob->csb_entitlements_blob = NULL;
4044 	blob->csb_der_entitlements_blob = NULL;
4045 	blob->csb_entitlements = NULL;
4046 #if CODE_SIGNING_MONITOR
4047 	blob->csb_csm_obj = NULL;
4048 	blob->csb_csm_managed = false;
4049 #endif
4050 	blob->csb_reconstituted = false;
4051 	blob->csb_validation_category = CS_VALIDATION_CATEGORY_INVALID;
4052 
4053 	/* Transfer ownership. Even on error, this function will deallocate */
4054 	*addr = 0;
4055 
4056 	/*
4057 	 * Validate the blob's contents
4058 	 */
4059 	length = (size_t) size;
4060 	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
4061 	    length, &cd, &entitlements, &der_entitlements);
4062 	if (error) {
4063 		if (cs_debug) {
4064 			printf("CODESIGNING: csblob invalid: %d\n", error);
4065 		}
4066 		/*
4067 		 * The vnode checker can't make the rest of this function
4068 		 * succeed if csblob validation failed, so bail */
4069 		goto out;
4070 	} else {
4071 		const unsigned char *md_base;
4072 		uint8_t hash[CS_HASH_MAX_SIZE];
4073 		int md_size;
4074 		vm_offset_t hash_pagemask;
4075 
4076 		blob->csb_cd = cd;
4077 		blob->csb_entitlements_blob = entitlements; /* may be NULL, not yet validated */
4078 		blob->csb_der_entitlements_blob = der_entitlements; /* may be NULL, not yet validated */
4079 		blob->csb_hashtype = cs_find_md(cd->hashType);
4080 		if (blob->csb_hashtype == NULL || blob->csb_hashtype->cs_digest_size > sizeof(hash)) {
4081 			panic("validated CodeDirectory but unsupported type");
4082 		}
4083 
4084 		blob->csb_hash_pageshift = cd->pageSize;
4085 		hash_pagemask = (1U << cd->pageSize) - 1;
4086 		blob->csb_hash_firstlevel_pageshift = 0;
4087 		blob->csb_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
4088 		blob->csb_end_offset = (((vm_offset_t)ntohl(cd->codeLimit) + hash_pagemask) & ~hash_pagemask);
4089 		if ((ntohl(cd->version) >= CS_SUPPORTSSCATTER) && (ntohl(cd->scatterOffset))) {
4090 			const SC_Scatter *scatter = (const SC_Scatter*)
4091 			    ((const char*)cd + ntohl(cd->scatterOffset));
4092 			blob->csb_start_offset = ((off_t)ntohl(scatter->base)) * (1U << blob->csb_hash_pageshift);
4093 		} else {
4094 			blob->csb_start_offset = 0;
4095 		}
4096 		/* compute the blob's cdhash */
4097 		md_base = (const unsigned char *) cd;
4098 		md_size = ntohl(cd->length);
4099 
4100 		blob->csb_hashtype->cs_init(&mdctx);
4101 		blob->csb_hashtype->cs_update(&mdctx, md_base, md_size);
4102 		blob->csb_hashtype->cs_final(hash, &mdctx);
4103 
4104 		memcpy(blob->csb_cdhash, hash, CS_CDHASH_LEN);
4105 
4106 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4107 		blob->csb_linkage_hashtype = NULL;
4108 		if (ntohl(cd->version) >= CS_SUPPORTSLINKAGE && cd->linkageHashType != 0 &&
4109 		    ntohl(cd->linkageSize) >= CS_CDHASH_LEN) {
4110 			blob->csb_linkage_hashtype = cs_find_md(cd->linkageHashType);
4111 
4112 			if (blob->csb_linkage_hashtype != NULL) {
4113 				memcpy(blob->csb_linkage, (uint8_t const*)cd + ntohl(cd->linkageOffset),
4114 				    CS_CDHASH_LEN);
4115 			}
4116 		}
4117 #endif
4118 	}
4119 
4120 	error = 0;
4121 
4122 out:
4123 	if (error != 0) {
4124 		cs_blob_cleanup(blob);
4125 		blob = NULL;
4126 		cd = NULL;
4127 	}
4128 
4129 	if (ret_cd != NULL) {
4130 		*ret_cd = cd;
4131 	}
4132 
4133 	return error;
4134 }
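
/*
 * Editor's note: a minimal sketch (not part of the original source; the helper
 * name is hypothetical) of the cdhash computation above: the cdhash is the
 * chosen digest taken over the entire code directory, truncated to
 * CS_CDHASH_LEN bytes.
 */
static inline void
sketch_compute_cdhash(const struct cs_hash *hashtype, const CS_CodeDirectory *cd,
    uint8_t cdhash_out[CS_CDHASH_LEN])
{
	union cs_hash_union mdctx;
	uint8_t digest[CS_HASH_MAX_SIZE];

	/* Digest the code directory bytes, from header through all hash slots */
	hashtype->cs_init(&mdctx);
	hashtype->cs_update(&mdctx, (const unsigned char *)cd, ntohl(cd->length));
	hashtype->cs_final(digest, &mdctx);

	/* Only the first CS_CDHASH_LEN bytes form the canonical cdhash */
	memcpy(cdhash_out, digest, CS_CDHASH_LEN);
}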
4135 
4136 /*
4137  * Validate the code signature blob, create a struct cs_blob wrapper
4138  * and return it together with a pointer to the chosen code directory
4139  * and entitlements blob.
4140  *
4141  * Note that this takes ownership of the memory at addr, mainly because
4142  * this function can actually replace the passed in blob with another
4143  * one, e.g. when performing multilevel hashing optimization.
4144  */
4145 int
4146 cs_blob_create_validated(
4147 	vm_address_t * const            addr,
4148 	vm_size_t                       size,
4149 	struct cs_blob ** const         ret_blob,
4150 	CS_CodeDirectory const ** const     ret_cd)
4151 {
4152 	struct cs_blob blob = {};
4153 	struct cs_blob *ro_blob;
4154 	int error;
4155 
4156 	if (ret_blob) {
4157 		*ret_blob = NULL;
4158 	}
4159 
4160 	if ((error = cs_blob_init_validated(addr, size, &blob, ret_cd)) != 0) {
4161 		return error;
4162 	}
4163 
4164 	if (ret_blob != NULL) {
4165 		ro_blob = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4166 		zalloc_ro_update_elem(ZONE_ID_CS_BLOB, ro_blob, &blob);
4167 		*ret_blob = ro_blob;
4168 	}
4169 
4170 	return error;
4171 }
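
/*
 * Editor's note: a hypothetical caller sketch (not part of the original source)
 * showing the ownership contract documented above: the address is consumed even
 * on failure, and a successful result must eventually go to cs_blob_free().
 */
static inline int
sketch_validate_and_release(vm_address_t sig_addr, vm_size_t sig_size)
{
	struct cs_blob *blob = NULL;
	int error;

	/* On return sig_addr is zeroed; the blob (or the error path) owns the memory */
	error = cs_blob_create_validated(&sig_addr, sig_size, &blob, NULL);
	if (error != 0) {
		return error;
	}

	/* ... consult blob->csb_cdhash, blob->csb_flags, etc. ... */

	cs_blob_free(blob);
	return 0;
}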
4172 
4173 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4174 static void
4175 cs_blob_supplement_free(struct cs_blob * const blob)
4176 {
4177 	void *teamid;
4178 
4179 	if (blob != NULL) {
4180 		if (blob->csb_supplement_teamid != NULL) {
4181 			teamid = blob->csb_supplement_teamid;
4182 			vm_size_t teamid_size = strlen(blob->csb_supplement_teamid) + 1;
4183 			kfree_data(teamid, teamid_size);
4184 		}
4185 		cs_blob_ro_free(blob);
4186 	}
4187 }
4188 #endif
4189 
4190 static void
4191 ubc_cs_blob_adjust_statistics(struct cs_blob const *blob)
4192 {
4193 	/* Note that the atomic ops are not enough to guarantee
4194 	 * correctness: If a blob with an intermediate size is inserted
4195 	 * concurrently, we can lose a peak value assignment. But these
4196 	 * statistics are only advisory anyway, so we're not going to
4197 	 * employ full locking here. (Consequently, we are also okay with
4198 	 * relaxed ordering of those accesses.)
4199 	 */
4200 
4201 	unsigned int new_cs_blob_count = os_atomic_add(&cs_blob_count, 1, relaxed);
4202 	if (new_cs_blob_count > os_atomic_load(&cs_blob_count_peak, relaxed)) {
4203 		os_atomic_store(&cs_blob_count_peak, new_cs_blob_count, relaxed);
4204 	}
4205 
4206 	size_t new_cs_blob_size = os_atomic_add(&cs_blob_size, blob->csb_mem_size, relaxed);
4207 
4208 	if (new_cs_blob_size > os_atomic_load(&cs_blob_size_peak, relaxed)) {
4209 		os_atomic_store(&cs_blob_size_peak, new_cs_blob_size, relaxed);
4210 	}
4211 	if (blob->csb_mem_size > os_atomic_load(&cs_blob_size_max, relaxed)) {
4212 		os_atomic_store(&cs_blob_size_max, blob->csb_mem_size, relaxed);
4213 	}
4214 }
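
/*
 * Editor's note: an illustrative sketch (not part of the original source; the
 * helper name is hypothetical) of the advisory peak-tracking pattern above.
 * The separate load and store can lose an intermediate peak under concurrency,
 * which the comment above explicitly accepts for statistics.
 */
static inline void
sketch_track_peak(size_t *counter, size_t *peak, size_t increment)
{
	size_t new_value = os_atomic_add(counter, increment, relaxed);

	/* Benign race: a concurrent writer may slip between the load and the store */
	if (new_value > os_atomic_load(peak, relaxed)) {
		os_atomic_store(peak, new_value, relaxed);
	}
}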
4215 
4216 static void
4217 cs_blob_set_cpu_type(struct cs_blob *blob, cpu_type_t cputype)
4218 {
4219 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_cpu_type, &cputype);
4220 }
4221 
4222 __abortlike
4223 static void
4224 panic_cs_blob_backref_mismatch(struct cs_blob *blob, struct vnode *vp)
4225 {
4226 	panic("cs_blob vnode backref mismatch: blob=%p, vp=%p, "
4227 	    "blob->csb_vnode=%p", blob, vp, blob->csb_vnode);
4228 }
4229 
4230 void
4231 cs_blob_require(struct cs_blob *blob, vnode_t vp)
4232 {
4233 	zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), blob);
4234 
4235 	if (vp != NULL && __improbable(blob->csb_vnode != vp)) {
4236 		panic_cs_blob_backref_mismatch(blob, vp);
4237 	}
4238 }
4239 
4240 #if CODE_SIGNING_MONITOR
4241 
4242 /**
4243  * Independently verify the authenticity of the code signature through the monitor
4244  * environment. This is required as otherwise the monitor won't allow associations
4245  * of the code signature with address spaces.
4246  *
4247  * Once we've verified the code signature, we no longer need to keep around any
4248  * provisioning profiles we may have registered with it. AMFI associates profiles
4249  * with the monitor during its validation (which happens before the monitor's).
4250  */
4251 static errno_t
4252 verify_code_signature_monitor(
4253 	struct cs_blob *cs_blob)
4254 {
4255 	kern_return_t ret = KERN_DENIED;
4256 
4257 	ret = csm_verify_code_signature(cs_blob->csb_csm_obj);
4258 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4259 		printf("unable to verify code signature with monitor: %d\n", ret);
4260 		return EPERM;
4261 	}
4262 
4263 	ret = csm_disassociate_provisioning_profile(cs_blob->csb_csm_obj);
4264 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND) && (ret != KERN_NOT_SUPPORTED)) {
4265 		printf("unable to disassociate profile from code signature: %d\n", ret);
4266 		return EPERM;
4267 	}
4268 
4269 	/* Associate the OSEntitlements kernel object with the monitor */
4270 	ret = csm_associate_os_entitlements(cs_blob->csb_csm_obj, cs_blob->csb_entitlements);
4271 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4272 		printf("unable to associate OSEntitlements with monitor: %d\n", ret);
4273 		return EPERM;
4274 	}
4275 
4276 	return 0;
4277 }
4278 
4279 /**
4280  * Register the code signature with the code signing monitor environment. This
4281  * will effectively make the blob data immutable, either because the blob memory
4282  * will be allocated and managed directly by the monitor, or because the monitor
4283  * will lock down the memory associated with the blob.
4284  */
4285 static errno_t
4286 register_code_signature_monitor(
4287 	struct vnode *vnode,
4288 	struct cs_blob *cs_blob,
4289 	vm_offset_t code_directory_offset)
4290 {
4291 	kern_return_t ret = KERN_DENIED;
4292 	vm_address_t monitor_signature_addr = 0;
4293 	void *monitor_sig_object = NULL;
4294 	const char *vnode_path_ptr = NULL;
4295 
4296 	/*
4297 	 * Attempt to resolve the path for this vnode and pass it in to the code
4298 	 * signing monitor during registration.
4299 	 */
4300 	int vnode_path_len = MAXPATHLEN;
4301 	char *vnode_path = kalloc_data(vnode_path_len, Z_WAITOK);
4302 
4303 	/*
4304 	 * Taking a reference on the vnode recursively can sometimes lead to a
4305 	 * deadlock on the system. Since we already have a vnode pointer, it means
4306 	 * the caller performed a vnode lookup, which implicitly takes a reference
4307 	 * on the vnode. However, there is more than just having a reference on a
4308 	 * vnode which is important. vnode's also have an iocount, and we must only
4309 	 * access a vnode which has an iocount of greater than 0. Thankfully, all
4310 	 * the conditions which lead to calling this function ensure that this
4311 	 * vnode is safe to access here.
4312 	 *
4313 	 * For more details: rdar://105819068.
4314 	 */
4315 	errno_t error = vn_getpath(vnode, vnode_path, &vnode_path_len);
4316 	if (error == 0) {
4317 		vnode_path_ptr = vnode_path;
4318 	}
4319 
4320 	ret = csm_register_code_signature(
4321 		(vm_address_t)cs_blob->csb_mem_kaddr,
4322 		cs_blob->csb_mem_size,
4323 		code_directory_offset,
4324 		vnode_path_ptr,
4325 		&monitor_sig_object,
4326 		&monitor_signature_addr);
4327 
4328 	kfree_data(vnode_path, MAXPATHLEN);
4329 	vnode_path_ptr = NULL;
4330 
4331 	if (ret == KERN_SUCCESS) {
4332 		/* Reconstruct the cs_blob if the monitor used its own allocation */
4333 		if (monitor_signature_addr != (vm_address_t)cs_blob->csb_mem_kaddr) {
4334 			vm_address_t monitor_signature_size = cs_blob->csb_mem_size;
4335 
4336 			/* Free the old memory for the blob */
4337 			ubc_cs_blob_deallocate(
4338 				(vm_address_t)cs_blob->csb_mem_kaddr,
4339 				cs_blob->csb_mem_size);
4340 
4341 			/* Reconstruct critical fields in the blob object */
4342 			ubc_cs_blob_reconstruct(
4343 				cs_blob,
4344 				monitor_signature_addr,
4345 				monitor_signature_size,
4346 				code_directory_offset);
4347 
4348 			/* Mark the signature as monitor managed */
4349 			cs_blob->csb_csm_managed = true;
4350 		}
4351 	} else if (ret != KERN_NOT_SUPPORTED) {
4352 		printf("unable to register code signature with monitor: %d\n", ret);
4353 		return EPERM;
4354 	}
4355 
4356 	/* Save the monitor handle for the signature object -- may be NULL */
4357 	cs_blob->csb_csm_obj = monitor_sig_object;
4358 
4359 	return 0;
4360 }
4361 
4362 #endif /* CODE_SIGNING_MONITOR */
4363 
4364 static errno_t
4365 validate_main_binary_check(
4366 	struct cs_blob *csblob,
4367 	cs_blob_add_flags_t csblob_add_flags)
4368 {
4369 #if XNU_TARGET_OS_OSX
4370 	(void)csblob;
4371 	(void)csblob_add_flags;
4372 	return 0;
4373 #else
4374 	const CS_CodeDirectory *first_cd = NULL;
4375 	const CS_CodeDirectory *alt_cd = NULL;
4376 	uint64_t exec_seg_flags = 0;
4377 	uint32_t slot = CSSLOT_CODEDIRECTORY;
4378 
4379 	/* Nothing to enforce if we're allowing main binaries */
4380 	if ((csblob_add_flags & CS_BLOB_ADD_ALLOW_MAIN_BINARY) != 0) {
4381 		return 0;
4382 	}
4383 
4384 	first_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4385 	if ((first_cd != NULL) && (ntohl(first_cd->version) >= CS_SUPPORTSEXECSEG)) {
4386 		exec_seg_flags |= ntohll(first_cd->execSegFlags);
4387 	}
4388 
4389 	for (uint32_t i = 0; i < CSSLOT_ALTERNATE_CODEDIRECTORY_MAX; i++) {
4390 		slot = CSSLOT_ALTERNATE_CODEDIRECTORIES + i;
4391 		alt_cd = (const CS_CodeDirectory*)csblob_find_blob(csblob, slot, CSMAGIC_CODEDIRECTORY);
4392 		if ((alt_cd == NULL) || (ntohl(alt_cd->version) < CS_SUPPORTSEXECSEG)) {
4393 			continue;
4394 		}
4395 		exec_seg_flags |= ntohll(alt_cd->execSegFlags);
4396 	}
4397 
4398 	if ((exec_seg_flags & CS_EXECSEG_MAIN_BINARY) != 0) {
4399 		return EBADEXEC;
4400 	}
4401 	return 0;
4402 #endif /* XNU_TARGET_OS_OSX */
4403 }
4404 
4405 /**
4406  * Accelerate entitlements for a code signature object. When we have a code
4407  * signing monitor, this acceleration is done within the monitor which then
4408  * passes back a CoreEntitlements query context the kernel can use. When we
4409  * don't have a code signing monitor, we accelerate the queries within the
4410  * kernel memory itself.
4411  *
4412  * This function must be called when the storage for the code signature can
4413  * no longer change.
4414  */
4415 static errno_t
4416 accelerate_entitlement_queries(
4417 	struct cs_blob *cs_blob)
4418 {
4419 	kern_return_t ret = KERN_NOT_SUPPORTED;
4420 
4421 #if CODE_SIGNING_MONITOR
4422 	CEQueryContext_t ce_ctx = NULL;
4423 	const char *signing_id = NULL;
4424 
4425 	ret = csm_accelerate_entitlements(cs_blob->csb_csm_obj, &ce_ctx);
4426 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_SUPPORTED)) {
4427 		printf("unable to accelerate entitlements through the monitor: %d\n", ret);
4428 		return EPERM;
4429 	}
4430 
4431 	if (ret == KERN_SUCCESS) {
4432 		/* Call cannot fail at this stage */
4433 		ret = csm_acquire_signing_identifier(cs_blob->csb_csm_obj, &signing_id);
4434 		assert(ret == KERN_SUCCESS);
4435 
4436 		/* Adjust the OSEntitlements context with AMFI */
4437 		ret = amfi->OSEntitlements.adjustContextWithMonitor(
4438 			cs_blob->csb_entitlements,
4439 			ce_ctx,
4440 			cs_blob->csb_csm_obj,
4441 			signing_id,
4442 			cs_blob->csb_flags);
4443 		if (ret != KERN_SUCCESS) {
4444 			printf("unable to adjust OSEntitlements context with monitor: %d\n", ret);
4445 			return EPERM;
4446 		}
4447 
4448 		return 0;
4449 	}
4450 #endif
4451 
4452 	/*
4453 	 * If we reach here, then either we don't have a code signing monitor, or
4454 	 * the code signing monitor isn't enabled for code signing, in which case,
4455 	 * AMFI is going to accelerate the entitlements context and adjust its
4456 	 * context on its own.
4457 	 */
4458 	assert(ret == KERN_NOT_SUPPORTED);
4459 
4460 	ret = amfi->OSEntitlements.adjustContextWithoutMonitor(
4461 		cs_blob->csb_entitlements,
4462 		cs_blob);
4463 
4464 	if (ret != KERN_SUCCESS) {
4465 		printf("unable to adjust OSEntitlements context without monitor: %d\n", ret);
4466 		return EPERM;
4467 	}
4468 
4469 	return 0;
4470 }
4471 
4472 /**
4473  * Ensure and validate that some security critical code signing blobs haven't
4474  * been stripped off from the code signature. This can happen if an attacker
4475  * chose to load a code signature sans these critical blobs, or if there is a
4476  * bug in reconstitution logic which remove these blobs from the code signature.
4477  */
4478 static errno_t
4479 validate_auxiliary_signed_blobs(
4480 	struct cs_blob *cs_blob)
4481 {
4482 	struct cs_blob_identifier {
4483 		uint32_t cs_slot;
4484 		uint32_t cs_magic;
4485 	};
4486 
4487 	const struct cs_blob_identifier identifiers[] = {
4488 		{CSSLOT_LAUNCH_CONSTRAINT_SELF, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4489 		{CSSLOT_LAUNCH_CONSTRAINT_PARENT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4490 		{CSSLOT_LAUNCH_CONSTRAINT_RESPONSIBLE, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT},
4491 		{CSSLOT_LIBRARY_CONSTRAINT, CSMAGIC_EMBEDDED_LAUNCH_CONSTRAINT}
4492 	};
4493 	const uint32_t num_identifiers = sizeof(identifiers) / sizeof(identifiers[0]);
4494 
4495 	for (uint32_t i = 0; i < num_identifiers; i++) {
4496 		errno_t err = csblob_find_special_slot_blob(
4497 			cs_blob,
4498 			identifiers[i].cs_slot,
4499 			identifiers[i].cs_magic,
4500 			NULL,
4501 			NULL);
4502 
4503 		if (err != 0) {
4504 			printf("unable to validate security-critical blob: %d [%u|%u]\n",
4505 			    err, identifiers[i].cs_slot, identifiers[i].cs_magic);
4506 
4507 			return EPERM;
4508 		}
4509 	}
4510 
4511 	return 0;
4512 }
4513 
4514 /**
4515  * Setup multi-level hashing for the code signature. This isn't supported on most
4516  * shipping devices, but on ones where it is, it can result in significant savings
4517  * of memory from the code signature standpoint.
4518  *
4519  * Multi-level hashing is used to condense the code directory hashes in order to
4520  * improve memory consumption. We take four 4K page hashes, and condense them into
4521  * a single 16K hash, hence reducing the space consumed by the code directory by
4522  * about ~75%.
4523  */
4524 static errno_t
4525 setup_multilevel_hashing(
4526 	struct cs_blob *cs_blob)
4527 {
4528 	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
4529 	errno_t err = -1;
4530 
4531 	/*
4532 	 * When we have a code signing monitor, we do not support multi-level hashing
4533 	 * since the code signature data is expected to be locked within memory which
4534 	 * cannot be written to by the kernel.
4535 	 *
4536 	 * Even when the code signing monitor isn't explicitly enabled, there are other
4537 	 * reasons for not performing multi-level hashing. For instance, Rosetta creates
4538 	 * issues with multi-level hashing on Apple Silicon Macs.
4539 	 */
4540 	code_signing_configuration(&monitor_type, NULL);
4541 	if (monitor_type != CS_MONITOR_TYPE_NONE) {
4542 		return 0;
4543 	}
4544 
4545 	/* We need to check if multi-level hashing is supported for this blob */
4546 	if (ubc_cs_supports_multilevel_hash(cs_blob) == false) {
4547 		return 0;
4548 	}
4549 
4550 	err = ubc_cs_convert_to_multilevel_hash(cs_blob);
4551 	if (err != 0) {
4552 		printf("unable to setup multi-level hashing: %d\n", err);
4553 		return err;
4554 	}
4555 
4556 	assert(cs_blob->csb_reconstituted == true);
4557 	return 0;
4558 }
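
/*
 * Editor's note: a worked example (not part of the original source; SHA-256
 * slot hashes are an assumption, and the helper name is hypothetical) of the
 * saving described above. A 64MB binary hashed at 4K granularity needs 16384
 * slot hashes (512KB); condensed to 16K pages it needs 4096 (128KB), shrinking
 * the hash area by 4x, roughly 75%.
 */
static inline uint32_t
sketch_condensed_slot_count(uint32_t n_code_slots, uint32_t hashes_per_new_hash_shift)
{
	/* Four 4K hashes collapse into one 16K hash when the shift is 2 */
	return n_code_slots >> hashes_per_new_hash_shift;
}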
4559 
4560 /**
4561  * Once code signature validation is complete, we can remove even more blobs from the
4562  * code signature as they are no longer needed, conserving even more
4563  * system memory.
4564  */
4565 static errno_t
4566 reconstitute_code_signature_2nd_stage(
4567 	struct cs_blob *cs_blob)
4568 {
4569 	kern_return_t ret = KERN_NOT_SUPPORTED;
4570 	errno_t err = EPERM;
4571 
4572 	/* If we never reconstituted before, we won't be reconstituting again */
4573 	if (cs_blob->csb_reconstituted == false) {
4574 		return 0;
4575 	}
4576 
4577 #if CODE_SIGNING_MONITOR
4578 	/*
4579 	 * When we have a code signing monitor, the code signature is immutable until the
4580 	 * monitor decides to unlock parts of it. Therefore, 2nd stage reconstitution takes
4581 	 * place in the monitor when we have a monitor available.
4582 	 *
4583 	 * If the monitor isn't enforcing code signing (in which case the code signature is
4584 	 * NOT immutable), then we perform 2nd stage reconstitution within the kernel itself.
4585 	 */
4586 	vm_address_t unneeded_addr = 0;
4587 	vm_size_t unneeded_size = 0;
4588 
4589 	ret = csm_reconstitute_code_signature(
4590 		cs_blob->csb_csm_obj,
4591 		&unneeded_addr,
4592 		&unneeded_size);
4593 
4594 	if ((ret == KERN_SUCCESS) && unneeded_addr && unneeded_size) {
4595 		/* Free the unneeded part of the blob */
4596 		kmem_free(kernel_map, unneeded_addr, unneeded_size);
4597 
4598 		/* Adjust the size in the blob object */
4599 		cs_blob->csb_mem_size -= unneeded_size;
4600 	}
4601 #endif
4602 
4603 	if (ret == KERN_SUCCESS) {
4604 		goto success;
4605 	} else if (ret != KERN_NOT_SUPPORTED) {
4606 		/*
4607 		 * A monitor environment is available, and it failed in performing 2nd stage
4608 		 * reconstitution. This is a fatal issue for code signing validation.
4609 		 */
4610 		printf("unable to reconstitute code signature through monitor: %d\n", ret);
4611 		return EPERM;
4612 	}
4613 
4614 	/* No monitor available if we reached here */
4615 	err = ubc_cs_reconstitute_code_signature_2nd_stage(cs_blob);
4616 	if (err != 0) {
4617 		return err;
4618 	}
4619 
4620 success:
4621 	/*
4622 	 * Regardless of whether we are performing 2nd stage reconstitution in the monitor
4623 	 * or in the kernel, we remove references to XML entitlements from the blob here.
4624 	 * None of the 2nd stage reconstitution code ever keeps these around, and they have
4625 	 * been explicitly deprecated and disallowed.
4626 	 */
4627 	cs_blob->csb_entitlements_blob = NULL;
4628 
4629 	return 0;
4630 }
4631 
4632 /**
4633  * A code signature blob often contains blobs which aren't needed in the kernel. Since
4634  * the code signature is wired into kernel memory for the time it is used, it behooves
4635  * us to remove any blobs we have no need for in order to conserve memory.
4636  *
4637  * Some platforms support copying the entire SuperBlob stored in kernel memory into
4638  * userspace memory through the "csops" system call. There is an expectation that when
4639  * this happens, all the blobs which were a part of the code signature are copied
4640  * into userspace memory. As a result, these platforms cannot reconstitute the code
4641  * signature, or rather, they cannot remove blobs from the signature, which makes
4642  * reconstitution pointless.
4643  */
4644 static errno_t
4645 reconstitute_code_signature(
4646 	struct cs_blob *cs_blob)
4647 {
4648 	CS_CodeDirectory *code_directory = NULL;
4649 	vm_address_t signature_addr = 0;
4650 	vm_size_t signature_size = 0;
4651 	vm_offset_t code_directory_offset = 0;
4652 	bool platform_supports_reconstitution = false;
4653 
4654 #if CONFIG_CODE_SIGNATURE_RECONSTITUTION
4655 	platform_supports_reconstitution = true;
4656 #endif
4657 
4658 	/*
4659 	 * We can skip reconstitution if the code signing monitor isn't available or not
4660 	 * enabled. But if we do have a monitor, then reconstitution becomes required, as
4661 	 * there is an expectation of performing 2nd stage reconstitution through the
4662 	 * monitor itself.
4663 	 */
4664 	if (platform_supports_reconstitution == false) {
4665 #if CODE_SIGNING_MONITOR
4666 		if (csm_enabled() == true) {
4667 			printf("reconstitution required when code signing monitor is enabled\n");
4668 			return EPERM;
4669 		}
4670 #endif
4671 		return 0;
4672 	}
4673 
4674 	errno_t err = ubc_cs_reconstitute_code_signature(
4675 		cs_blob,
4676 		&signature_addr,
4677 		&signature_size,
4678 		0,
4679 		&code_directory);
4680 
4681 	if (err != 0) {
4682 		printf("unable to reconstitute code signature: %d\n", err);
4683 		return err;
4684 	}
4685 
4686 	/* Calculate the code directory offset */
4687 	code_directory_offset = (vm_offset_t)code_directory - signature_addr;
4688 
4689 	/* Reconstitution allocates new memory -- free the old one */
4690 	ubc_cs_blob_deallocate((vm_address_t)cs_blob->csb_mem_kaddr, cs_blob->csb_mem_size);
4691 
4692 	/* Reconstruct critical fields in the blob object */
4693 	ubc_cs_blob_reconstruct(
4694 		cs_blob,
4695 		signature_addr,
4696 		signature_size,
4697 		code_directory_offset);
4698 
4699 	/* Mark the object as reconstituted */
4700 	cs_blob->csb_reconstituted = true;
4701 
4702 	return 0;
4703 }
4704 
4705 int
4706 ubc_cs_blob_add(
4707 	struct vnode    *vp,
4708 	uint32_t        platform,
4709 	cpu_type_t      cputype,
4710 	cpu_subtype_t   cpusubtype,
4711 	off_t           base_offset,
4712 	vm_address_t    *addr,
4713 	vm_size_t       size,
4714 	struct image_params *imgp,
4715 	__unused int    flags,
4716 	struct cs_blob  **ret_blob,
4717 	cs_blob_add_flags_t csblob_add_flags)
4718 {
4719 	ptrauth_generic_signature_t cs_blob_sig = {0};
4720 	struct ubc_info *uip = NULL;
4721 	struct cs_blob tmp_blob = {0};
4722 	struct cs_blob *blob_ro = NULL;
4723 	struct cs_blob *oblob = NULL;
4724 	CS_CodeDirectory const *cd = NULL;
4725 	off_t blob_start_offset = 0;
4726 	off_t blob_end_offset = 0;
4727 	boolean_t record_mtime = false;
4728 	kern_return_t kr = KERN_DENIED;
4729 	errno_t error = -1;
4730 
4731 #if HAS_APPLE_PAC
4732 	void *signed_entitlements = NULL;
4733 #if CODE_SIGNING_MONITOR
4734 	void *signed_monitor_obj = NULL;
4735 #endif
4736 #endif
4737 
4738 	if (ret_blob) {
4739 		*ret_blob = NULL;
4740 	}
4741 
4742 	/*
4743 	 * Create the struct cs_blob abstract data type which will get attached to
4744 	 * the vnode object. This function also validates the structural integrity
4745 	 * of the code signature blob being passed in.
4746 	 *
4747 	 * We initialize a temporary blob whose contents are then copied into an RO
4748 	 * blob which we allocate from the read-only allocator.
4749 	 */
4750 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
4751 	if (error != 0) {
4752 		printf("unable to create a validated cs_blob object: %d\n", error);
4753 		return error;
4754 	}
4755 
4756 	tmp_blob.csb_cpu_type = cputype;
4757 	tmp_blob.csb_cpu_subtype = cpusubtype & ~CPU_SUBTYPE_MASK;
4758 	tmp_blob.csb_base_offset = base_offset;
4759 
4760 	/* Perform 1st stage reconstitution */
4761 	error = reconstitute_code_signature(&tmp_blob);
4762 	if (error != 0) {
4763 		goto out;
4764 	}
4765 
4766 	/*
4767 	 * There is a strong design pattern we have to follow carefully within this
4768 	 * function. Since we're storing the struct cs_blob within RO-allocated
4769 	 * memory, it is immutable to modifications from within the kernel itself.
4770 	 *
4771 	 * However, before the contents of the blob are transferred to the immutable
4772 	 * cs_blob, they are kept on the stack. In order to protect against a kernel
4773 	 * R/W attacker, we must protect this stack variable. Most importantly, any
4774 	 * code paths which can block for a while must compute a PAC signature over
4775 	 * the stack variable, then perform the blocking operation, and then verify
4776 	 * that the PAC signature over the stack variable is still valid, ensuring
4777 	 * that an attacker did not overwrite contents of the blob by introducing a
4778 	 * maliciously long blocking operation, giving them the time required to go
4779 	 * and overwrite the contents of the blob.
4780 	 *
4781 	 * The most important fields to protect here are the OSEntitlements and the
4782 	 * code signing monitor object references. For these ones, we keep around
4783 	 * extra signed pointers diversified against the read-only blobs' memory
4784 	 * and then update the stack variable with these before updating the full
4785 	 * read-only blob.
4786 	 */
4787 
4788 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
4789 	assert(blob_ro != NULL);
4790 
4791 	tmp_blob.csb_ro_addr = blob_ro;
4792 	tmp_blob.csb_vnode = vp;
4793 
4794 	/* AMFI needs to see the current blob state at the RO address */
4795 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
4796 
4797 #if CODE_SIGNING_MONITOR
4798 	error = register_code_signature_monitor(
4799 		vp,
4800 		&tmp_blob,
4801 		(vm_offset_t)tmp_blob.csb_cd - (vm_offset_t)tmp_blob.csb_mem_kaddr);
4802 
4803 	if (error != 0) {
4804 		goto out;
4805 	}
4806 
4807 #if HAS_APPLE_PAC
4808 	signed_monitor_obj = ptrauth_sign_unauthenticated(
4809 		tmp_blob.csb_csm_obj,
4810 		ptrauth_key_process_independent_data,
4811 		ptrauth_blend_discriminator(&blob_ro->csb_csm_obj,
4812 		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_csm_obj")));
4813 #endif /* HAS_APPLE_PAC */
4814 
4815 #endif /* CODE_SIGNING_MONITOR */
4816 
4817 	/*
4818 	 * Ensure that we're honoring the main binary policy check on platforms which
4819 	 * require it. We perform this check at this stage to ensure the blob we're
4820 	 * looking at has been locked down by a code signing monitor if the system
4821 	 * has one.
4822 	 */
4823 	error = validate_main_binary_check(&tmp_blob, csblob_add_flags);
4824 	if (error != 0) {
4825 		printf("failed to verify main binary policy: %d\n", error);
4826 		goto out;
4827 	}
4828 
4829 #if CONFIG_MACF
4830 	unsigned int cs_flags = tmp_blob.csb_flags;
4831 	unsigned int signer_type = tmp_blob.csb_signer_type;
4832 
4833 	error = mac_vnode_check_signature(
4834 		vp,
4835 		&tmp_blob,
4836 		imgp,
4837 		&cs_flags,
4838 		&signer_type,
4839 		flags,
4840 		platform);
4841 
4842 	if (error != 0) {
4843 		printf("validation of code signature failed through MACF policy: %d\n", error);
4844 		goto out;
4845 	}
4846 
4847 #if HAS_APPLE_PAC
4848 	signed_entitlements = ptrauth_sign_unauthenticated(
4849 		tmp_blob.csb_entitlements,
4850 		ptrauth_key_process_independent_data,
4851 		ptrauth_blend_discriminator(&blob_ro->csb_entitlements,
4852 		OS_PTRAUTH_DISCRIMINATOR("cs_blob.csb_entitlements")));
4853 #endif
4854 
4855 	tmp_blob.csb_flags = cs_flags;
4856 	tmp_blob.csb_signer_type = signer_type;
4857 
4858 	if (tmp_blob.csb_flags & CS_PLATFORM_BINARY) {
4859 		tmp_blob.csb_platform_binary = 1;
4860 		tmp_blob.csb_platform_path = !!(tmp_blob.csb_flags & CS_PLATFORM_PATH);
4861 		tmp_blob.csb_teamid = NULL;
4862 	} else {
4863 		tmp_blob.csb_platform_binary = 0;
4864 		tmp_blob.csb_platform_path = 0;
4865 	}
4866 
4867 	if ((flags & MAC_VNODE_CHECK_DYLD_SIM) && !tmp_blob.csb_platform_binary) {
4868 		printf("dyld simulator runtime is not apple signed: proc: %d\n",
4869 		    proc_getpid(current_proc()));
4870 
4871 		error = EPERM;
4872 		goto out;
4873 	}
4874 #endif /* CONFIG_MACF */
4875 
4876 #if CODE_SIGNING_MONITOR
4877 	error = verify_code_signature_monitor(&tmp_blob);
4878 	if (error != 0) {
4879 		goto out;
4880 	}
4881 #endif
4882 
4883 	/* Perform 2nd stage reconstitution */
4884 	error = reconstitute_code_signature_2nd_stage(&tmp_blob);
4885 	if (error != 0) {
4886 		goto out;
4887 	}
4888 
4889 	/* Setup any multi-level hashing for the code signature */
4890 	error = setup_multilevel_hashing(&tmp_blob);
4891 	if (error != 0) {
4892 		goto out;
4893 	}
4894 
4895 	/* Ensure security critical auxiliary blobs still exist */
4896 	error = validate_auxiliary_signed_blobs(&tmp_blob);
4897 	if (error != 0) {
4898 		goto out;
4899 	}
4900 
4901 	/*
4902 	 * Accelerate the entitlement queries for this code signature. This must
4903 	 * be done only after we know that the code signature pointers within the
4904 	 * struct cs_blob aren't going to be shifted around anymore, which is why
4905 	 * this acceleration is done after setting up multilevel hashing, since
4906 	 * that is the last part of signature validation which can shift the code
4907 	 * signature around.
4908 	 */
4909 	error = accelerate_entitlement_queries(&tmp_blob);
4910 	if (error != 0) {
4911 		goto out;
4912 	}
4913 
4914 	/*
4915 	 * Parse and set the Team ID for this code signature. This only needs to
4916 	 * happen when the signature isn't marked as platform. Like above, this
4917 	 * has to happen after we know the pointers within struct cs_blob aren't
4918 	 * going to be shifted anymore.
4919 	 */
4920 	if ((tmp_blob.csb_flags & CS_PLATFORM_BINARY) == 0) {
4921 		tmp_blob.csb_teamid = csblob_parse_teamid(&tmp_blob);
4922 	}
4923 
4924 	/*
4925 	 * Validate the code signing blob's coverage. Ideally, we could do this
4926 	 * at the beginning, right after structural validation; however, multilevel
4927 	 * hashing can change some offsets.
4928 	 */
4929 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
4930 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
4931 	if (blob_start_offset >= blob_end_offset) {
4932 		error = EINVAL;
4933 		goto out;
4934 	} else if (blob_start_offset < 0 || blob_end_offset <= 0) {
4935 		error = EINVAL;
4936 		goto out;
4937 	}
4938 
4939 	/*
4940 	 * The vnode_lock, linked list traversal, and marking of the memory object as
4941 	 * signed can all be blocking operations. Compute a PAC over the tmp_blob.
4942 	 */
4943 	cs_blob_sig = ptrauth_utils_sign_blob_generic(
4944 		&tmp_blob,
4945 		sizeof(tmp_blob),
4946 		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
4947 		PTRAUTH_ADDR_DIVERSIFY);
4948 
4949 	vnode_lock(vp);
4950 	if (!UBCINFOEXISTS(vp)) {
4951 		vnode_unlock(vp);
4952 		error = ENOENT;
4953 		goto out;
4954 	}
4955 	uip = vp->v_ubcinfo;
4956 
4957 	/* check if this new blob overlaps with an existing blob */
4958 	for (oblob = ubc_get_cs_blobs(vp);
4959 	    oblob != NULL;
4960 	    oblob = oblob->csb_next) {
4961 		off_t oblob_start_offset, oblob_end_offset;
4962 
4963 		if (tmp_blob.csb_signer_type != oblob->csb_signer_type) {  // signer type needs to be the same for slices
4964 			vnode_unlock(vp);
4965 			error = EALREADY;
4966 			goto out;
4967 		} else if (tmp_blob.csb_platform_binary) {  // platform binary needs to be the same for app slices
4968 			if (!oblob->csb_platform_binary) {
4969 				vnode_unlock(vp);
4970 				error = EALREADY;
4971 				goto out;
4972 			}
4973 		} else if (tmp_blob.csb_teamid) {  // teamid binary needs to be the same for app slices
4974 			if (oblob->csb_platform_binary ||
4975 			    oblob->csb_teamid == NULL ||
4976 			    strcmp(oblob->csb_teamid, tmp_blob.csb_teamid) != 0) {
4977 				vnode_unlock(vp);
4978 				error = EALREADY;
4979 				goto out;
4980 			}
4981 		} else {  // non teamid binary needs to be the same for app slices
4982 			if (oblob->csb_platform_binary ||
4983 			    oblob->csb_teamid != NULL) {
4984 				vnode_unlock(vp);
4985 				error = EALREADY;
4986 				goto out;
4987 			}
4988 		}
4989 
4990 		oblob_start_offset = (oblob->csb_base_offset +
4991 		    oblob->csb_start_offset);
4992 		oblob_end_offset = (oblob->csb_base_offset +
4993 		    oblob->csb_end_offset);
4994 		if (blob_start_offset >= oblob_end_offset ||
4995 		    blob_end_offset <= oblob_start_offset) {
4996 			/* no conflict with this existing blob */
4997 		} else {
4998 			/* conflict ! */
4999 			if (blob_start_offset == oblob_start_offset &&
5000 			    blob_end_offset == oblob_end_offset &&
5001 			    tmp_blob.csb_mem_size == oblob->csb_mem_size &&
5002 			    tmp_blob.csb_flags == oblob->csb_flags &&
5003 			    (tmp_blob.csb_cpu_type == CPU_TYPE_ANY ||
5004 			    oblob->csb_cpu_type == CPU_TYPE_ANY ||
5005 			    tmp_blob.csb_cpu_type == oblob->csb_cpu_type) &&
5006 			    !bcmp(tmp_blob.csb_cdhash,
5007 			    oblob->csb_cdhash,
5008 			    CS_CDHASH_LEN)) {
5009 				/*
5010 				 * We already have this blob:
5011 				 * we'll return success but
5012 				 * throw away the new blob.
5013 				 */
5014 				if (oblob->csb_cpu_type == CPU_TYPE_ANY) {
5015 					/*
5016 					 * The old blob matches this one
5017 					 * but doesn't have any CPU type.
5018 					 * Update it with whatever the caller
5019 					 * provided this time.
5020 					 */
5021 					cs_blob_set_cpu_type(oblob, cputype);
5022 				}
5023 
5024 				/* The signature is still accepted, so update the
5025 				 * generation count. */
5026 				uip->cs_add_gen = cs_blob_generation_count;
5027 
5028 				vnode_unlock(vp);
5029 				if (ret_blob) {
5030 					*ret_blob = oblob;
5031 				}
5032 				error = EAGAIN;
5033 				goto out;
5034 			} else {
5035 				/* different blob: reject the new one */
5036 				vnode_unlock(vp);
5037 				error = EALREADY;
5038 				goto out;
5039 			}
5040 		}
5041 	}
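	/*
	 * Note on the overlap test above: two half-open ranges [a, b) and [c, d)
	 * are disjoint iff b <= c or a >= d. For example, slices covering
	 * [0, 0x4000) and [0x4000, 0x8000) do not conflict, whereas [0, 0x4000)
	 * and [0x3000, 0x8000) do.
	 */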
5042 
5043 	/* mark this vnode's VM object as having "signed pages" */
5044 	kr = memory_object_signed(uip->ui_control, TRUE);
5045 	if (kr != KERN_SUCCESS) {
5046 		vnode_unlock(vp);
5047 		error = ENOENT;
5048 		goto out;
5049 	}
5050 
5051 	if (uip->cs_blobs == NULL) {
5052 		/* loading 1st blob: record the file's current "modify time" */
5053 		record_mtime = TRUE;
5054 	}
5055 
5056 	/* set the generation count for cs_blobs */
5057 	uip->cs_add_gen = cs_blob_generation_count;
5058 
5059 	/* Authenticate the PAC signature after blocking operation */
5060 	ptrauth_utils_auth_blob_generic(
5061 		&tmp_blob,
5062 		sizeof(tmp_blob),
5063 		OS_PTRAUTH_DISCRIMINATOR("ubc_cs_blob_add.blocking_op0"),
5064 		PTRAUTH_ADDR_DIVERSIFY,
5065 		cs_blob_sig);
5066 
5067 	/* Update the system statistics for code signatures blobs */
5068 	ubc_cs_blob_adjust_statistics(&tmp_blob);
5069 
5070 	/* Update the list pointer to reference other blobs for this vnode */
5071 	tmp_blob.csb_next = uip->cs_blobs;
5072 
5073 #if HAS_APPLE_PAC
5074 	/*
5075 	 * Update all the critical pointers in the blob with the RO diversified
5076 	 * values before updating the read-only blob with the full contents of
5077 	 * the struct cs_blob. We need to use memcpy here as otherwise a simple
5078 	 * assignment will cause the compiler to re-sign using the stack variable
5079 	 * as the address diversifier.
5080 	 */
5081 	memcpy((void*)&tmp_blob.csb_entitlements, &signed_entitlements, sizeof(void*));
5082 #if CODE_SIGNING_MONITOR
5083 	memcpy((void*)&tmp_blob.csb_csm_obj, &signed_monitor_obj, sizeof(void*));
5084 #endif
5085 #endif
5086 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5087 
5088 	/* Add a fence to ensure writes to the blob are visible on all threads */
5089 	os_atomic_thread_fence(seq_cst);
5090 
5091 	/*
5092 	 * Add the cs_blob to the front of the list of blobs for this vnode. We
5093 	 * only ever add to the front of the list and never remove blobs from it,
5094 	 * which means ubc_get_cs_blobs can return whatever is currently at the
5095 	 * head while the list stays valid. This matters if we validate a
5096 	 * page while adding in a new blob for this vnode.
5097 	 */
5098 	uip->cs_blobs = blob_ro;
5099 
5100 	/* Make sure to reload pointer from uip to double check */
5101 	if (uip->cs_blobs->csb_next) {
5102 		zone_require_ro(ZONE_ID_CS_BLOB, sizeof(struct cs_blob), uip->cs_blobs->csb_next);
5103 	}
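	/*
	 * With the head-only insertion and the fence above, a lock-free reader
	 * such as ubc_get_cs_blobs() always sees a consistent snapshot of the
	 * list (sketch, assuming the reader holds a vnode reference):
	 *
	 *	for (b = uip->cs_blobs; b != NULL; b = b->csb_next) {
	 *		(... b was fully published before it became visible ...)
	 *	}
	 */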
5104 
5105 	if (cs_debug > 1) {
5106 		proc_t p;
5107 		const char *name = vnode_getname_printable(vp);
5108 		p = current_proc();
5109 		printf("CODE SIGNING: proc %d(%s) "
5110 		    "loaded %s signatures for file (%s) "
5111 		    "range 0x%llx:0x%llx flags 0x%x\n",
5112 		    proc_getpid(p), p->p_comm,
5113 		    blob_ro->csb_cpu_type == -1 ? "detached" : "embedded",
5114 		    name,
5115 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5116 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset,
5117 		    blob_ro->csb_flags);
5118 		vnode_putname_printable(name);
5119 	}
5120 
5121 	vnode_unlock(vp);
5122 
5123 	if (record_mtime) {
5124 		vnode_mtime(vp, &uip->cs_mtime, vfs_context_current());
5125 	}
5126 
5127 	if (ret_blob) {
5128 		*ret_blob = blob_ro;
5129 	}
5130 
5131 	error = 0;      /* success ! */
5132 
5133 out:
5134 	if (error) {
5135 		if (error != EAGAIN) {
5136 			printf("check_signature[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5137 		}
5138 
5139 		cs_blob_cleanup(&tmp_blob);
5140 		if (blob_ro) {
5141 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5142 		}
5143 	}
5144 
5145 	if (error == EAGAIN) {
5146 		/*
5147 	 * See above: error is EAGAIN if we were asked
5148 	 * to add an existing blob again. We cleaned up the
5149 	 * new blob and we want to return success.
5150 		 */
5151 		error = 0;
5152 	}
5153 
5154 	return error;
5155 }
5156 
5157 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5158 int
5159 ubc_cs_blob_add_supplement(
5160 	struct vnode    *vp,
5161 	struct vnode    *orig_vp,
5162 	off_t           base_offset,
5163 	vm_address_t    *addr,
5164 	vm_size_t       size,
5165 	struct cs_blob  **ret_blob)
5166 {
5167 	kern_return_t           kr;
5168 	struct ubc_info         *uip, *orig_uip;
5169 	int                     error;
5170 	struct cs_blob          tmp_blob;
5171 	struct cs_blob          *orig_blob;
5172 	struct cs_blob          *blob_ro = NULL;
5173 	CS_CodeDirectory const *cd;
5174 	off_t                   blob_start_offset, blob_end_offset;
5175 
5176 	if (ret_blob) {
5177 		*ret_blob = NULL;
5178 	}
5179 
5180 	/* Create the struct cs_blob wrapper that will be attached to the vnode.
5181 	 * Validates the passed-in blob in the process. */
5182 	error = cs_blob_init_validated(addr, size, &tmp_blob, &cd);
5183 
5184 	if (error != 0) {
5185 		printf("malformed code signature supplement blob: %d\n", error);
5186 		return error;
5187 	}
5188 
5189 	tmp_blob.csb_cpu_type = -1;
5190 	tmp_blob.csb_base_offset = base_offset;
5191 
5192 	tmp_blob.csb_reconstituted = false;
5193 
5194 	vnode_lock(orig_vp);
5195 	if (!UBCINFOEXISTS(orig_vp)) {
5196 		vnode_unlock(orig_vp);
5197 		error = ENOENT;
5198 		goto out;
5199 	}
5200 
5201 	orig_uip = orig_vp->v_ubcinfo;
5202 
5203 	/* check that the supplement's linked cdhash matches a cdhash of
5204 	 * the target image.
5205 	 */
5206 
5207 	if (tmp_blob.csb_linkage_hashtype == NULL) {
5208 		proc_t p;
5209 		const char *iname = vnode_getname_printable(vp);
5210 		p = current_proc();
5211 
5212 		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5213 		    "is not a supplemental.\n",
5214 		    proc_getpid(p), p->p_comm, iname);
5215 
5216 		error = EINVAL;
5217 
5218 		vnode_putname_printable(iname);
5219 		vnode_unlock(orig_vp);
5220 		goto out;
5221 	}
5222 	bool found_but_not_valid = false;
5223 	for (orig_blob = ubc_get_cs_blobs(orig_vp); orig_blob != NULL;
5224 	    orig_blob = orig_blob->csb_next) {
5225 		if (orig_blob->csb_hashtype == tmp_blob.csb_linkage_hashtype &&
5226 		    memcmp(orig_blob->csb_cdhash, tmp_blob.csb_linkage, CS_CDHASH_LEN) == 0) {
5227 			// Found match!
5228 			found_but_not_valid = ((orig_blob->csb_flags & CS_VALID) != CS_VALID);
5229 			break;
5230 		}
5231 	}
5232 
5233 	if (orig_blob == NULL || found_but_not_valid) {
5234 		// Not found, or found but the matching blob is no longer valid.
5235 
5236 		proc_t p;
5237 		const char *iname = vnode_getname_printable(vp);
5238 		p = current_proc();
5239 
5240 		error = (orig_blob == NULL) ? ESRCH : EPERM;
5241 
5242 		printf("CODE SIGNING: proc %d(%s) supplemental signature for file (%s) "
5243 		    "does not match any attached cdhash (error: %d).\n",
5244 		    proc_getpid(p), p->p_comm, iname, error);
5245 
5246 		vnode_putname_printable(iname);
5247 		vnode_unlock(orig_vp);
5248 		goto out;
5249 	}
5250 
5251 	vnode_unlock(orig_vp);
5252 
5253 	blob_ro = zalloc_ro(ZONE_ID_CS_BLOB, Z_WAITOK | Z_NOFAIL);
5254 	tmp_blob.csb_ro_addr = blob_ro;
5255 	tmp_blob.csb_vnode = vp;
5256 
5257 	/* AMFI needs to see the current blob state at the RO address. */
5258 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5259 
5260 	// validate the signature against policy!
5261 #if CONFIG_MACF
5262 	unsigned int signer_type = tmp_blob.csb_signer_type;
5263 	error = mac_vnode_check_supplemental_signature(vp, &tmp_blob, orig_vp, orig_blob, &signer_type);
5264 
5265 	tmp_blob.csb_signer_type = signer_type;
5266 
5267 	if (error) {
5268 		if (cs_debug) {
5269 			printf("check_supplemental_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5270 		}
5271 		goto out;
5272 	}
5273 #endif
5274 
5275 	// We allowed the supplemental signature blob, so copy the platform bit
5276 	// or team-id from the linked signature, along with whether the original is developer code.
5277 	tmp_blob.csb_platform_binary = 0;
5278 	tmp_blob.csb_platform_path = 0;
5279 	if (orig_blob->csb_platform_binary == 1) {
5280 		tmp_blob.csb_platform_binary = orig_blob->csb_platform_binary;
5281 		tmp_blob.csb_platform_path = orig_blob->csb_platform_path;
5282 	} else if (orig_blob->csb_teamid != NULL) {
5283 		vm_size_t teamid_size = strlen(orig_blob->csb_teamid) + 1;
5284 		tmp_blob.csb_supplement_teamid = kalloc_data(teamid_size, Z_WAITOK);
5285 		if (tmp_blob.csb_supplement_teamid == NULL) {
5286 			error = ENOMEM;
5287 			goto out;
5288 		}
5289 		strlcpy(tmp_blob.csb_supplement_teamid, orig_blob->csb_teamid, teamid_size);
5290 	}
5291 	tmp_blob.csb_flags = (orig_blob->csb_flags & CS_DEV_CODE);
5292 
5293 	// Validate the blob's coverage
5294 	blob_start_offset = tmp_blob.csb_base_offset + tmp_blob.csb_start_offset;
5295 	blob_end_offset = tmp_blob.csb_base_offset + tmp_blob.csb_end_offset;
5296 
5297 	if (blob_start_offset >= blob_end_offset || blob_start_offset < 0 || blob_end_offset <= 0) {
5298 		/* reject empty or backwards blob */
5299 		error = EINVAL;
5300 		goto out;
5301 	}
5302 
5303 	vnode_lock(vp);
5304 	if (!UBCINFOEXISTS(vp)) {
5305 		vnode_unlock(vp);
5306 		error = ENOENT;
5307 		goto out;
5308 	}
5309 	uip = vp->v_ubcinfo;
5310 
5311 	struct cs_blob *existing = uip->cs_blob_supplement;
5312 	if (existing != NULL) {
5313 		if (tmp_blob.csb_hashtype == existing->csb_hashtype &&
5314 		    memcmp(tmp_blob.csb_cdhash, existing->csb_cdhash, CS_CDHASH_LEN) == 0) {
5315 			error = EAGAIN; // non-fatal
5316 		} else {
5317 			error = EALREADY; // fatal
5318 		}
5319 
5320 		vnode_unlock(vp);
5321 		goto out;
5322 	}
5323 
5324 	/* mark this vnode's VM object as having "signed pages" */
5325 	kr = memory_object_signed(uip->ui_control, TRUE);
5326 	if (kr != KERN_SUCCESS) {
5327 		vnode_unlock(vp);
5328 		error = ENOENT;
5329 		goto out;
5330 	}
5331 
5332 
5333 	/* We still adjust statistics even for supplemental blobs, as they
5334 	 * consume memory just the same. */
5335 	ubc_cs_blob_adjust_statistics(&tmp_blob);
5336 	/* Unlike regular cs_blobs, we only ever support one supplement. */
5337 	tmp_blob.csb_next = NULL;
5338 	zalloc_ro_update_elem(ZONE_ID_CS_BLOB, blob_ro, &tmp_blob);
5339 
5340 	os_atomic_thread_fence(seq_cst); // Fence to prevent reordering here
5341 	uip->cs_blob_supplement = blob_ro;
5342 
5343 	/* Make sure to reload pointer from uip to double check */
5344 	if (__improbable(uip->cs_blob_supplement->csb_next)) {
5345 		panic("csb_next does not match expected NULL value");
5346 	}
5347 
5348 	vnode_unlock(vp);
5349 
5350 
5351 	if (cs_debug > 1) {
5352 		proc_t p;
5353 		const char *name = vnode_getname_printable(vp);
5354 		p = current_proc();
5355 		printf("CODE SIGNING: proc %d(%s) "
5356 		    "loaded supplemental signature for file (%s) "
5357 		    "range 0x%llx:0x%llx\n",
5358 		    proc_getpid(p), p->p_comm,
5359 		    name,
5360 		    blob_ro->csb_base_offset + blob_ro->csb_start_offset,
5361 		    blob_ro->csb_base_offset + blob_ro->csb_end_offset);
5362 		vnode_putname_printable(name);
5363 	}
5364 
5365 	if (ret_blob) {
5366 		*ret_blob = blob_ro;
5367 	}
5368 
5369 	error = 0; // Success!
5370 out:
5371 	if (error) {
5372 		if (cs_debug) {
5373 			printf("ubc_cs_blob_add_supplement[pid: %d]: error = %d\n", proc_getpid(current_proc()), error);
5374 		}
5375 
5376 		cs_blob_cleanup(&tmp_blob);
5377 		if (blob_ro) {
5378 			zfree_ro(ZONE_ID_CS_BLOB, blob_ro);
5379 		}
5380 	}
5381 
5382 	if (error == EAGAIN) {
5383 		/* We were asked to add an existing blob.
5384 		 * We cleaned up and ignore the attempt. */
5385 		error = 0;
5386 	}
5387 
5388 	return error;
5389 }
5390 #endif
5391 
5392 
5393 
5394 void
5395 csvnode_print_debug(struct vnode *vp)
5396 {
5397 	const char      *name = NULL;
5398 	struct ubc_info *uip;
5399 	struct cs_blob *blob;
5400 
5401 	name = vnode_getname_printable(vp);
5402 	if (name) {
5403 		printf("csvnode: name: %s\n", name);
5404 		vnode_putname_printable(name);
5405 	}
5406 
5407 	vnode_lock_spin(vp);
5408 
5409 	if (!UBCINFOEXISTS(vp)) {
5410 		blob = NULL;
5411 		goto out;
5412 	}
5413 
5414 	uip = vp->v_ubcinfo;
5415 	for (blob = uip->cs_blobs; blob != NULL; blob = blob->csb_next) {
5416 		printf("csvnode: range: %lu -> %lu flags: 0x%08x platform: %s path: %s team: %s\n",
5417 		    (unsigned long)blob->csb_start_offset,
5418 		    (unsigned long)blob->csb_end_offset,
5419 		    blob->csb_flags,
5420 		    blob->csb_platform_binary ? "yes" : "no",
5421 		    blob->csb_platform_path ? "yes" : "no",
5422 		    blob->csb_teamid ? blob->csb_teamid : "<NO-TEAM>");
5423 	}
5424 
5425 out:
5426 	vnode_unlock(vp);
5427 }
5428 
5429 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5430 struct cs_blob *
5431 ubc_cs_blob_get_supplement(
5432 	struct vnode    *vp,
5433 	off_t           offset)
5434 {
5435 	struct cs_blob *blob;
5436 	off_t offset_in_blob;
5437 
5438 	vnode_lock_spin(vp);
5439 
5440 	if (!UBCINFOEXISTS(vp)) {
5441 		blob = NULL;
5442 		goto out;
5443 	}
5444 
5445 	blob = vp->v_ubcinfo->cs_blob_supplement;
5446 
5447 	if (blob == NULL) {
5448 		// no supplemental blob
5449 		goto out;
5450 	}
5451 
5452 
5453 	if (offset != -1) {
5454 		offset_in_blob = offset - blob->csb_base_offset;
5455 		if (offset_in_blob < blob->csb_start_offset || offset_in_blob >= blob->csb_end_offset) {
5456 			// not actually covered by this blob
5457 			blob = NULL;
5458 		}
5459 	}
5460 
5461 out:
5462 	vnode_unlock(vp);
5463 
5464 	return blob;
5465 }
5466 #endif
5467 
5468 struct cs_blob *
5469 ubc_cs_blob_get(
5470 	struct vnode    *vp,
5471 	cpu_type_t      cputype,
5472 	cpu_subtype_t   cpusubtype,
5473 	off_t           offset)
5474 {
5475 	struct cs_blob  *blob;
5476 	off_t offset_in_blob;
5477 
5478 	vnode_lock_spin(vp);
5479 
5480 	if (!UBCINFOEXISTS(vp)) {
5481 		blob = NULL;
5482 		goto out;
5483 	}
5484 
5485 	for (blob = ubc_get_cs_blobs(vp);
5486 	    blob != NULL;
5487 	    blob = blob->csb_next) {
5488 		if (cputype != -1 && blob->csb_cpu_type == cputype && (cpusubtype == -1 || blob->csb_cpu_subtype == (cpusubtype & ~CPU_SUBTYPE_MASK))) {
5489 			break;
5490 		}
5491 		if (offset != -1) {
5492 			offset_in_blob = offset - blob->csb_base_offset;
5493 			if (offset_in_blob >= blob->csb_start_offset &&
5494 			    offset_in_blob < blob->csb_end_offset) {
5495 				/* our offset is covered by this blob */
5496 				break;
5497 			}
5498 		}
5499 	}
5500 
5501 out:
5502 	vnode_unlock(vp);
5503 
5504 	return blob;
5505 }
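/*
 * Example lookup via ubc_cs_blob_get() (sketch; "vp" and "file_offset" are
 * placeholders): passing -1 for cputype and cpusubtype selects a blob purely
 * by the file offset it covers, as ubc_cs_is_range_codesigned() does below.
 *
 *	struct cs_blob *b = ubc_cs_blob_get(vp, -1, -1, file_offset);
 *	if (b == NULL) {
 *		(... no signature covers file_offset ...)
 *	}
 */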
5506 
5507 void
5508 ubc_cs_free_and_vnode_unlock(
5509 	vnode_t vp)
5510 {
5511 	struct ubc_info *uip = vp->v_ubcinfo;
5512 	struct cs_blob  *cs_blobs, *blob, *next_blob;
5513 
5514 	if (!(uip->ui_flags & UI_CSBLOBINVALID)) {
5515 		vnode_unlock(vp);
5516 		return;
5517 	}
5518 
5519 	uip->ui_flags &= ~UI_CSBLOBINVALID;
5520 
5521 	cs_blobs = uip->cs_blobs;
5522 	uip->cs_blobs = NULL;
5523 
5524 #if CHECK_CS_VALIDATION_BITMAP
5525 	ubc_cs_validation_bitmap_deallocate( uip );
5526 #endif
5527 
5528 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5529 	struct cs_blob  *cs_blob_supplement = uip->cs_blob_supplement;
5530 	uip->cs_blob_supplement = NULL;
5531 #endif
5532 
5533 	vnode_unlock(vp);
5534 
5535 	for (blob = cs_blobs;
5536 	    blob != NULL;
5537 	    blob = next_blob) {
5538 		next_blob = blob->csb_next;
5539 		os_atomic_add(&cs_blob_count, -1, relaxed);
5540 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5541 		cs_blob_ro_free(blob);
5542 	}
5543 
5544 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5545 	if (cs_blob_supplement != NULL) {
5546 		os_atomic_add(&cs_blob_count, -1, relaxed);
5547 		os_atomic_add(&cs_blob_size, -cs_blob_supplement->csb_mem_size, relaxed);
5548 		cs_blob_supplement_free(cs_blob_supplement);
5549 	}
5550 #endif
5551 }
5552 
5553 static void
5554 ubc_cs_free(
5555 	struct ubc_info *uip)
5556 {
5557 	struct cs_blob  *blob, *next_blob;
5558 
5559 	for (blob = uip->cs_blobs;
5560 	    blob != NULL;
5561 	    blob = next_blob) {
5562 		next_blob = blob->csb_next;
5563 		os_atomic_add(&cs_blob_count, -1, relaxed);
5564 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5565 		cs_blob_ro_free(blob);
5566 	}
5567 #if CHECK_CS_VALIDATION_BITMAP
5568 	ubc_cs_validation_bitmap_deallocate( uip );
5569 #endif
5570 	uip->cs_blobs = NULL;
5571 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5572 	if (uip->cs_blob_supplement != NULL) {
5573 		blob = uip->cs_blob_supplement;
5574 		os_atomic_add(&cs_blob_count, -1, relaxed);
5575 		os_atomic_add(&cs_blob_size, -blob->csb_mem_size, relaxed);
5576 		cs_blob_supplement_free(uip->cs_blob_supplement);
5577 		uip->cs_blob_supplement = NULL;
5578 	}
5579 #endif
5580 }
5581 
5582 /* check cs blob generation on vnode
5583  * returns:
5584  *    0         : Success, the cs_blob attached is current
5585  *    ENEEDAUTH : Generation count mismatch. Needs authentication again.
5586  */
5587 int
5588 ubc_cs_generation_check(
5589 	struct vnode    *vp)
5590 {
5591 	int retval = ENEEDAUTH;
5592 
5593 	vnode_lock_spin(vp);
5594 
5595 	if (UBCINFOEXISTS(vp) && vp->v_ubcinfo->cs_add_gen == cs_blob_generation_count) {
5596 		retval = 0;
5597 	}
5598 
5599 	vnode_unlock(vp);
5600 	return retval;
5601 }
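/*
 * Typical pairing (sketch; the caller shown is hypothetical): once
 * cs_blob_reset_cache() bumps the global generation count, cached blobs fail
 * this check and must be re-authenticated:
 *
 *	if (ubc_cs_generation_check(vp) == ENEEDAUTH) {
 *		error = ubc_cs_blob_revalidate(vp, blob, imgp, flags, platform);
 *	}
 */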
5602 
5603 int
5604 ubc_cs_blob_revalidate(
5605 	struct vnode    *vp,
5606 	struct cs_blob *blob,
5607 	struct image_params *imgp,
5608 	int flags,
5609 	uint32_t platform
5610 	)
5611 {
5612 	int error = 0;
5613 	const CS_CodeDirectory *cd = NULL;
5614 	const CS_GenericBlob *entitlements = NULL;
5615 	const CS_GenericBlob *der_entitlements = NULL;
5616 	size_t size;
5617 	assert(vp != NULL);
5618 	assert(blob != NULL);
5619 
5620 	if ((blob->csb_flags & CS_VALID) == 0) {
5621 		// If the blob attached to the vnode was invalidated, don't try to revalidate it
5622 		// Blob invalidation only occurs when the file that the blob is attached to is
5623 		// opened for writing, giving us a signal that the file is modified.
5624 		printf("CODESIGNING: can not re-validate a previously invalidated blob, reboot or create a new file.\n");
5625 		error = EPERM;
5626 		goto out;
5627 	}
5628 
5629 	size = blob->csb_mem_size;
5630 	error = cs_validate_csblob((const uint8_t *)blob->csb_mem_kaddr,
5631 	    size, &cd, &entitlements, &der_entitlements);
5632 	if (error) {
5633 		if (cs_debug) {
5634 			printf("CODESIGNING: csblob invalid: %d\n", error);
5635 		}
5636 		goto out;
5637 	}
5638 
5639 	unsigned int cs_flags = (ntohl(cd->flags) & CS_ALLOWED_MACHO) | CS_VALID;
5640 	unsigned int signer_type = CS_SIGNER_TYPE_UNKNOWN;
5641 
5642 	if (blob->csb_reconstituted) {
5643 		/*
5644 		 * Code signatures that have been modified after validation
5645 		 * cannot be revalidated inline from their in-memory blob.
5646 		 *
5647 		 * That's okay, though, because the only path left that relies
5648 		 * on revalidation of existing in-memory blobs is the legacy
5649 		 * detached signature database path, which only exists on macOS,
5650 		 * which does not do reconstitution of any kind.
5651 		 */
5652 		if (cs_debug) {
5653 			printf("CODESIGNING: revalidate: not inline revalidating reconstituted signature.\n");
5654 		}
5655 
5656 		/*
5657 		 * EAGAIN tells the caller that they may reread the code
5658 		 * signature and try attaching it again, which is the same
5659 		 * thing they would do if there was no cs_blob yet in the
5660 		 * first place.
5661 		 *
5662 		 * Conveniently, after ubc_cs_blob_add did a successful
5663 		 * validation, it will detect that a matching cs_blob (cdhash,
5664 		 * offset, arch etc.) already exists, and return success
5665 		 * without re-adding a cs_blob to the vnode.
5666 		 */
5667 		return EAGAIN;
5668 	}
5669 
5670 	/* callout to mac_vnode_check_signature */
5671 #if CONFIG_MACF
5672 	error = mac_vnode_check_signature(vp, blob, imgp, &cs_flags, &signer_type, flags, platform);
5673 	if (cs_debug && error) {
5674 		printf("revalidate: check_signature[pid: %d], error = %d\n", proc_getpid(current_proc()), error);
5675 	}
5676 #else
5677 	(void)flags;
5678 	(void)signer_type;
5679 #endif
5680 
5681 	/* update generation number if success */
5682 	vnode_lock_spin(vp);
5683 	struct cs_signer_info signer_info = {
5684 		.csb_flags = cs_flags,
5685 		.csb_signer_type = signer_type
5686 	};
5687 	zalloc_ro_update_field(ZONE_ID_CS_BLOB, blob, csb_signer_info, &signer_info);
5688 	if (UBCINFOEXISTS(vp)) {
5689 		if (error == 0) {
5690 			vp->v_ubcinfo->cs_add_gen = cs_blob_generation_count;
5691 		} else {
5692 			vp->v_ubcinfo->cs_add_gen = 0;
5693 		}
5694 	}
5695 
5696 	vnode_unlock(vp);
5697 
5698 out:
5699 	return error;
5700 }
5701 
5702 void
5703 cs_blob_reset_cache()
5704 {
5705 	/* Incrementing an odd number by 2 keeps it odd, so the count never wraps to '0'. */
5706 	OSAddAtomic(+2, &cs_blob_generation_count);
5707 	printf("Reseting cs_blob cache from all vnodes. \n");
5708 }
5709 
5710 struct cs_blob *
5711 ubc_get_cs_blobs(
5712 	struct vnode    *vp)
5713 {
5714 	struct ubc_info *uip;
5715 	struct cs_blob  *blobs;
5716 
5717 	/*
5718 	 * No need to take the vnode lock here.  The caller must be holding
5719 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5720 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5721 	 * goes away.  And we only modify "blobs" by adding to the head of the
5722 	 * list.
5723 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5724 	 * part of a forced unmount.  In the case of a code-signature validation
5725 	 * during a page fault, the "paging_in_progress" reference on the VM
5726 	 * object guarantees that the vnode pager (and the ubc_info) won't go
5727 	 * away during the fault.
5728 	 * Other callers need to protect against vnode reclaim by holding the
5729 	 * vnode lock, for example.
5730 	 */
5731 
5732 	if (!UBCINFOEXISTS(vp)) {
5733 		blobs = NULL;
5734 		goto out;
5735 	}
5736 
5737 	uip = vp->v_ubcinfo;
5738 	blobs = uip->cs_blobs;
5739 	if (blobs != NULL) {
5740 		cs_blob_require(blobs, vp);
5741 	}
5742 
5743 out:
5744 	return blobs;
5745 }
5746 
5747 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5748 struct cs_blob *
5749 ubc_get_cs_supplement(
5750 	struct vnode    *vp)
5751 {
5752 	struct ubc_info *uip;
5753 	struct cs_blob  *blob;
5754 
5755 	/*
5756 	 * No need to take the vnode lock here.  The caller must be holding
5757 	 * a reference on the vnode (via a VM mapping or open file descriptor),
5758 	 * so the vnode will not go away.  The ubc_info stays until the vnode
5759 	 * goes away.
5760 	 * The ubc_info could go away entirely if the vnode gets reclaimed as
5761 	 * part of a forced unmount.  In the case of a code-signature validation
5762 	 * during a page fault, the "paging_in_progress" reference on the VM
5763 	 * object guarantees that the vnode pager (and the ubc_info) won't go
5764 	 * away during the fault.
5765 	 * Other callers need to protect against vnode reclaim by holding the
5766 	 * vnode lock, for example.
5767 	 */
5768 
5769 	if (!UBCINFOEXISTS(vp)) {
5770 		blob = NULL;
5771 		goto out;
5772 	}
5773 
5774 	uip = vp->v_ubcinfo;
5775 	blob = uip->cs_blob_supplement;
5776 	if (blob != NULL) {
5777 		cs_blob_require(blob, vp);
5778 	}
5779 
5780 out:
5781 	return blob;
5782 }
5783 #endif
5784 
5785 
5786 void
5787 ubc_get_cs_mtime(
5788 	struct vnode    *vp,
5789 	struct timespec *cs_mtime)
5790 {
5791 	struct ubc_info *uip;
5792 
5793 	if (!UBCINFOEXISTS(vp)) {
5794 		cs_mtime->tv_sec = 0;
5795 		cs_mtime->tv_nsec = 0;
5796 		return;
5797 	}
5798 
5799 	uip = vp->v_ubcinfo;
5800 	cs_mtime->tv_sec = uip->cs_mtime.tv_sec;
5801 	cs_mtime->tv_nsec = uip->cs_mtime.tv_nsec;
5802 }
5803 
5804 unsigned long cs_validate_page_no_hash = 0;
5805 unsigned long cs_validate_page_bad_hash = 0;
5806 static boolean_t
5807 cs_validate_hash(
5808 	struct cs_blob          *blobs,
5809 	memory_object_t         pager,
5810 	memory_object_offset_t  page_offset,
5811 	const void              *data,
5812 	vm_size_t               *bytes_processed,
5813 	unsigned                *tainted)
5814 {
5815 	union cs_hash_union     mdctx;
5816 	struct cs_hash const    *hashtype = NULL;
5817 	unsigned char           actual_hash[CS_HASH_MAX_SIZE];
5818 	unsigned char           expected_hash[CS_HASH_MAX_SIZE];
5819 	boolean_t               found_hash;
5820 	struct cs_blob          *blob;
5821 	const CS_CodeDirectory  *cd;
5822 	const unsigned char     *hash;
5823 	boolean_t               validated;
5824 	off_t                   offset; /* page offset in the file */
5825 	size_t                  size;
5826 	off_t                   codeLimit = 0;
5827 	const char              *lower_bound, *upper_bound;
5828 	vm_offset_t             kaddr, blob_addr;
5829 
5830 	/* retrieve the expected hash */
5831 	found_hash = FALSE;
5832 
5833 	for (blob = blobs;
5834 	    blob != NULL;
5835 	    blob = blob->csb_next) {
5836 		offset = page_offset - blob->csb_base_offset;
5837 		if (offset < blob->csb_start_offset ||
5838 		    offset >= blob->csb_end_offset) {
5839 			/* our page is not covered by this blob */
5840 			continue;
5841 		}
5842 
5843 		/* blob data has been released */
5844 		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
5845 		if (kaddr == 0) {
5846 			continue;
5847 		}
5848 
5849 		blob_addr = kaddr + blob->csb_mem_offset;
5850 		lower_bound = CAST_DOWN(char *, blob_addr);
5851 		upper_bound = lower_bound + blob->csb_mem_size;
5852 
5853 		cd = blob->csb_cd;
5854 		if (cd != NULL) {
5855 			/* all CDs that have been injected are already validated */
5856 
5857 			hashtype = blob->csb_hashtype;
5858 			if (hashtype == NULL) {
5859 				panic("unknown hash type ?");
5860 			}
5861 			if (hashtype->cs_digest_size > sizeof(actual_hash)) {
5862 				panic("hash size too large");
5863 			}
5864 			if (offset & ((1U << blob->csb_hash_pageshift) - 1)) {
5865 				panic("offset not aligned to cshash boundary");
5866 			}
5867 
5868 			codeLimit = ntohl(cd->codeLimit);
5869 
5870 			hash = hashes(cd, (uint32_t)(offset >> blob->csb_hash_pageshift),
5871 			    hashtype->cs_size,
5872 			    lower_bound, upper_bound);
5873 			if (hash != NULL) {
5874 				bcopy(hash, expected_hash, hashtype->cs_size);
5875 				found_hash = TRUE;
5876 			}
5877 
5878 			break;
5879 		}
5880 	}
5881 
5882 	if (found_hash == FALSE) {
5883 		/*
5884 		 * We can't verify this page because there is no signature
5885 		 * for it (yet).  It's possible that this part of the object
5886 		 * is not signed, or that signatures for that part have not
5887 		 * been loaded yet.
5888 		 * Report that the page has not been validated and let the
5889 		 * caller decide if it wants to accept it or not.
5890 		 */
5891 		cs_validate_page_no_hash++;
5892 		if (cs_debug > 1) {
5893 			printf("CODE SIGNING: cs_validate_page: "
5894 			    "mobj %p off 0x%llx: no hash to validate !?\n",
5895 			    pager, page_offset);
5896 		}
5897 		validated = FALSE;
5898 		*tainted = 0;
5899 	} else {
5900 		*tainted = 0;
5901 
5902 		size = (1U << blob->csb_hash_pageshift);
5903 		*bytes_processed = size;
5904 
5905 		const uint32_t *asha1, *esha1;
5906 		if ((off_t)(offset + size) > codeLimit) {
5907 			/* partial page at end of segment */
5908 			assert(offset < codeLimit);
5909 			size = (size_t) (codeLimit & (size - 1));
5910 			*tainted |= CS_VALIDATE_NX;
5911 		}
5912 
5913 		hashtype->cs_init(&mdctx);
5914 
5915 		if (blob->csb_hash_firstlevel_pageshift) {
5916 			const unsigned char *partial_data = (const unsigned char *)data;
5917 			size_t i;
5918 			for (i = 0; i < size;) {
5919 				union cs_hash_union     partialctx;
5920 				unsigned char partial_digest[CS_HASH_MAX_SIZE];
5921 				size_t partial_size = MIN(size - i, (1U << blob->csb_hash_firstlevel_pageshift));
5922 
5923 				hashtype->cs_init(&partialctx);
5924 				hashtype->cs_update(&partialctx, partial_data, partial_size);
5925 				hashtype->cs_final(partial_digest, &partialctx);
5926 
5927 				/* Update cumulative multi-level hash */
5928 				hashtype->cs_update(&mdctx, partial_digest, hashtype->cs_size);
5929 				partial_data = partial_data + partial_size;
5930 				i += partial_size;
5931 			}
5932 		} else {
5933 			hashtype->cs_update(&mdctx, data, size);
5934 		}
5935 		hashtype->cs_final(actual_hash, &mdctx);
5936 
5937 		asha1 = (const uint32_t *) actual_hash;
5938 		esha1 = (const uint32_t *) expected_hash;
5939 
5940 		if (bcmp(expected_hash, actual_hash, hashtype->cs_size) != 0) {
5941 			if (cs_debug) {
5942 				printf("CODE SIGNING: cs_validate_page: "
5943 				    "mobj %p off 0x%llx size 0x%lx: "
5944 				    "actual [0x%x 0x%x 0x%x 0x%x 0x%x] != "
5945 				    "expected [0x%x 0x%x 0x%x 0x%x 0x%x]\n",
5946 				    pager, page_offset, size,
5947 				    asha1[0], asha1[1], asha1[2],
5948 				    asha1[3], asha1[4],
5949 				    esha1[0], esha1[1], esha1[2],
5950 				    esha1[3], esha1[4]);
5951 			}
5952 			cs_validate_page_bad_hash++;
5953 			*tainted |= CS_VALIDATE_TAINTED;
5954 		} else {
5955 			if (cs_debug > 10) {
5956 				printf("CODE SIGNING: cs_validate_page: "
5957 				    "mobj %p off 0x%llx size 0x%lx: "
5958 				    "SHA1 OK\n",
5959 				    pager, page_offset, size);
5960 			}
5961 		}
5962 		validated = TRUE;
5963 	}
5964 
5965 	return validated;
5966 }
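/*
 * Sketch of the multi-level hash computed above, with example shifts
 * csb_hash_pageshift = 14 (16K) and csb_hash_firstlevel_pageshift = 12 (4K):
 *
 *	stored hash = H( H(page[0..4K)) || H(page[4K..8K)) ||
 *	                 H(page[8K..12K)) || H(page[12K..16K)) )
 *
 * i.e. the hash of the concatenated first-level digests, which is exactly
 * what the partialctx loop feeds into mdctx before the final cs_final().
 */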
5967 
5968 boolean_t
5969 cs_validate_range(
5970 	struct vnode    *vp,
5971 	memory_object_t         pager,
5972 	memory_object_offset_t  page_offset,
5973 	const void              *data,
5974 	vm_size_t               dsize,
5975 	unsigned                *tainted)
5976 {
5977 	vm_size_t offset_in_range;
5978 	boolean_t all_subranges_validated = TRUE; /* turn false if any subrange fails */
5979 
5980 	struct cs_blob *blobs = ubc_get_cs_blobs(vp);
5981 
5982 #if CONFIG_SUPPLEMENTAL_SIGNATURES
5983 	if (blobs == NULL && proc_is_translated(current_proc())) {
5984 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
5985 
5986 		if (supp != NULL) {
5987 			blobs = supp;
5988 		} else {
5989 			return FALSE;
5990 		}
5991 	}
5992 #endif
5993 
5994 #if DEVELOPMENT || DEBUG
5995 	code_signing_config_t cs_config = 0;
5996 
5997 	/*
5998 	 * This exemption is specifically useful for systems which want to avoid paying
5999 	 * the cost of verifying the integrity of pages, since that is done by computing
6000 	 * hashes, which can take some time.
6001 	 */
6002 	code_signing_configuration(NULL, &cs_config);
6003 	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6004 		*tainted = 0;
6005 
6006 		/* Return early to avoid paying the cost of hashing */
6007 		return true;
6008 	}
6009 #endif
6010 
6011 	*tainted = 0;
6012 
6013 	for (offset_in_range = 0;
6014 	    offset_in_range < dsize;
6015 	    /* offset_in_range updated based on bytes processed */) {
6016 		unsigned subrange_tainted = 0;
6017 		boolean_t subrange_validated;
6018 		vm_size_t bytes_processed = 0;
6019 
6020 		subrange_validated = cs_validate_hash(blobs,
6021 		    pager,
6022 		    page_offset + offset_in_range,
6023 		    (const void *)((const char *)data + offset_in_range),
6024 		    &bytes_processed,
6025 		    &subrange_tainted);
6026 
6027 		*tainted |= subrange_tainted;
6028 
6029 		if (bytes_processed == 0) {
6030 			/* Cannot make forward progress, so return an error */
6031 			all_subranges_validated = FALSE;
6032 			break;
6033 		} else if (subrange_validated == FALSE) {
6034 			all_subranges_validated = FALSE;
6035 			/* Keep going to detect other types of failures in subranges */
6036 		}
6037 
6038 		offset_in_range += bytes_processed;
6039 	}
6040 
6041 	return all_subranges_validated;
6042 }
6043 
6044 void
6045 cs_validate_page(
6046 	struct vnode            *vp,
6047 	memory_object_t         pager,
6048 	memory_object_offset_t  page_offset,
6049 	const void              *data,
6050 	int                     *validated_p,
6051 	int                     *tainted_p,
6052 	int                     *nx_p)
6053 {
6054 	vm_size_t offset_in_page;
6055 	struct cs_blob *blobs;
6056 
6057 	blobs = ubc_get_cs_blobs(vp);
6058 
6059 #if CONFIG_SUPPLEMENTAL_SIGNATURES
6060 	if (blobs == NULL && proc_is_translated(current_proc())) {
6061 		struct cs_blob *supp = ubc_get_cs_supplement(vp);
6062 
6063 		if (supp != NULL) {
6064 			blobs = supp;
6065 		}
6066 	}
6067 #endif
6068 
6069 #if DEVELOPMENT || DEBUG
6070 	code_signing_config_t cs_config = 0;
6071 
6072 	/*
6073 	 * This exemption is specifically useful for systems which want to avoid paying
6074 	 * the cost of verifying the integrity of pages, since that is done by computing
6075 	 * hashes, which can take some time.
6076 	 */
6077 	code_signing_configuration(NULL, &cs_config);
6078 	if (cs_config & CS_CONFIG_INTEGRITY_SKIP) {
6079 		*validated_p = VMP_CS_ALL_TRUE;
6080 		*tainted_p = VMP_CS_ALL_FALSE;
6081 		*nx_p = VMP_CS_ALL_FALSE;
6082 
6083 		/* Return early to avoid paying the cost of hashing */
6084 		return;
6085 	}
6086 #endif
6087 
6088 	*validated_p = VMP_CS_ALL_FALSE;
6089 	*tainted_p = VMP_CS_ALL_FALSE;
6090 	*nx_p = VMP_CS_ALL_FALSE;
6091 
6092 	for (offset_in_page = 0;
6093 	    offset_in_page < PAGE_SIZE;
6094 	    /* offset_in_page updated based on bytes processed */) {
6095 		unsigned subrange_tainted = 0;
6096 		boolean_t subrange_validated;
6097 		vm_size_t bytes_processed = 0;
6098 		int sub_bit;
6099 
6100 		subrange_validated = cs_validate_hash(blobs,
6101 		    pager,
6102 		    page_offset + offset_in_page,
6103 		    (const void *)((const char *)data + offset_in_page),
6104 		    &bytes_processed,
6105 		    &subrange_tainted);
6106 
6107 		if (bytes_processed == 0) {
6108 			/* 4k chunk not code-signed: try next one */
6109 			offset_in_page += FOURK_PAGE_SIZE;
6110 			continue;
6111 		}
6112 		if (offset_in_page == 0 &&
6113 		    bytes_processed > PAGE_SIZE - FOURK_PAGE_SIZE) {
6114 			/* all processed: no 4k granularity */
6115 			if (subrange_validated) {
6116 				*validated_p = VMP_CS_ALL_TRUE;
6117 			}
6118 			if (subrange_tainted & CS_VALIDATE_TAINTED) {
6119 				*tainted_p = VMP_CS_ALL_TRUE;
6120 			}
6121 			if (subrange_tainted & CS_VALIDATE_NX) {
6122 				*nx_p = VMP_CS_ALL_TRUE;
6123 			}
6124 			break;
6125 		}
6126 		/* we only handle 4k or 16k code-signing granularity... */
6127 		assertf(bytes_processed <= FOURK_PAGE_SIZE,
6128 		    "vp %p blobs %p offset 0x%llx + 0x%llx bytes_processed 0x%llx\n",
6129 		    vp, blobs, (uint64_t)page_offset,
6130 		    (uint64_t)offset_in_page, (uint64_t)bytes_processed);
6131 		sub_bit = 1 << (offset_in_page >> FOURK_PAGE_SHIFT);
6132 		if (subrange_validated) {
6133 			*validated_p |= sub_bit;
6134 		}
6135 		if (subrange_tainted & CS_VALIDATE_TAINTED) {
6136 			*tainted_p |= sub_bit;
6137 		}
6138 		if (subrange_tainted & CS_VALIDATE_NX) {
6139 			*nx_p |= sub_bit;
6140 		}
6141 		/* go to next 4k chunk */
6142 		offset_in_page += FOURK_PAGE_SIZE;
6143 	}
6144 
6145 	return;
6146 }
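/*
 * Worked example for the per-4K bits above (assuming a 16K kernel page): a
 * validated 4K chunk at offset_in_page 0x2000 sets sub_bit
 * 1 << (0x2000 >> FOURK_PAGE_SHIFT) = 1 << 2, so *validated_p ends up with
 * 0x4. A page validated in one full-page pass reports VMP_CS_ALL_TRUE instead.
 */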
6147 
6148 int
6149 ubc_cs_getcdhash(
6150 	vnode_t         vp,
6151 	off_t           offset,
6152 	unsigned char   *cdhash)
6153 {
6154 	struct cs_blob  *blobs, *blob;
6155 	off_t           rel_offset;
6156 	int             ret;
6157 
6158 	vnode_lock(vp);
6159 
6160 	blobs = ubc_get_cs_blobs(vp);
6161 	for (blob = blobs;
6162 	    blob != NULL;
6163 	    blob = blob->csb_next) {
6164 		/* compute offset relative to this blob */
6165 		rel_offset = offset - blob->csb_base_offset;
6166 		if (rel_offset >= blob->csb_start_offset &&
6167 		    rel_offset < blob->csb_end_offset) {
6168 			/* this blob does cover our "offset" ! */
6169 			break;
6170 		}
6171 	}
6172 
6173 	if (blob == NULL) {
6174 		/* we didn't find a blob covering "offset" */
6175 		ret = EBADEXEC; /* XXX any better error ? */
6176 	} else {
6177 		/* get the cdhash of that blob */
6178 		bcopy(blob->csb_cdhash, cdhash, sizeof(blob->csb_cdhash));
6179 		ret = 0;
6180 	}
6181 
6182 	vnode_unlock(vp);
6183 
6184 	return ret;
6185 }
6186 
6187 boolean_t
6188 ubc_cs_is_range_codesigned(
6189 	vnode_t                 vp,
6190 	mach_vm_offset_t        start,
6191 	mach_vm_size_t          size)
6192 {
6193 	struct cs_blob          *csblob;
6194 	mach_vm_offset_t        blob_start;
6195 	mach_vm_offset_t        blob_end;
6196 
6197 	if (vp == NULL) {
6198 		/* no file: no code signature */
6199 		return FALSE;
6200 	}
6201 	if (size == 0) {
6202 		/* no range: no code signature */
6203 		return FALSE;
6204 	}
6205 	if (start + size < start) {
6206 		/* overflow */
6207 		return FALSE;
6208 	}
6209 
6210 	csblob = ubc_cs_blob_get(vp, -1, -1, start);
6211 	if (csblob == NULL) {
6212 		return FALSE;
6213 	}
6214 
6215 	/*
6216 	 * We currently check if the range is covered by a single blob,
6217 	 * which should always be the case for the dyld shared cache.
6218 	 * If we ever want to make this routine handle other cases, we
6219 	 * would have to iterate if the blob does not cover the full range.
6220 	 */
6221 	blob_start = (mach_vm_offset_t) (csblob->csb_base_offset +
6222 	    csblob->csb_start_offset);
6223 	blob_end = (mach_vm_offset_t) (csblob->csb_base_offset +
6224 	    csblob->csb_end_offset);
6225 	if (blob_start > start || blob_end < (start + size)) {
6226 		/* range not fully covered by this code-signing blob */
6227 		return FALSE;
6228 	}
6229 
6230 	return TRUE;
6231 }
6232 
6233 #if CHECK_CS_VALIDATION_BITMAP
6234 #define stob(s) (((atop_64(round_page_64(s))) + 07) >> 3)
6235 extern  boolean_t       root_fs_upgrade_try;
6236 
6237 /*
6238  * Should we use the code-sign bitmap to avoid repeated code-sign validation?
6239  * Depends:
6240  * a) Is the target vnode on the root filesystem?
6241  * b) Has someone tried to mount the root filesystem read-write?
6242  * If answers are (a) yes AND (b) no, then we can use the bitmap.
6243  */
6244 #define USE_CODE_SIGN_BITMAP(vp)        ( (vp != NULL) && (vp->v_mount != NULL) && (vp->v_mount->mnt_flag & MNT_ROOTFS) && !root_fs_upgrade_try)
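/*
 * Sizing example for stob() above: a 3-page file needs (3 + 7) >> 3 = 1
 * bitmap byte, since each byte tracks 8 pages. The lookup below then uses
 * bit = atop_64(offset) and byte = bit >> 3.
 */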
6245 kern_return_t
6246 ubc_cs_validation_bitmap_allocate(
6247 	vnode_t         vp)
6248 {
6249 	kern_return_t   kr = KERN_SUCCESS;
6250 	struct ubc_info *uip;
6251 	char            *target_bitmap;
6252 	vm_object_size_t        bitmap_size;
6253 
6254 	if (!USE_CODE_SIGN_BITMAP(vp) || (!UBCINFOEXISTS(vp))) {
6255 		kr = KERN_INVALID_ARGUMENT;
6256 	} else {
6257 		uip = vp->v_ubcinfo;
6258 
6259 		if (uip->cs_valid_bitmap == NULL) {
6260 			bitmap_size = stob(uip->ui_size);
6261 			target_bitmap = (char*) kalloc_data((vm_size_t)bitmap_size, Z_WAITOK | Z_ZERO);
6262 			if (target_bitmap == 0) {
6263 				kr = KERN_NO_SPACE;
6264 			} else {
6265 				kr = KERN_SUCCESS;
6266 			}
6267 			if (kr == KERN_SUCCESS) {
6268 				uip->cs_valid_bitmap = (void*)target_bitmap;
6269 				uip->cs_valid_bitmap_size = bitmap_size;
6270 			}
6271 		}
6272 	}
6273 	return kr;
6274 }
6275 
6276 kern_return_t
6277 ubc_cs_check_validation_bitmap(
6278 	vnode_t                 vp,
6279 	memory_object_offset_t          offset,
6280 	int                     optype)
6281 {
6282 	kern_return_t   kr = KERN_SUCCESS;
6283 
6284 	if (!USE_CODE_SIGN_BITMAP(vp) || !UBCINFOEXISTS(vp)) {
6285 		kr = KERN_INVALID_ARGUMENT;
6286 	} else {
6287 		struct ubc_info *uip = vp->v_ubcinfo;
6288 		char            *target_bitmap = uip->cs_valid_bitmap;
6289 
6290 		if (target_bitmap == NULL) {
6291 			kr = KERN_INVALID_ARGUMENT;
6292 		} else {
6293 			uint64_t        bit, byte;
6294 			bit = atop_64( offset );
6295 			byte = bit >> 3;
6296 
6297 			if (byte > uip->cs_valid_bitmap_size) {
6298 				kr = KERN_INVALID_ARGUMENT;
6299 			} else {
6300 				if (optype == CS_BITMAP_SET) {
6301 					target_bitmap[byte] |= (1 << (bit & 07));
6302 					kr = KERN_SUCCESS;
6303 				} else if (optype == CS_BITMAP_CLEAR) {
6304 					target_bitmap[byte] &= ~(1 << (bit & 07));
6305 					kr = KERN_SUCCESS;
6306 				} else if (optype == CS_BITMAP_CHECK) {
6307 					if (target_bitmap[byte] & (1 << (bit & 07))) {
6308 						kr = KERN_SUCCESS;
6309 					} else {
6310 						kr = KERN_FAILURE;
6311 					}
6312 				}
6313 			}
6314 		}
6315 	}
6316 	return kr;
6317 }
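/*
 * Typical lifecycle (sketch; "offset" is a placeholder): allocate the bitmap
 * once, mark a page after it validates cleanly, and consult it on later
 * faults to skip re-hashing:
 *
 *	ubc_cs_validation_bitmap_allocate(vp);
 *	ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_SET);
 *	if (ubc_cs_check_validation_bitmap(vp, offset, CS_BITMAP_CHECK)
 *	    == KERN_SUCCESS) {
 *		(... page previously validated ...)
 *	}
 */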
6318 
6319 void
6320 ubc_cs_validation_bitmap_deallocate(
6321 	struct ubc_info *uip)
6322 {
6323 	if (uip->cs_valid_bitmap != NULL) {
6324 		kfree_data(uip->cs_valid_bitmap, (vm_size_t)uip->cs_valid_bitmap_size);
6325 		uip->cs_valid_bitmap = NULL;
6326 	}
6327 }
6328 #else
6329 kern_return_t
6330 ubc_cs_validation_bitmap_allocate(__unused vnode_t vp)
6331 {
6332 	return KERN_INVALID_ARGUMENT;
6333 }
6334 
6335 kern_return_t
6336 ubc_cs_check_validation_bitmap(
6337 	__unused struct vnode *vp,
6338 	__unused memory_object_offset_t offset,
6339 	__unused int optype)
6340 {
6341 	return KERN_INVALID_ARGUMENT;
6342 }
6343 
6344 void
6345 ubc_cs_validation_bitmap_deallocate(__unused struct ubc_info *uip)
6346 {
6347 	return;
6348 }
6349 #endif /* CHECK_CS_VALIDATION_BITMAP */
6350 
6351 #if CODE_SIGNING_MONITOR
6352 
6353 kern_return_t
6354 cs_associate_blob_with_mapping(
6355 	void                    *pmap,
6356 	vm_map_offset_t         start,
6357 	vm_map_size_t           size,
6358 	vm_object_offset_t      offset,
6359 	void                    *blobs_p)
6360 {
6361 	off_t                   blob_start_offset, blob_end_offset;
6362 	kern_return_t           kr;
6363 	struct cs_blob          *blobs, *blob;
6364 	vm_offset_t             kaddr;
6365 	void                    *monitor_sig_obj = NULL;
6366 
6367 	if (csm_enabled() == false) {
6368 		return KERN_NOT_SUPPORTED;
6369 	}
6370 
6371 	blobs = (struct cs_blob *)blobs_p;
6372 
6373 	for (blob = blobs;
6374 	    blob != NULL;
6375 	    blob = blob->csb_next) {
6376 		blob_start_offset = (blob->csb_base_offset +
6377 		    blob->csb_start_offset);
6378 		blob_end_offset = (blob->csb_base_offset +
6379 		    blob->csb_end_offset);
6380 		if ((off_t) offset < blob_start_offset ||
6381 		    (off_t) offset >= blob_end_offset ||
6382 		    (off_t) (offset + size) <= blob_start_offset ||
6383 		    (off_t) (offset + size) > blob_end_offset) {
6384 			continue;
6385 		}
6386 
6387 		kaddr = (vm_offset_t)blob->csb_mem_kaddr;
6388 		if (kaddr == 0) {
6389 			/* blob data has been released */
6390 			continue;
6391 		}
6392 
6393 		monitor_sig_obj = blob->csb_csm_obj;
6394 		if (monitor_sig_obj == NULL) {
6395 			continue;
6396 		}
6397 
6398 		break;
6399 	}
6400 
6401 	if (monitor_sig_obj != NULL) {
6402 		vm_offset_t segment_offset = offset - blob_start_offset;
6403 		kr = csm_associate_code_signature(pmap, monitor_sig_obj, start, size, segment_offset);
6404 	} else {
6405 		kr = KERN_CODESIGN_ERROR;
6406 	}
6407 
6408 	return kr;
6409 }
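/*
 * Offset example for the association above (values hypothetical): a mapping
 * of file offset 0x8000 that falls inside a blob whose blob_start_offset is
 * 0x4000 yields segment_offset = 0x8000 - 0x4000 = 0x4000, the position of
 * the mapping within the signed region handed to
 * csm_associate_code_signature().
 */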
6410 
6411 #endif /* CODE_SIGNING_MONITOR */
6412