xref: /xnu-11215/bsd/kern/kern_descrip.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995, 1997 Apple Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1982, 1986, 1989, 1991, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kern_descrip.c	8.8 (Berkeley) 2/14/95
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/filedesc.h>
78 #include <sys/kernel.h>
79 #include <sys/vnode_internal.h>
80 #include <sys/proc_internal.h>
81 #include <sys/kauth.h>
82 #include <sys/file_internal.h>
83 #include <sys/guarded.h>
84 #include <sys/priv.h>
85 #include <sys/socket.h>
86 #include <sys/socketvar.h>
87 #include <sys/stat.h>
88 #include <sys/ioctl.h>
89 #include <sys/fcntl.h>
90 #include <sys/fsctl.h>
91 #include <sys/malloc.h>
92 #include <sys/mman.h>
93 #include <sys/mount.h>
94 #include <sys/syslog.h>
95 #include <sys/unistd.h>
96 #include <sys/resourcevar.h>
97 #include <sys/aio_kern.h>
98 #include <sys/ev.h>
99 #include <kern/locks.h>
100 #include <sys/uio_internal.h>
101 #include <sys/codesign.h>
102 #include <sys/codedir_internal.h>
103 #include <sys/mount_internal.h>
104 #include <sys/kdebug.h>
105 #include <sys/sysproto.h>
106 #include <sys/pipe.h>
107 #include <sys/spawn.h>
108 #include <sys/cprotect.h>
109 #include <sys/ubc_internal.h>
110 
111 #include <kern/kern_types.h>
112 #include <kern/kalloc.h>
113 #include <kern/waitq.h>
114 #include <kern/ipc_misc.h>
115 #include <kern/ast.h>
116 
117 #include <vm/vm_protos.h>
118 #include <mach/mach_port.h>
119 
120 #include <security/audit/audit.h>
121 #if CONFIG_MACF
122 #include <security/mac_framework.h>
123 #endif
124 
125 #include <stdbool.h>
126 #include <os/atomic_private.h>
127 #include <os/overflow.h>
128 #include <IOKit/IOBSD.h>
129 
130 #define IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND 0x1
131 kern_return_t ipc_object_copyin(ipc_space_t, mach_port_name_t,
132     mach_msg_type_name_t, ipc_port_t *, mach_port_context_t, mach_msg_guard_flags_t *, uint32_t);
133 void ipc_port_release_send(ipc_port_t);
134 
135 void fileport_releasefg(struct fileglob *fg);
136 
137 /* flags for fp_close_and_unlock */
138 #define FD_DUP2RESV 1
139 
140 /* We don't want these exported */
141 
142 __private_extern__
143 int unlink1(vfs_context_t, vnode_t, user_addr_t, enum uio_seg, int);
144 
145 /* Conflict wait queue for when selects collide (opaque type) */
146 extern struct waitq select_conflict_queue;
147 
148 #define f_flag fp_glob->fg_flag
149 #define f_type fp_glob->fg_ops->fo_type
150 #define f_cred fp_glob->fg_cred
151 #define f_ops fp_glob->fg_ops
152 #define f_offset fp_glob->fg_offset
153 
154 ZONE_DEFINE_TYPE(fg_zone, "fileglob", struct fileglob, ZC_ZFREE_CLEARMEM);
155 ZONE_DEFINE_ID(ZONE_ID_FILEPROC, "fileproc", struct fileproc, ZC_ZFREE_CLEARMEM);
156 
157 /*
158  * Descriptor management.
159  */
160 int nfiles;                     /* actual number of open files */
161 /*
162  * "uninitialized" ops -- ensure FILEGLOB_DTYPE(fg) always exists
163  */
164 static const struct fileops uninitops;
165 
166 os_refgrp_decl(, f_refgrp, "files refcounts", NULL);
167 static LCK_GRP_DECLARE(file_lck_grp, "file");
168 
169 
170 #pragma mark fileglobs
171 
172 /*!
173  * @function fg_alloc_init
174  *
175  * @brief
176  * Allocate and minimally initialize a file structure.
177  */
178 struct fileglob *
179 fg_alloc_init(vfs_context_t ctx)
180 {
181 	struct fileglob *fg;
182 
183 	fg = zalloc_flags(fg_zone, Z_WAITOK | Z_ZERO);
184 	lck_mtx_init(&fg->fg_lock, &file_lck_grp, LCK_ATTR_NULL);
185 
186 	os_ref_init_raw(&fg->fg_count, &f_refgrp);
187 	fg->fg_ops = &uninitops;
188 
189 	kauth_cred_ref(ctx->vc_ucred);
190 	fg->fg_cred = ctx->vc_ucred;
191 
192 	os_atomic_inc(&nfiles, relaxed);
193 
194 	return fg;
195 }
196 
197 /*!
198  * @function fg_free
199  *
200  * @brief
201  * Free a file structure.
202  */
203 static void
204 fg_free(struct fileglob *fg)
205 {
206 	os_atomic_dec(&nfiles, relaxed);
207 
208 	if (fg->fg_vn_data) {
209 		fg_vn_data_free(fg->fg_vn_data);
210 		fg->fg_vn_data = NULL;
211 	}
212 
213 	kauth_cred_t cred = fg->fg_cred;
214 	if (IS_VALID_CRED(cred)) {
215 		kauth_cred_unref(&cred);
216 		fg->fg_cred = NOCRED;
217 	}
218 	lck_mtx_destroy(&fg->fg_lock, &file_lck_grp);
219 
220 #if CONFIG_MACF && CONFIG_VNGUARD
221 	vng_file_label_destroy(fg);
222 #endif
223 	zfree(fg_zone, fg);
224 }
225 
226 OS_ALWAYS_INLINE
227 void
228 fg_ref(proc_t p, struct fileglob *fg)
229 {
230 #if DEBUG || DEVELOPMENT
231 	/* Allow fileglob refs to be taken outside of a process context. */
232 	if (p != FG_NOPROC) {
233 		proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
234 	}
235 #else
236 	(void)p;
237 #endif
238 	os_ref_retain_raw(&fg->fg_count, &f_refgrp);
239 }
240 
241 void
242 fg_drop_live(struct fileglob *fg)
243 {
244 	os_ref_release_live_raw(&fg->fg_count, &f_refgrp);
245 }
246 
247 int
248 fg_drop(proc_t p, struct fileglob *fg)
249 {
250 	struct vnode *vp;
251 	struct vfs_context context;
252 	int error = 0;
253 
254 	if (fg == NULL) {
255 		return 0;
256 	}
257 
258 	/* Set up context with cred stashed in fg */
259 	if (p == current_proc()) {
260 		context.vc_thread = current_thread();
261 	} else {
262 		context.vc_thread = NULL;
263 	}
264 	context.vc_ucred = fg->fg_cred;
265 
266 	/*
267 	 * POSIX record locking dictates that any close releases ALL
268 	 * locks owned by this process.  This is handled by setting
269 	 * a flag in the unlock to free ONLY locks obeying POSIX
270 	 * semantics, and not to free BSD-style file locks.
271 	 * If the descriptor was in a message, POSIX-style locks
272 	 * aren't passed with the descriptor.
273 	 */
274 	if (p != FG_NOPROC && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
275 	    (p->p_ladvflag & P_LADVLOCK)) {
276 		struct flock lf = {
277 			.l_whence = SEEK_SET,
278 			.l_type = F_UNLCK,
279 		};
280 
281 		vp = (struct vnode *)fg_get_data(fg);
282 		if ((error = vnode_getwithref(vp)) == 0) {
283 			(void)VNOP_ADVLOCK(vp, (caddr_t)p, F_UNLCK, &lf, F_POSIX, &context, NULL);
284 			(void)vnode_put(vp);
285 		}
286 	}
287 
288 	if (os_ref_release_raw(&fg->fg_count, &f_refgrp) == 0) {
289 		/*
290 		 * Since we ensure that fg->fg_ops is always initialized,
291 		 * it is safe to invoke fo_close on the fg
292 		 */
293 		error = fo_close(fg, &context);
294 
295 		fg_free(fg);
296 	}
297 
298 	return error;
299 }
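
/*
 * Illustrative usage sketch (not a caller from this file): the typical
 * fileglob reference pattern built from the routines above.  A reference
 * is taken under the proc_fdlock; the last fg_drop() runs fo_close() and
 * frees the structure.
 *
 *	proc_fdlock(p);
 *	fg_ref(p, fp->fp_glob);		// +1 while the fd table is stable
 *	proc_fdunlock(p);
 *	// ... use the fileglob ...
 *	error = fg_drop(p, fg);		// -1; on last release: fo_close() + fg_free()
 */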
300 
301 inline
302 void
303 fg_set_data(
304 	struct fileglob *fg,
305 	void *fg_data)
306 {
307 	uintptr_t *store = &fg->fg_data;
308 
309 #if __has_feature(ptrauth_calls)
310 	int type = FILEGLOB_DTYPE(fg);
311 
312 	if (fg_data) {
313 		type ^= OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data");
314 		fg_data = ptrauth_sign_unauthenticated(fg_data,
315 		    ptrauth_key_process_independent_data,
316 		    ptrauth_blend_discriminator(store, type));
317 	}
318 #endif // __has_feature(ptrauth_calls)
319 
320 	*store = (uintptr_t)fg_data;
321 }
322 
323 inline
324 void *
325 fg_get_data_volatile(struct fileglob *fg)
326 {
327 	uintptr_t *store = &fg->fg_data;
328 	void *fg_data = (void *)*store;
329 
330 #if __has_feature(ptrauth_calls)
331 	int type = FILEGLOB_DTYPE(fg);
332 
333 	if (fg_data) {
334 		type ^= OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data");
335 		fg_data = ptrauth_auth_data(fg_data,
336 		    ptrauth_key_process_independent_data,
337 		    ptrauth_blend_discriminator(store, type));
338 	}
339 #endif // __has_feature(ptrauth_calls)
340 
341 	return fg_data;
342 }
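
/*
 * Illustrative note: with pointer authentication enabled, the pair of
 * accessors above signs and authenticates fg_data with a discriminator
 * derived from both the storage slot and the fileglob type, roughly:
 *
 *	disc = ptrauth_blend_discriminator(&fg->fg_data,
 *	    FILEGLOB_DTYPE(fg) ^ OS_PTRAUTH_DISCRIMINATOR("fileglob.fg_data"));
 *
 * so a pointer stored for a DTYPE_VNODE fileglob will fail to
 * authenticate if replayed into a fileglob of another type or at a
 * different address.
 */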
343 
344 static void
345 fg_transfer_filelocks(proc_t p, struct fileglob *fg, thread_t thread)
346 {
347 	struct vnode *vp;
348 	struct vfs_context context;
349 	struct proc *old_proc = current_proc();
350 
351 	assert(fg != NULL);
352 
353 	assert(p != old_proc);
354 	context.vc_thread = thread;
355 	context.vc_ucred = fg->fg_cred;
356 
357 	/* Transfer all POSIX Style locks to new proc */
358 	if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
359 	    (p->p_ladvflag & P_LADVLOCK)) {
360 		struct flock lf = {
361 			.l_whence = SEEK_SET,
362 			.l_start = 0,
363 			.l_len = 0,
364 			.l_type = F_TRANSFER,
365 		};
366 
367 		vp = (struct vnode *)fg_get_data(fg);
368 		if (vnode_getwithref(vp) == 0) {
369 			(void)VNOP_ADVLOCK(vp, (caddr_t)old_proc, F_TRANSFER, &lf, F_POSIX, &context, NULL);
370 			(void)vnode_put(vp);
371 		}
372 	}
373 
374 	/* Transfer all OFD Style locks to new proc */
375 	if (p && DTYPE_VNODE == FILEGLOB_DTYPE(fg) &&
376 	    (fg->fg_lflags & FG_HAS_OFDLOCK)) {
377 		struct flock lf = {
378 			.l_whence = SEEK_SET,
379 			.l_start = 0,
380 			.l_len = 0,
381 			.l_type = F_TRANSFER,
382 		};
383 
384 		vp = (struct vnode *)fg_get_data(fg);
385 		if (vnode_getwithref(vp) == 0) {
386 			(void)VNOP_ADVLOCK(vp, ofd_to_id(fg), F_TRANSFER, &lf, F_OFD_LOCK, &context, NULL);
387 			(void)vnode_put(vp);
388 		}
389 	}
390 	return;
391 }
392 
393 bool
394 fg_sendable(struct fileglob *fg)
395 {
396 	switch (FILEGLOB_DTYPE(fg)) {
397 	case DTYPE_VNODE:
398 	case DTYPE_SOCKET:
399 	case DTYPE_PIPE:
400 	case DTYPE_PSXSHM:
401 	case DTYPE_NETPOLICY:
402 		return (fg->fg_lflags & FG_CONFINED) == 0;
403 
404 	default:
405 		return false;
406 	}
407 }
408 
409 #pragma mark file descriptor table (static helpers)
410 
411 static void
412 procfdtbl_reservefd(struct proc * p, int fd)
413 {
414 	p->p_fd.fd_ofiles[fd] = NULL;
415 	p->p_fd.fd_ofileflags[fd] |= UF_RESERVED;
416 }
417 
418 void
419 procfdtbl_releasefd(struct proc * p, int fd, struct fileproc * fp)
420 {
421 	if (fp != NULL) {
422 		p->p_fd.fd_ofiles[fd] = fp;
423 	}
424 	p->p_fd.fd_ofileflags[fd] &= ~UF_RESERVED;
425 	if ((p->p_fd.fd_ofileflags[fd] & UF_RESVWAIT) == UF_RESVWAIT) {
426 		p->p_fd.fd_ofileflags[fd] &= ~UF_RESVWAIT;
427 		wakeup(&p->p_fd);
428 	}
429 }
430 
431 static void
432 procfdtbl_waitfd(struct proc * p, int fd)
433 {
434 	p->p_fd.fd_ofileflags[fd] |= UF_RESVWAIT;
435 	msleep(&p->p_fd, &p->p_fd.fd_lock, PRIBIO, "ftbl_waitfd", NULL);
436 }
437 
438 static void
439 procfdtbl_clearfd(struct proc * p, int fd)
440 {
441 	int waiting;
442 
443 	waiting = (p->p_fd.fd_ofileflags[fd] & UF_RESVWAIT);
444 	p->p_fd.fd_ofiles[fd] = NULL;
445 	p->p_fd.fd_ofileflags[fd] = 0;
446 	if (waiting == UF_RESVWAIT) {
447 		wakeup(&p->p_fd);
448 	}
449 }
450 
451 /*
452  * fdrelse
453  *
454  * Description:	Inline utility function to free an fd in a filedesc
455  *
456  * Parameters:	p				Process whose filedesc the fd
457  *						lies in
458  *		fd				fd to free
459  *
460  * Returns:	void
461  *
462  * Locks:	Assumes proc_fdlock for process pointing to fdp is held by
463  *		the caller
464  */
465 void
466 fdrelse(struct proc * p, int fd)
467 {
468 	struct filedesc *fdp = &p->p_fd;
469 	int nfd = 0;
470 
471 	if (fd < fdp->fd_freefile) {
472 		fdp->fd_freefile = fd;
473 	}
474 #if DIAGNOSTIC
475 	if (fd >= fdp->fd_afterlast) {
476 		panic("fdrelse: fd_afterlast inconsistent");
477 	}
478 #endif
479 	procfdtbl_clearfd(p, fd);
480 
481 	nfd = fdp->fd_afterlast;
482 	while (nfd > 0 && fdp->fd_ofiles[nfd - 1] == NULL &&
483 	    !(fdp->fd_ofileflags[nfd - 1] & UF_RESERVED)) {
484 		nfd--;
485 	}
486 	fdp->fd_afterlast = nfd;
487 
488 #if CONFIG_PROC_RESOURCE_LIMITS
489 	fdp->fd_nfiles_open--;
490 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
491 }
492 
493 
494 /*
495  * finishdup
496  *
497  * Description:	Common code for dup, dup2, and fcntl(F_DUPFD).
498  *
499  * Parameters:	p				Process performing the dup
500  *		old				The fd to dup
501  *		new				The fd to dup it to
502  *		fp_flags			Flags to augment the new fp
503  *		retval				Pointer to the call return area
504  *
505  * Returns:	0				Success
506  *		EBADF
507  *		ENOMEM
508  *
509  * Implicit returns:
510  *		*retval (modified)		The new descriptor
511  *
512  * Locks:	Assumes proc_fdlock for process pointing to fdp is held by
513  *		the caller
514  *
515  * Notes:	This function may drop and reacquire this lock; it is unsafe
516  *		for a caller to assume that other state protected by the lock
517  *		has not been subsequently changed out from under it.
518  */
519 static int
520 finishdup(
521 	proc_t                  p,
522 	kauth_cred_t            p_cred,
523 	int                     old,
524 	int                     new,
525 	fileproc_flags_t        fp_flags,
526 	int32_t                *retval)
527 {
528 	struct filedesc *fdp = &p->p_fd;
529 	struct fileproc *nfp;
530 	struct fileproc *ofp;
531 #if CONFIG_MACF
532 	int error;
533 #endif
534 
535 #if DIAGNOSTIC
536 	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
537 #endif
538 	if ((ofp = fdp->fd_ofiles[old]) == NULL ||
539 	    (fdp->fd_ofileflags[old] & UF_RESERVED)) {
540 		fdrelse(p, new);
541 		return EBADF;
542 	}
543 
544 #if CONFIG_MACF
545 	error = mac_file_check_dup(p_cred, ofp->fp_glob, new);
546 
547 	if (error) {
548 		fdrelse(p, new);
549 		return error;
550 	}
551 #else
552 	(void)p_cred;
553 #endif
554 
555 	fg_ref(p, ofp->fp_glob);
556 
557 	proc_fdunlock(p);
558 
559 	nfp = fileproc_alloc_init();
560 
561 	if (fp_flags) {
562 		nfp->fp_flags |= fp_flags;
563 	}
564 	nfp->fp_glob = ofp->fp_glob;
565 
566 	proc_fdlock(p);
567 
568 #if DIAGNOSTIC
569 	if (fdp->fd_ofiles[new] != 0) {
570 		panic("finishdup: overwriting fd_ofiles with new %d", new);
571 	}
572 	if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) {
573 		panic("finishdup: unreserved fileflags with new %d", new);
574 	}
575 #endif
576 
577 	if (new >= fdp->fd_afterlast) {
578 		fdp->fd_afterlast = new + 1;
579 	}
580 	procfdtbl_releasefd(p, new, nfp);
581 	*retval = new;
582 	return 0;
583 }
584 
585 
586 #pragma mark file descriptor table (exported functions)
587 
588 void
589 proc_dirs_lock_shared(proc_t p)
590 {
591 	lck_rw_lock_shared(&p->p_fd.fd_dirs_lock);
592 }
593 
594 void
595 proc_dirs_unlock_shared(proc_t p)
596 {
597 	lck_rw_unlock_shared(&p->p_fd.fd_dirs_lock);
598 }
599 
600 void
601 proc_dirs_lock_exclusive(proc_t p)
602 {
603 	lck_rw_lock_exclusive(&p->p_fd.fd_dirs_lock);
604 }
605 
606 void
607 proc_dirs_unlock_exclusive(proc_t p)
608 {
609 	lck_rw_unlock_exclusive(&p->p_fd.fd_dirs_lock);
610 }
611 
612 /*
613  * proc_fdlock, proc_fdlock_spin
614  *
615  * Description:	Lock to control access to the per process struct fileproc
616  *		and struct filedesc
617  *
618  * Parameters:	p				Process to take the lock on
619  *
620  * Returns:	void
621  *
622  * Notes:	The lock is initialized in forkproc() and destroyed in
623  *		reap_child_process().
624  */
625 void
626 proc_fdlock(proc_t p)
627 {
628 	lck_mtx_lock(&p->p_fd.fd_lock);
629 }
630 
631 void
632 proc_fdlock_spin(proc_t p)
633 {
634 	lck_mtx_lock_spin(&p->p_fd.fd_lock);
635 }
636 
637 void
638 proc_fdlock_assert(proc_t p, int assertflags)
639 {
640 	lck_mtx_assert(&p->p_fd.fd_lock, assertflags);
641 }
642 
643 
644 /*
645  * proc_fdunlock
646  *
647  * Description:	Unlock the lock previously locked by a call to proc_fdlock()
648  *
649  * Parameters:	p				Process to drop the lock on
650  *
651  * Returns:	void
652  */
653 void
654 proc_fdunlock(proc_t p)
655 {
656 	lck_mtx_unlock(&p->p_fd.fd_lock);
657 }
658 
659 bool
660 fdt_available_locked(proc_t p, int n)
661 {
662 	struct filedesc *fdp = &p->p_fd;
663 	struct fileproc **fpp;
664 	char *flags;
665 	int i;
666 	int lim = proc_limitgetcur_nofile(p);
667 
668 	if ((i = lim - fdp->fd_nfiles) > 0 && (n -= i) <= 0) {
669 		return true;
670 	}
671 	fpp = &fdp->fd_ofiles[fdp->fd_freefile];
672 	flags = &fdp->fd_ofileflags[fdp->fd_freefile];
673 	for (i = fdp->fd_nfiles - fdp->fd_freefile; --i >= 0; fpp++, flags++) {
674 		if (*fpp == NULL && !(*flags & UF_RESERVED) && --n <= 0) {
675 			return true;
676 		}
677 	}
678 	return false;
679 }
680 
681 
682 struct fdt_iterator
683 fdt_next(proc_t p, int fd, bool only_settled)
684 {
685 	struct fdt_iterator it;
686 	struct filedesc *fdp = &p->p_fd;
687 	struct fileproc *fp;
688 	int nfds = fdp->fd_afterlast;
689 
690 	while (++fd < nfds) {
691 		fp = fdp->fd_ofiles[fd];
692 		if (fp == NULL || fp->fp_glob == NULL) {
693 			continue;
694 		}
695 		if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
696 			continue;
697 		}
698 		it.fdti_fd = fd;
699 		it.fdti_fp = fp;
700 		return it;
701 	}
702 
703 	it.fdti_fd = nfds;
704 	it.fdti_fp = NULL;
705 	return it;
706 }
707 
708 struct fdt_iterator
709 fdt_prev(proc_t p, int fd, bool only_settled)
710 {
711 	struct fdt_iterator it;
712 	struct filedesc *fdp = &p->p_fd;
713 	struct fileproc *fp;
714 
715 	while (--fd >= 0) {
716 		fp = fdp->fd_ofiles[fd];
717 		if (fp == NULL || fp->fp_glob == NULL) {
718 			continue;
719 		}
720 		if (only_settled && (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
721 			continue;
722 		}
723 		it.fdti_fd = fd;
724 		it.fdti_fp = fp;
725 		return it;
726 	}
727 
728 	it.fdti_fd = -1;
729 	it.fdti_fp = NULL;
730 	return it;
731 }
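
/*
 * Illustrative usage sketch: walking every settled fileproc of a process
 * with fdt_next(), typically with the proc_fdlock held.  Iteration starts
 * at fd + 1, so seed it with -1.
 *
 *	struct fdt_iterator it;
 *
 *	proc_fdlock(p);
 *	it = fdt_next(p, -1, true);
 *	while (it.fdti_fp != NULL) {
 *		// inspect it.fdti_fd / it.fdti_fp here
 *		it = fdt_next(p, it.fdti_fd, true);
 *	}
 *	proc_fdunlock(p);
 */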
732 
733 void
734 fdt_init(proc_t p)
735 {
736 	struct filedesc *fdp = &p->p_fd;
737 
738 	lck_mtx_init(&fdp->fd_kqhashlock, &proc_kqhashlock_grp, &proc_lck_attr);
739 	lck_mtx_init(&fdp->fd_knhashlock, &proc_knhashlock_grp, &proc_lck_attr);
740 	lck_mtx_init(&fdp->fd_lock, &proc_fdmlock_grp, &proc_lck_attr);
741 	lck_rw_init(&fdp->fd_dirs_lock, &proc_dirslock_grp, &proc_lck_attr);
742 }
743 
744 void
745 fdt_destroy(proc_t p)
746 {
747 	struct filedesc *fdp = &p->p_fd;
748 
749 	lck_mtx_destroy(&fdp->fd_kqhashlock, &proc_kqhashlock_grp);
750 	lck_mtx_destroy(&fdp->fd_knhashlock, &proc_knhashlock_grp);
751 	lck_mtx_destroy(&fdp->fd_lock, &proc_fdmlock_grp);
752 	lck_rw_destroy(&fdp->fd_dirs_lock, &proc_dirslock_grp);
753 }
754 
755 void
756 fdt_exec(proc_t p, kauth_cred_t p_cred, short posix_spawn_flags, thread_t thread, bool in_exec)
757 {
758 	struct filedesc *fdp = &p->p_fd;
759 	thread_t self = current_thread();
760 	struct uthread *ut = get_bsdthread_info(self);
761 	struct kqworkq *dealloc_kqwq = NULL;
762 
763 	/*
764 	 * If the current thread is bound as a workq/workloop
765 	 * servicing thread, we need to unbind it first.
766 	 */
767 	if (ut->uu_kqr_bound && get_bsdthreadtask_info(self) == p) {
768 		kqueue_threadreq_unbind(p, ut->uu_kqr_bound);
769 	}
770 
771 	/*
772 	 * Deallocate the knotes for this process
773 	 * and mark the tables non-existent so
774 	 * subsequent kqueue closes go faster.
775 	 */
776 	knotes_dealloc(p);
777 	assert(fdp->fd_knlistsize == 0);
778 	assert(fdp->fd_knhashmask == 0);
779 
780 	proc_fdlock(p);
781 
782 	/* Set the P_LADVLOCK flag if the flag set on old proc */
783 	if (in_exec && (current_proc()->p_ladvflag & P_LADVLOCK)) {
784 		os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
785 	}
786 
787 	for (int i = fdp->fd_afterlast; i-- > 0;) {
788 		struct fileproc *fp = fdp->fd_ofiles[i];
789 		char *flagp = &fdp->fd_ofileflags[i];
790 		bool inherit_file = true;
791 
792 		if (fp == FILEPROC_NULL) {
793 			continue;
794 		}
795 
796 		/*
797 		 * no file descriptor should be in flux when in exec,
798 		 * because we stopped all other threads
799 		 */
800 		if (*flagp & ~UF_INHERIT) {
801 			panic("file %d/%p in flux during exec of %p", i, fp, p);
802 		}
803 
804 		if (fp->fp_flags & FP_CLOEXEC) {
805 			inherit_file = false;
806 		} else if ((posix_spawn_flags & POSIX_SPAWN_CLOEXEC_DEFAULT) &&
807 		    !(*flagp & UF_INHERIT)) {
808 			/*
809 			 * Reverse the usual semantics of file descriptor
810 			 * inheritance - all of them should be closed
811 			 * except files marked explicitly as "inherit" and
812 			 * not marked close-on-exec.
813 			 */
814 			inherit_file = false;
815 #if CONFIG_MACF
816 		} else if (mac_file_check_inherit(p_cred, fp->fp_glob)) {
817 			inherit_file = false;
818 #endif
819 		}
820 
821 		*flagp = 0; /* clear UF_INHERIT */
822 
823 		if (!inherit_file) {
824 			fp_close_and_unlock(p, p_cred, i, fp, 0);
825 			proc_fdlock(p);
826 		} else if (in_exec) {
827 			/* Transfer F_POSIX style lock to new proc */
828 			proc_fdunlock(p);
829 			fg_transfer_filelocks(p, fp->fp_glob, thread);
830 			proc_fdlock(p);
831 		}
832 	}
833 
834 	/* release the per-process workq kq */
835 	if (fdp->fd_wqkqueue) {
836 		dealloc_kqwq = fdp->fd_wqkqueue;
837 		fdp->fd_wqkqueue = NULL;
838 	}
839 
840 	proc_fdunlock(p);
841 
842 	/* Anything to free? */
843 	if (dealloc_kqwq) {
844 		kqworkq_dealloc(dealloc_kqwq);
845 	}
846 }
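
/*
 * Illustrative userspace sketch (assumed API usage, not taken from this
 * file): the POSIX_SPAWN_CLOEXEC_DEFAULT branch above inverts descriptor
 * inheritance, so only descriptors explicitly marked "inherit" (and not
 * close-on-exec) survive into the spawned image:
 *
 *	posix_spawnattr_t attr;
 *	posix_spawn_file_actions_t fa;
 *
 *	posix_spawnattr_init(&attr);
 *	posix_spawnattr_setflags(&attr, POSIX_SPAWN_CLOEXEC_DEFAULT);
 *	posix_spawn_file_actions_init(&fa);
 *	posix_spawn_file_actions_addinherit_np(&fa, STDOUT_FILENO);
 *	// every other fd is closed in the child by the loop above
 */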
847 
848 
849 int
850 fdt_fork(struct filedesc *newfdp, proc_t p, vnode_t uth_cdir, bool in_exec)
851 {
852 	struct filedesc *fdp = &p->p_fd;
853 	struct fileproc **ofiles;
854 	char *ofileflags;
855 	int n_files, afterlast, freefile;
856 	vnode_t v_dir;
857 #if CONFIG_PROC_RESOURCE_LIMITS
858 	int fd_nfiles_open = 0;
859 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
860 	proc_fdlock(p);
861 
862 	newfdp->fd_flags = (fdp->fd_flags & FILEDESC_FORK_INHERITED_MASK);
863 	newfdp->fd_cmask = fdp->fd_cmask;
864 #if CONFIG_PROC_RESOURCE_LIMITS
865 	newfdp->fd_nfiles_soft_limit = fdp->fd_nfiles_soft_limit;
866 	newfdp->fd_nfiles_hard_limit = fdp->fd_nfiles_hard_limit;
867 
868 	newfdp->kqwl_dyn_soft_limit = fdp->kqwl_dyn_soft_limit;
869 	newfdp->kqwl_dyn_hard_limit = fdp->kqwl_dyn_hard_limit;
870 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
871 
872 	/*
873 	 * For both fd_cdir and fd_rdir make sure we get
874  * a valid reference... if we can't, then set
875  * the pointer(s) to NULL in the child... this
876 	 * will keep us from using a non-referenced vp
877 	 * and allows us to do the vnode_rele only on
878 	 * a properly referenced vp
879 	 */
880 	if ((v_dir = fdp->fd_rdir)) {
881 		if (vnode_getwithref(v_dir) == 0) {
882 			if (vnode_ref(v_dir) == 0) {
883 				newfdp->fd_rdir = v_dir;
884 			}
885 			vnode_put(v_dir);
886 		}
887 		if (newfdp->fd_rdir == NULL) {
888 			/*
889 			 * We couldn't get a new reference on
890 			 * the chroot directory being
891 			 * inherited... this is fatal, since
892 			 * otherwise it would constitute an
893 			 * escape from a chroot environment by
894 			 * the new process.
895 			 */
896 			proc_fdunlock(p);
897 			return EPERM;
898 		}
899 	}
900 
901 	/*
902 	 * If we are running with per-thread current working directories,
903 	 * inherit the new current working directory from the current thread.
904 	 */
905 	if ((v_dir = uth_cdir ? uth_cdir : fdp->fd_cdir)) {
906 		if (vnode_getwithref(v_dir) == 0) {
907 			if (vnode_ref(v_dir) == 0) {
908 				newfdp->fd_cdir = v_dir;
909 			}
910 			vnode_put(v_dir);
911 		}
912 		if (newfdp->fd_cdir == NULL && v_dir == fdp->fd_cdir) {
913 			/*
914 			 * we couldn't get a new reference on
915 			 * the current working directory being
916 			 * inherited... we might as well drop
917 			 * our reference from the parent also
918 			 * since the vnode has gone DEAD making
919 			 * it useless... by dropping it we'll
920 			 * be that much closer to recycling it
921 			 */
922 			vnode_rele(fdp->fd_cdir);
923 			fdp->fd_cdir = NULL;
924 		}
925 	}
926 
927 	/*
928 	 * If the number of open files fits in the internal arrays
929 	 * of the open file structure, use them, otherwise allocate
930 	 * additional memory for the number of descriptors currently
931 	 * in use.
932 	 */
933 	afterlast = fdp->fd_afterlast;
934 	freefile = fdp->fd_freefile;
935 	if (afterlast <= NDFILE) {
936 		n_files = NDFILE;
937 	} else {
938 		n_files = roundup(afterlast, NDEXTENT);
939 	}
940 
941 	proc_fdunlock(p);
942 
943 	ofiles = kalloc_type(struct fileproc *, n_files, Z_WAITOK | Z_ZERO);
944 	ofileflags = kalloc_data(n_files, Z_WAITOK | Z_ZERO);
945 	if (ofiles == NULL || ofileflags == NULL) {
946 		kfree_type(struct fileproc *, n_files, ofiles);
947 		kfree_data(ofileflags, n_files);
948 		if (newfdp->fd_cdir) {
949 			vnode_rele(newfdp->fd_cdir);
950 			newfdp->fd_cdir = NULL;
951 		}
952 		if (newfdp->fd_rdir) {
953 			vnode_rele(newfdp->fd_rdir);
954 			newfdp->fd_rdir = NULL;
955 		}
956 		return ENOMEM;
957 	}
958 
959 	proc_fdlock(p);
960 
961 	for (int i = afterlast; i-- > 0;) {
962 		struct fileproc *ofp, *nfp;
963 		char flags;
964 
965 		ofp = fdp->fd_ofiles[i];
966 		flags = fdp->fd_ofileflags[i];
967 
968 		if (ofp == NULL ||
969 		    (ofp->fp_glob->fg_lflags & FG_CONFINED) ||
970 		    ((ofp->fp_flags & FP_CLOFORK) && !in_exec) ||
971 		    ((ofp->fp_flags & FP_CLOEXEC) && in_exec) ||
972 		    (flags & UF_RESERVED)) {
973 			if (i + 1 == afterlast) {
974 				afterlast = i;
975 			}
976 			if (i < freefile) {
977 				freefile = i;
978 			}
979 
980 			continue;
981 		}
982 
983 		nfp = fileproc_alloc_init();
984 		nfp->fp_glob = ofp->fp_glob;
985 		if (in_exec) {
986 			nfp->fp_flags = (ofp->fp_flags & (FP_CLOEXEC | FP_CLOFORK));
987 			if (ofp->fp_guard_attrs) {
988 				guarded_fileproc_copy_guard(ofp, nfp);
989 			}
990 		} else {
991 			assert(ofp->fp_guard_attrs == 0);
992 			nfp->fp_flags = (ofp->fp_flags & FP_CLOEXEC);
993 		}
994 		fg_ref(p, nfp->fp_glob);
995 
996 		ofiles[i] = nfp;
997 #if CONFIG_PROC_RESOURCE_LIMITS
998 		fd_nfiles_open++;
999 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1000 	}
1001 
1002 	proc_fdunlock(p);
1003 
1004 	newfdp->fd_ofiles = ofiles;
1005 	newfdp->fd_ofileflags = ofileflags;
1006 	newfdp->fd_nfiles = n_files;
1007 	newfdp->fd_afterlast = afterlast;
1008 	newfdp->fd_freefile = freefile;
1009 
1010 #if CONFIG_PROC_RESOURCE_LIMITS
1011 	newfdp->fd_nfiles_open = fd_nfiles_open;
1012 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1013 
1014 	return 0;
1015 }
1016 
1017 void
1018 fdt_invalidate(proc_t p)
1019 {
1020 	struct filedesc *fdp = &p->p_fd;
1021 	struct fileproc *fp, **ofiles;
1022 	kauth_cred_t p_cred;
1023 	char *ofileflags;
1024 	struct kqworkq *kqwq = NULL;
1025 	vnode_t vn1 = NULL, vn2 = NULL;
1026 	struct kqwllist *kqhash = NULL;
1027 	u_long kqhashmask = 0;
1028 	int n_files = 0;
1029 
1030 	/*
1031 	 * deallocate all the knotes up front and claim empty
1032 	 * tables to make any subsequent kqueue closes faster.
1033 	 */
1034 	knotes_dealloc(p);
1035 	assert(fdp->fd_knlistsize == 0);
1036 	assert(fdp->fd_knhashmask == 0);
1037 
1038 	/*
1039 	 * dealloc all workloops that have outstanding retains
1040 	 * when created with scheduling parameters.
1041 	 */
1042 	kqworkloops_dealloc(p);
1043 
1044 	proc_fdlock(p);
1045 
1046 	/* proc_ucred_unsafe() is ok: process is terminating */
1047 	p_cred = proc_ucred_unsafe(p);
1048 
1049 	/* close file descriptors */
1050 	if (fdp->fd_nfiles > 0 && fdp->fd_ofiles) {
1051 		for (int i = fdp->fd_afterlast; i-- > 0;) {
1052 			if ((fp = fdp->fd_ofiles[i]) != NULL) {
1053 				if (fdp->fd_ofileflags[i] & UF_RESERVED) {
1054 					panic("fdfree: found fp with UF_RESERVED");
1055 				}
1056 				/* proc_ucred_unsafe() is ok: process is terminating */
1057 				fp_close_and_unlock(p, p_cred, i, fp, 0);
1058 				proc_fdlock(p);
1059 			}
1060 		}
1061 	}
1062 
1063 	n_files = fdp->fd_nfiles;
1064 	ofileflags = fdp->fd_ofileflags;
1065 	ofiles = fdp->fd_ofiles;
1066 	kqwq = fdp->fd_wqkqueue;
1067 	vn1 = fdp->fd_cdir;
1068 	vn2 = fdp->fd_rdir;
1069 
1070 	fdp->fd_ofileflags = NULL;
1071 	fdp->fd_ofiles = NULL;
1072 	fdp->fd_nfiles = 0;
1073 	fdp->fd_wqkqueue = NULL;
1074 	fdp->fd_cdir = NULL;
1075 	fdp->fd_rdir = NULL;
1076 
1077 	proc_fdunlock(p);
1078 
1079 	lck_mtx_lock(&fdp->fd_kqhashlock);
1080 
1081 	kqhash = fdp->fd_kqhash;
1082 	kqhashmask = fdp->fd_kqhashmask;
1083 
1084 	fdp->fd_kqhash = 0;
1085 	fdp->fd_kqhashmask = 0;
1086 
1087 	lck_mtx_unlock(&fdp->fd_kqhashlock);
1088 
1089 	kfree_type(struct fileproc *, n_files, ofiles);
1090 	kfree_data(ofileflags, n_files);
1091 
1092 	if (kqwq) {
1093 		kqworkq_dealloc(kqwq);
1094 	}
1095 	if (vn1) {
1096 		vnode_rele(vn1);
1097 	}
1098 	if (vn2) {
1099 		vnode_rele(vn2);
1100 	}
1101 	if (kqhash) {
1102 		for (uint32_t i = 0; i <= kqhashmask; i++) {
1103 			assert(LIST_EMPTY(&kqhash[i]));
1104 		}
1105 		hashdestroy(kqhash, M_KQUEUE, kqhashmask);
1106 	}
1107 }
1108 
1109 
1110 struct fileproc *
1111 fileproc_alloc_init(void)
1112 {
1113 	struct fileproc *fp;
1114 
1115 	fp = zalloc_id(ZONE_ID_FILEPROC, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1116 	os_ref_init(&fp->fp_iocount, &f_refgrp);
1117 	return fp;
1118 }
1119 
1120 
1121 void
1122 fileproc_free(struct fileproc *fp)
1123 {
1124 	os_ref_count_t refc = os_ref_release(&fp->fp_iocount);
1125 	if (0 != refc) {
1126 		panic("%s: pid %d refc: %u != 0",
1127 		    __func__, proc_pid(current_proc()), refc);
1128 	}
1129 	if (fp->fp_guard_attrs) {
1130 		guarded_fileproc_unguard(fp);
1131 	}
1132 	assert(fp->fp_wset == NULL);
1133 	zfree_id(ZONE_ID_FILEPROC, fp);
1134 }
1135 
1136 
1137 /*
1138  * Statistics counter for the number of times a process calling fdalloc()
1139  * has resulted in an expansion of the per process open file table.
1140  *
1141  * XXX This would likely be of more use if it were per process
1142  */
1143 int fdexpand;
1144 
1145 #if CONFIG_PROC_RESOURCE_LIMITS
1146 /*
1147  * Should be called only with the proc_fdlock held.
1148  */
1149 void
1150 fd_check_limit_exceeded(struct filedesc *fdp)
1151 {
1152 #if DIAGNOSTIC
1153 	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
1154 #endif
1155 
1156 	if (!fd_above_soft_limit_notified(fdp) && fdp->fd_nfiles_soft_limit &&
1157 	    (fdp->fd_nfiles_open > fdp->fd_nfiles_soft_limit)) {
1158 		fd_above_soft_limit_send_notification(fdp);
1159 		act_set_astproc_resource(current_thread());
1160 	} else if (!fd_above_hard_limit_notified(fdp) && fdp->fd_nfiles_hard_limit &&
1161 	    (fdp->fd_nfiles_open > fdp->fd_nfiles_hard_limit)) {
1162 		fd_above_hard_limit_send_notification(fdp);
1163 		act_set_astproc_resource(current_thread());
1164 	}
1165 }
1166 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1167 
1168 /*
1169  * fdalloc
1170  *
1171  * Description:	Allocate a file descriptor for the process.
1172  *
1173  * Parameters:	p				Process to allocate the fd in
1174  *		want				The fd we would prefer to get
1175  *		result				Pointer to fd we got
1176  *
1177  * Returns:	0				Success
1178  *		EMFILE
1179  *		ENOMEM
1180  *
1181  * Implicit returns:
1182  *		*result (modified)		The fd which was allocated
1183  */
1184 int
1185 fdalloc(proc_t p, int want, int *result)
1186 {
1187 	struct filedesc *fdp = &p->p_fd;
1188 	int i;
1189 	int last, numfiles, oldnfiles;
1190 	struct fileproc **newofiles;
1191 	char *newofileflags;
1192 	int lim = proc_limitgetcur_nofile(p);
1193 
1194 	/*
1195 	 * Search for a free descriptor starting at the higher
1196 	 * of want or fd_freefile.  If that fails, consider
1197 	 * expanding the ofile array.
1198 	 */
1199 #if DIAGNOSTIC
1200 	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
1201 #endif
1202 
1203 	for (;;) {
1204 		last = (int)MIN((unsigned int)fdp->fd_nfiles, (unsigned int)lim);
1205 		if ((i = want) < fdp->fd_freefile) {
1206 			i = fdp->fd_freefile;
1207 		}
1208 		for (; i < last; i++) {
1209 			if (fdp->fd_ofiles[i] == NULL && !(fdp->fd_ofileflags[i] & UF_RESERVED)) {
1210 				procfdtbl_reservefd(p, i);
1211 				if (i >= fdp->fd_afterlast) {
1212 					fdp->fd_afterlast = i + 1;
1213 				}
1214 				if (want <= fdp->fd_freefile) {
1215 					fdp->fd_freefile = i;
1216 				}
1217 				*result = i;
1218 #if CONFIG_PROC_RESOURCE_LIMITS
1219 				fdp->fd_nfiles_open++;
1220 				fd_check_limit_exceeded(fdp);
1221 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1222 				return 0;
1223 			}
1224 		}
1225 
1226 		/*
1227 		 * No space in current array.  Expand?
1228 		 */
1229 		if ((rlim_t)fdp->fd_nfiles >= lim) {
1230 			return EMFILE;
1231 		}
1232 		if (fdp->fd_nfiles < NDEXTENT) {
1233 			numfiles = NDEXTENT;
1234 		} else {
1235 			numfiles = 2 * fdp->fd_nfiles;
1236 		}
1237 		/* Enforce lim */
1238 		if ((rlim_t)numfiles > lim) {
1239 			numfiles = (int)lim;
1240 		}
1241 		proc_fdunlock(p);
1242 		newofiles = kalloc_type(struct fileproc *, numfiles, Z_WAITOK | Z_ZERO);
1243 		newofileflags = kalloc_data(numfiles, Z_WAITOK | Z_ZERO);
1244 		proc_fdlock(p);
1245 		if (newofileflags == NULL || newofiles == NULL) {
1246 			kfree_type(struct fileproc *, numfiles, newofiles);
1247 			kfree_data(newofileflags, numfiles);
1248 			return ENOMEM;
1249 		}
1250 		if (fdp->fd_nfiles >= numfiles) {
1251 			kfree_type(struct fileproc *, numfiles, newofiles);
1252 			kfree_data(newofileflags, numfiles);
1253 			continue;
1254 		}
1255 
1256 		/*
1257 		 * Copy the existing ofile and ofileflags arrays
1258 		 * and zero the new portion of each array.
1259 		 */
1260 		oldnfiles = fdp->fd_nfiles;
1261 		memcpy(newofiles, fdp->fd_ofiles,
1262 		    oldnfiles * sizeof(*fdp->fd_ofiles));
1263 		memcpy(newofileflags, fdp->fd_ofileflags, oldnfiles);
1264 
1265 		kfree_type(struct fileproc *, oldnfiles, fdp->fd_ofiles);
1266 		kfree_data(fdp->fd_ofileflags, oldnfiles);
1267 		fdp->fd_ofiles = newofiles;
1268 		fdp->fd_ofileflags = newofileflags;
1269 		fdp->fd_nfiles = numfiles;
1270 		fdexpand++;
1271 	}
1272 }
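
/*
 * Illustrative usage sketch: fdalloc() only reserves a slot (it is left
 * NULL with UF_RESERVED set); the caller must either publish a fileproc
 * into it or give the slot back.
 *
 *	proc_fdlock(p);
 *	error = fdalloc(p, 0, &fd);
 *	if (error == 0) {
 *		// ... construct a fileproc `fp' ...
 *		procfdtbl_releasefd(p, fd, fp);	// publish the descriptor
 *		// or: fdrelse(p, fd);		// return the reserved slot
 *	}
 *	proc_fdunlock(p);
 */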
1273 
1274 
1275 #pragma mark fileprocs
1276 
1277 void
1278 fileproc_modify_vflags(struct fileproc *fp, fileproc_vflags_t vflags, boolean_t clearflags)
1279 {
1280 	if (clearflags) {
1281 		os_atomic_andnot(&fp->fp_vflags, vflags, relaxed);
1282 	} else {
1283 		os_atomic_or(&fp->fp_vflags, vflags, relaxed);
1284 	}
1285 }
1286 
1287 fileproc_vflags_t
1288 fileproc_get_vflags(struct fileproc *fp)
1289 {
1290 	return os_atomic_load(&fp->fp_vflags, relaxed);
1291 }
1292 
1293 /*
1294  * falloc_withinit
1295  *
1296  * Create a new open file structure and allocate
1297  * a file descriptor for the process that refers to it.
1298  *
1299  * Returns:	0			Success
1300  *
1301  * Description:	Allocate an entry in the per process open file table and
1302  *		return the corresponding fileproc and fd.
1303  *
1304  * Parameters:	p				The process in whose open file
1305  *						table the fd is to be allocated
1306  *		resultfp			Pointer to fileproc pointer
1307  *						return area
1308  *		resultfd			Pointer to fd return area
1309  *		ctx				VFS context
1310  *		fp_init				fileproc initializer to use, if any
1311  *		initarg				argument passed to fp_init
1312  *
1313  * Returns:	0				Success
1314  *		ENFILE				Too many open files in system
1315  *		fdalloc:EMFILE			Too many open files in process
1316  *		fdalloc:ENOMEM			M_OFILETABL zone exhausted
1317  *		ENOMEM				fp_zone or fg_zone zone
1318  *						exhausted
1319  *
1320  * Implicit returns:
1321  *		*resultfp (modified)		Returned fileproc pointer
1322  *		*resultfd (modified)		Returned fd
1323  *
1324  * Notes:	This function takes separate process and context arguments
1325  *		solely to support kern_exec.c; otherwise, it would take
1326  *		neither, and use the vfs_context_current() routine internally.
1327  */
1328 int
1329 falloc_withinit(
1330 	proc_t                  p,
1331 	struct ucred           *p_cred,
1332 	struct vfs_context     *ctx,
1333 	struct fileproc       **resultfp,
1334 	int                    *resultfd,
1335 	fp_initfn_t             fp_init,
1336 	void                   *initarg)
1337 {
1338 	struct fileproc *fp;
1339 	struct fileglob *fg;
1340 	int error, nfd;
1341 
1342 	/* Make sure we don't go beyond the system-wide limit */
1343 	if (nfiles >= maxfiles) {
1344 		tablefull("file");
1345 		return ENFILE;
1346 	}
1347 
1348 	proc_fdlock(p);
1349 
1350 	/* fdalloc will make sure the process stays below per-process limit */
1351 	if ((error = fdalloc(p, 0, &nfd))) {
1352 		proc_fdunlock(p);
1353 		return error;
1354 	}
1355 
1356 #if CONFIG_MACF
1357 	error = mac_file_check_create(p_cred);
1358 	if (error) {
1359 		proc_fdunlock(p);
1360 		return error;
1361 	}
1362 #else
1363 	(void)p_cred;
1364 #endif
1365 
1366 	/*
1367 	 * Allocate a new file descriptor.
1368 	 * If the process has file descriptor zero open, add to the list
1369 	 * of open files at that point, otherwise put it at the front of
1370 	 * the list of open files.
1371 	 */
1372 	proc_fdunlock(p);
1373 
1374 	fp = fileproc_alloc_init();
1375 	if (fp_init) {
1376 		fp_init(fp, initarg);
1377 	}
1378 
1379 	fg = fg_alloc_init(ctx);
1380 
1381 	os_ref_retain_locked(&fp->fp_iocount);
1382 	fp->fp_glob = fg;
1383 
1384 	proc_fdlock(p);
1385 
1386 	p->p_fd.fd_ofiles[nfd] = fp;
1387 
1388 	proc_fdunlock(p);
1389 
1390 	if (resultfp) {
1391 		*resultfp = fp;
1392 	}
1393 	if (resultfd) {
1394 		*resultfd = nfd;
1395 	}
1396 
1397 	return 0;
1398 }
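
/*
 * Illustrative usage sketch (hypothetical `my_ops' / `my_data'; assumed
 * caller pattern, not a verbatim caller from this file): allocate the fd
 * and fileproc, fill in the fileglob, then publish the descriptor and
 * drop the extra I/O reference returned by falloc_withinit().
 *
 *	struct fileproc *fp;
 *	int fd, error;
 *
 *	error = falloc_withinit(p, kauth_cred_get(), vfs_context_current(),
 *	    &fp, &fd, NULL, NULL);
 *	if (error == 0) {
 *		fp->fp_glob->fg_ops = &my_ops;
 *		fg_set_data(fp->fp_glob, my_data);
 *		proc_fdlock(p);
 *		procfdtbl_releasefd(p, fd, NULL);
 *		fp_drop(p, fd, fp, 1);
 *		proc_fdunlock(p);
 *	}
 */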
1399 
1400 /*
1401  * fp_free
1402  *
1403  * Description:	Release the fd and free the fileproc associated with the fd
1404  *		in the per process open file table of the specified process;
1405  *		these values must correspond.
1406  *
1407  * Parameters:	p				Process containing fd
1408  *		fd				fd to be released
1409  *		fp				fileproc to be freed
1410  */
1411 void
1412 fp_free(proc_t p, int fd, struct fileproc * fp)
1413 {
1414 	proc_fdlock_spin(p);
1415 	fdrelse(p, fd);
1416 	proc_fdunlock(p);
1417 
1418 	fg_free(fp->fp_glob);
1419 	os_ref_release_live(&fp->fp_iocount);
1420 	fileproc_free(fp);
1421 }
1422 
1423 
1424 struct fileproc *
1425 fp_get_noref_locked(proc_t p, int fd)
1426 {
1427 	struct filedesc *fdp = &p->p_fd;
1428 	struct fileproc *fp;
1429 
1430 	if (fd < 0 || fd >= fdp->fd_nfiles ||
1431 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1432 	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1433 		return NULL;
1434 	}
1435 
1436 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1437 	return fp;
1438 }
1439 
1440 struct fileproc *
1441 fp_get_noref_locked_with_iocount(proc_t p, int fd)
1442 {
1443 	struct filedesc *fdp = &p->p_fd;
1444 	struct fileproc *fp = NULL;
1445 
1446 	if (fd < 0 || fd >= fdp->fd_nfiles ||
1447 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1448 	    os_ref_get_count(&fp->fp_iocount) <= 1 ||
1449 	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
1450 	    !(fdp->fd_ofileflags[fd] & UF_CLOSING))) {
1451 		panic("%s: caller without an iocount on fileproc (%d/:%p)",
1452 		    __func__, fd, fp);
1453 	}
1454 
1455 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1456 	return fp;
1457 }
1458 
1459 
1460 /*
1461  * fp_lookup
1462  *
1463  * Description:	Get fileproc pointer for a given fd from the per process
1464  *		open file table of the specified process and if successful,
1465  *		increment the fp_iocount
1466  *
1467  * Parameters:	p				Process in which fd lives
1468  *		fd				fd to get information for
1469  *		resultfp			Pointer to result fileproc
1470  *						pointer area, or 0 if none
1471  *		locked				!0 if the caller holds the
1472  *						proc_fdlock, 0 otherwise
1473  *
1474  * Returns:	0			Success
1475  *		EBADF			Bad file descriptor
1476  *
1477  * Implicit returns:
1478  *		*resultfp (modified)		Fileproc pointer
1479  *
1480  * Locks:	If the argument 'locked' is non-zero, then the caller is
1481  *		expected to have taken and held the proc_fdlock; if it is
1482  *		zero, then this routine internally takes and drops this lock.
1483  */
1484 int
1485 fp_lookup(proc_t p, int fd, struct fileproc **resultfp, int locked)
1486 {
1487 	struct filedesc *fdp = &p->p_fd;
1488 	struct fileproc *fp;
1489 
1490 	if (!locked) {
1491 		proc_fdlock_spin(p);
1492 	}
1493 	if (fd < 0 || fdp == NULL || fd >= fdp->fd_nfiles ||
1494 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1495 	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1496 		if (!locked) {
1497 			proc_fdunlock(p);
1498 		}
1499 		return EBADF;
1500 	}
1501 
1502 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1503 	os_ref_retain_locked(&fp->fp_iocount);
1504 
1505 	if (resultfp) {
1506 		*resultfp = fp;
1507 	}
1508 	if (!locked) {
1509 		proc_fdunlock(p);
1510 	}
1511 
1512 	return 0;
1513 }
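
/*
 * Illustrative usage sketch: fp_lookup() and fp_drop() always pair, with
 * matching `locked' arguments.
 *
 *	struct fileproc *fp;
 *
 *	if (fp_lookup(p, fd, &fp, 0) == 0) {	// takes an fp_iocount
 *		// ... use fp / fp->fp_glob without holding the fdlock ...
 *		fp_drop(p, fd, fp, 0);		// releases the fp_iocount
 *	}
 */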
1514 
1515 
1516 int
1517 fp_get_ftype(proc_t p, int fd, file_type_t ftype, int err, struct fileproc **fpp)
1518 {
1519 	struct filedesc *fdp = &p->p_fd;
1520 	struct fileproc *fp;
1521 
1522 	proc_fdlock_spin(p);
1523 	if (fd < 0 || fd >= fdp->fd_nfiles ||
1524 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1525 	    (fdp->fd_ofileflags[fd] & UF_RESERVED)) {
1526 		proc_fdunlock(p);
1527 		return EBADF;
1528 	}
1529 
1530 	if (fp->f_type != ftype) {
1531 		proc_fdunlock(p);
1532 		return err;
1533 	}
1534 
1535 	zone_id_require(ZONE_ID_FILEPROC, sizeof(*fp), fp);
1536 	os_ref_retain_locked(&fp->fp_iocount);
1537 	proc_fdunlock(p);
1538 
1539 	*fpp = fp;
1540 	return 0;
1541 }
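
/*
 * Illustrative usage sketch: fp_get_ftype() lets the caller choose the
 * errno returned on a type mismatch, e.g. for a socket descriptor:
 *
 *	struct fileproc *fp;
 *
 *	error = fp_get_ftype(p, fd, DTYPE_SOCKET, ENOTSOCK, &fp);
 *	if (error == 0) {
 *		struct socket *so = (struct socket *)fp_get_data(fp);
 *		// ... use `so' ...
 *		fp_drop(p, fd, fp, 0);
 *	}
 *
 * fp_getfvp() below is the equivalent helper for DTYPE_VNODE.
 */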
1542 
1543 
1544 /*
1545  * fp_drop
1546  *
1547  * Description:	Drop the I/O reference previously taken by calling fp_lookup
1548  *		et. al.
1549  *		et al.
1550  * Parameters:	p				Process in which the fd lives
1551  *		fd				fd associated with the fileproc
1552  *		fp				fileproc on which to set the
1553  *						flag and drop the reference
1554  *		locked				flag to internally take and
1555  *						drop proc_fdlock if it is not
1556  *						already held by the caller
1557  *
1558  * Returns:	0				Success
1559  *		EBADF				Bad file descriptor
1560  *
1561  * Locks:	This function internally takes and drops the proc_fdlock for
1562  *		the supplied process if 'locked' is zero, and assumes that
1563  *		the caller already holds this lock if 'locked' is non-zero.
1564  *
1565  * Notes:	The fileproc must correspond to the fd in the supplied proc
1566  */
1567 int
1568 fp_drop(proc_t p, int fd, struct fileproc *fp, int locked)
1569 {
1570 	struct filedesc *fdp = &p->p_fd;
1571 	int     needwakeup = 0;
1572 
1573 	if (!locked) {
1574 		proc_fdlock_spin(p);
1575 	}
1576 	if ((fp == FILEPROC_NULL) && (fd < 0 || fd >= fdp->fd_nfiles ||
1577 	    (fp = fdp->fd_ofiles[fd]) == NULL ||
1578 	    ((fdp->fd_ofileflags[fd] & UF_RESERVED) &&
1579 	    !(fdp->fd_ofileflags[fd] & UF_CLOSING)))) {
1580 		if (!locked) {
1581 			proc_fdunlock(p);
1582 		}
1583 		return EBADF;
1584 	}
1585 
1586 	if (1 == os_ref_release_locked(&fp->fp_iocount)) {
1587 		if (fp->fp_flags & FP_SELCONFLICT) {
1588 			fp->fp_flags &= ~FP_SELCONFLICT;
1589 		}
1590 
1591 		if (fdp->fd_fpdrainwait) {
1592 			fdp->fd_fpdrainwait = 0;
1593 			needwakeup = 1;
1594 		}
1595 	}
1596 	if (!locked) {
1597 		proc_fdunlock(p);
1598 	}
1599 	if (needwakeup) {
1600 		wakeup(&fdp->fd_fpdrainwait);
1601 	}
1602 
1603 	return 0;
1604 }
1605 
1606 
1607 /*
1608  * fileproc_drain
1609  *
1610  * Description:	Drain out pending I/O operations
1611  *
1612  * Parameters:	p				Process closing this file
1613  *		fp				fileproc struct for the open
1614  *						instance on the file
1615  *
1616  * Returns:	void
1617  *
1618  * Locks:	Assumes the caller holds the proc_fdlock
1619  *
1620  * Notes:	For character devices, this occurs on the last close of the
1621  *		device; for all other file descriptors, this occurs on each
1622  *		close to prevent fd's from being closed out from under
1623  *		operations currently in progress and blocked
1624  *
1625  * See Also:    file_vnode(), file_socket(), file_drop(), and the cautions
1626  *		regarding their use and interaction with this function.
1627  */
1628 static void
1629 fileproc_drain(proc_t p, struct fileproc * fp)
1630 {
1631 	struct filedesc *fdp = &p->p_fd;
1632 	struct vfs_context context;
1633 	thread_t thread;
1634 	bool is_current_proc;
1635 
1636 	is_current_proc = (p == current_proc());
1637 
1638 	if (!is_current_proc) {
1639 		proc_lock(p);
1640 		thread = proc_thread(p); /* XXX */
1641 		thread_reference(thread);
1642 		proc_unlock(p);
1643 	} else {
1644 		thread = current_thread();
1645 	}
1646 
1647 	context.vc_thread = thread;
1648 	context.vc_ucred = fp->fp_glob->fg_cred;
1649 
1650 	/* Set the vflag for drain */
1651 	fileproc_modify_vflags(fp, FPV_DRAIN, FALSE);
1652 
1653 	while (os_ref_get_count(&fp->fp_iocount) > 1) {
1654 		lck_mtx_convert_spin(&fdp->fd_lock);
1655 
1656 		fo_drain(fp, &context);
1657 		if ((fp->fp_flags & FP_INSELECT) == FP_INSELECT) {
1658 			struct select_set *selset;
1659 
1660 			if (fp->fp_guard_attrs) {
1661 				selset = fp->fp_guard->fpg_wset;
1662 			} else {
1663 				selset = fp->fp_wset;
1664 			}
1665 			if (waitq_wakeup64_all(selset, NO_EVENT64,
1666 			    THREAD_INTERRUPTED, WAITQ_WAKEUP_DEFAULT) == KERN_INVALID_ARGUMENT) {
1667 				panic("bad wait queue for waitq_wakeup64_all %p (%sfp:%p)",
1668 				    selset, fp->fp_guard_attrs ? "guarded " : "", fp);
1669 			}
1670 		}
1671 		if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
1672 			if (waitq_wakeup64_all(&select_conflict_queue, NO_EVENT64,
1673 			    THREAD_INTERRUPTED, WAITQ_WAKEUP_DEFAULT) == KERN_INVALID_ARGUMENT) {
1674 				panic("bad select_conflict_queue");
1675 			}
1676 		}
1677 		fdp->fd_fpdrainwait = 1;
1678 		msleep(&fdp->fd_fpdrainwait, &fdp->fd_lock, PRIBIO, "fpdrain", NULL);
1679 	}
1680 #if DIAGNOSTIC
1681 	if ((fp->fp_flags & FP_INSELECT) != 0) {
1682 		panic("FP_INSELECT set on drained fp");
1683 	}
1684 #endif
1685 	if ((fp->fp_flags & FP_SELCONFLICT) == FP_SELCONFLICT) {
1686 		fp->fp_flags &= ~FP_SELCONFLICT;
1687 	}
1688 
1689 	if (!is_current_proc) {
1690 		thread_deallocate(thread);
1691 	}
1692 }
1693 
1694 
1695 int
1696 fp_close_and_unlock(proc_t p, kauth_cred_t cred, int fd, struct fileproc *fp, int flags)
1697 {
1698 	struct filedesc *fdp = &p->p_fd;
1699 	struct fileglob *fg = fp->fp_glob;
1700 
1701 #if DIAGNOSTIC
1702 	proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
1703 #endif
1704 
1705 	/*
1706 	 * Keep most people from finding the filedesc while we are closing it.
1707 	 *
1708 	 * Callers are:
1709 	 *
1710 	 * - dup2() which always waits for UF_RESERVED to clear
1711 	 *
1712 	 * - close/guarded_close/... who will fail the fileproc lookup if
1713 	 *   UF_RESERVED is set,
1714 	 *
1715 	 * - fdexec()/fdfree() who only run once all threads in the proc
1716 	 *   are properly canceled, hence no fileproc in this proc should
1717 	 *   be in flux.
1718 	 *
1719 	 * Which means that neither UF_RESERVED nor UF_CLOSING should be set.
1720 	 *
1721 	 * Callers of fp_get_noref_locked_with_iocount() can still find
1722 	 * this entry so that they can drop their I/O reference despite
1723 	 * not having remembered the fileproc pointer (namely select() and
1724 	 * file_drop()).
1725 	 */
1726 	if (p->p_fd.fd_ofileflags[fd] & (UF_RESERVED | UF_CLOSING)) {
1727 		panic("%s: called with fileproc in flux (%d/:%p)",
1728 		    __func__, fd, fp);
1729 	}
1730 	p->p_fd.fd_ofileflags[fd] |= (UF_RESERVED | UF_CLOSING);
1731 
1732 	if ((fp->fp_flags & FP_AIOISSUED) ||
1733 #if CONFIG_MACF
1734 	    (FILEGLOB_DTYPE(fg) == DTYPE_VNODE)
1735 #else
1736 	    kauth_authorize_fileop_has_listeners()
1737 #endif
1738 	    ) {
1739 		proc_fdunlock(p);
1740 
1741 		if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
1742 			/*
1743 			 * call out to allow 3rd party notification of close.
1744 			 * Ignore result of kauth_authorize_fileop call.
1745 			 */
1746 #if CONFIG_MACF
1747 			mac_file_notify_close(cred, fp->fp_glob);
1748 #else
1749 			(void)cred;
1750 #endif
1751 
1752 			if (kauth_authorize_fileop_has_listeners() &&
1753 			    vnode_getwithref((vnode_t)fg_get_data(fg)) == 0) {
1754 				u_int   fileop_flags = 0;
1755 				if (fg->fg_flag & FWASWRITTEN) {
1756 					fileop_flags |= KAUTH_FILEOP_CLOSE_MODIFIED;
1757 				}
1758 				kauth_authorize_fileop(fg->fg_cred, KAUTH_FILEOP_CLOSE,
1759 				    (uintptr_t)fg_get_data(fg), (uintptr_t)fileop_flags);
1760 
1761 				vnode_put((vnode_t)fg_get_data(fg));
1762 			}
1763 		}
1764 
1765 		if (fp->fp_flags & FP_AIOISSUED) {
1766 			/*
1767 			 * cancel all async IO requests that can be cancelled.
1768 			 */
1769 			_aio_close( p, fd );
1770 		}
1771 
1772 		proc_fdlock(p);
1773 	}
1774 
1775 	if (fd < fdp->fd_knlistsize) {
1776 		knote_fdclose(p, fd);
1777 	}
1778 
1779 	fileproc_drain(p, fp);
1780 
1781 	if (flags & FD_DUP2RESV) {
1782 		fdp->fd_ofiles[fd] = NULL;
1783 		fdp->fd_ofileflags[fd] &= ~UF_CLOSING;
1784 	} else {
1785 		fdrelse(p, fd);
1786 	}
1787 
1788 	proc_fdunlock(p);
1789 
1790 	if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fg) == DTYPE_SOCKET) {
1791 		KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_END,
1792 		    fd, 0, (int64_t)VM_KERNEL_ADDRPERM(fg_get_data(fg)));
1793 	}
1794 
1795 	fileproc_free(fp);
1796 
1797 	return fg_drop(p, fg);
1798 }
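
/*
 * Illustrative usage sketch (assumed close(2)-style caller, not verbatim
 * from this file): fp_close_and_unlock() is entered with the proc_fdlock
 * held and returns with it dropped.
 *
 *	proc_fdlock(p);
 *	fp = fp_get_noref_locked(p, fd);
 *	if (fp == NULL) {
 *		proc_fdunlock(p);
 *		return EBADF;
 *	}
 *	return fp_close_and_unlock(p, kauth_cred_get(), fd, fp, 0);
 */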
1799 
1800 /*
1801  * dupfdopen
1802  *
1803  * Description:	Duplicate the specified descriptor to a free descriptor;
1804  *		this is the second half of fdopen(), above.
1805  *
1806  * Parameters:	p				current process pointer
1807  *		indx				fd to dup to
1808  *		dfd				fd to dup from
1809  *		mode				mode to set on new fd
1810  *		error				command code
1811  *
1812  * Returns:	0				Success
1813  *		EBADF				Source fd is bad
1814  *		EACCES				Requested mode not allowed
1815  *		!0				'error', if not ENODEV or
1816  *						ENXIO
1817  *
1818  * Notes:	XXX This is not thread safe; see fdopen() above
1819  */
1820 int
1821 dupfdopen(proc_t p, int indx, int dfd, int flags, int error)
1822 {
1823 	struct filedesc *fdp = &p->p_fd;
1824 	struct fileproc *wfp;
1825 	struct fileproc *fp;
1826 #if CONFIG_MACF
1827 	int myerror;
1828 #endif
1829 
1830 	/*
1831 	 * If the to-be-dup'd fd number is greater than the allowed number
1832 	 * of file descriptors, or the fd to be dup'd has already been
1833 	 * closed, reject.  Note, check for new == old is necessary as
1834 	 * falloc could allocate an already closed to-be-dup'd descriptor
1835 	 * as the new descriptor.
1836 	 */
1837 	proc_fdlock(p);
1838 
1839 	fp = fdp->fd_ofiles[indx];
1840 	if (dfd < 0 || dfd >= fdp->fd_nfiles ||
1841 	    (wfp = fdp->fd_ofiles[dfd]) == NULL || wfp == fp ||
1842 	    (fdp->fd_ofileflags[dfd] & UF_RESERVED)) {
1843 		proc_fdunlock(p);
1844 		return EBADF;
1845 	}
1846 #if CONFIG_MACF
1847 	myerror = mac_file_check_dup(kauth_cred_get(), wfp->fp_glob, dfd);
1848 	if (myerror) {
1849 		proc_fdunlock(p);
1850 		return myerror;
1851 	}
1852 #endif
1853 	/*
1854 	 * There are two cases of interest here.
1855 	 *
1856 	 * For ENODEV simply dup (dfd) to file descriptor
1857 	 * (indx) and return.
1858 	 *
1859 	 * For ENXIO steal away the file structure from (dfd) and
1860 	 * store it in (indx).  (dfd) is effectively closed by
1861 	 * this operation.
1862 	 *
1863 	 * Any other error code is just returned.
1864 	 */
1865 	switch (error) {
1866 	case ENODEV:
1867 		if (fp_isguarded(wfp, GUARD_DUP)) {
1868 			proc_fdunlock(p);
1869 			return EPERM;
1870 		}
1871 
1872 		/*
1873 		 * Check that the mode the file is being opened for is a
1874 		 * subset of the mode of the existing descriptor.
1875 		 */
1876 		if (((flags & (FREAD | FWRITE)) | wfp->f_flag) != wfp->f_flag) {
1877 			proc_fdunlock(p);
1878 			return EACCES;
1879 		}
1880 		if (indx >= fdp->fd_afterlast) {
1881 			fdp->fd_afterlast = indx + 1;
1882 		}
1883 
1884 		if (fp->fp_glob) {
1885 			fg_free(fp->fp_glob);
1886 		}
1887 		fg_ref(p, wfp->fp_glob);
1888 		fp->fp_glob = wfp->fp_glob;
1889 		/*
1890 		 * Historically, open(/dev/fd/<n>) preserves close on fork/exec,
1891 		 * unlike dup(), dup2() or fcntl(F_DUPFD).
1892 		 *
1893 		 * open1() already handled O_CLO{EXEC,FORK}
1894 		 */
1895 		fp->fp_flags |= (wfp->fp_flags & (FP_CLOFORK | FP_CLOEXEC));
1896 
1897 		procfdtbl_releasefd(p, indx, NULL);
1898 		fp_drop(p, indx, fp, 1);
1899 		proc_fdunlock(p);
1900 		return 0;
1901 
1902 	default:
1903 		proc_fdunlock(p);
1904 		return error;
1905 	}
1906 	/* NOTREACHED */
1907 }
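
/*
 * Illustrative sketch (not part of the original source), assuming the
 * classic BSD /dev/fd mechanism that dupfdopen() completes: opening the
 * synthetic path of an already-open descriptor behaves like a dup.  The
 * variable names below are assumptions chosen for the example.
 *
 *	int dfd = 5;                      // some descriptor already open
 *	char path[32];
 *	snprintf(path, sizeof(path), "/dev/fd/%d", dfd);
 *	int indx = open(path, O_RDONLY);  // the device open fails with ENODEV,
 *	                                  // open1() then calls dupfdopen(),
 *	                                  // which dups dfd into indx
 *
 * The requested access mode must be a subset of dfd's mode, otherwise the
 * open fails with EACCES as checked above.
 */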
1908 
1909 
1910 #pragma mark KPIS (sys/file.h)
1911 
1912 /*
1913  * fg_get_vnode
1914  *
1915  * Description:	Return vnode associated with the file structure, if
1916  *		any.  The lifetime of the returned vnode is bound to
1917  *		the lifetime of the file structure.
1918  *
1919  * Parameters:	fg				Pointer to fileglob to
1920  *						inspect
1921  *
1922  * Returns:	vnode_t
1923  */
1924 vnode_t
1925 fg_get_vnode(struct fileglob *fg)
1926 {
1927 	if (FILEGLOB_DTYPE(fg) == DTYPE_VNODE) {
1928 		return (vnode_t)fg_get_data(fg);
1929 	} else {
1930 		return NULL;
1931 	}
1932 }
1933 
1934 
1935 /*
1936  * fp_getfvp
1937  *
1938  * Description:	Get fileproc and vnode pointer for a given fd from the per
1939  *		process open file table of the specified process, and if
1940  *		successful, increment the fp_iocount
1941  *
1942  * Parameters:	p				Process in which fd lives
1943  *		fd				fd to get information for
1944  *		resultfp			Pointer to result fileproc
1945  *						pointer area, or 0 if none
1946  *		resultvp			Pointer to result vnode pointer
1947  *						area, or 0 if none
1948  *
1949  * Returns:	0				Success
1950  *		EBADF				Bad file descriptor
1951  *		ENOTSUP				fd does not refer to a vnode
1952  *
1953  * Implicit returns:
1954  *		*resultfp (modified)		Fileproc pointer
1955  *		*resultvp (modified)		vnode pointer
1956  *
1957  * Notes:	The resultfp and resultvp fields are optional, and may be
1958  *		independently specified as NULL to skip returning information
1959  *
1960  * Locks:	Internally takes and releases proc_fdlock
1961  */
1962 int
1963 fp_getfvp(proc_t p, int fd, struct fileproc **resultfp, struct vnode **resultvp)
1964 {
1965 	struct fileproc *fp;
1966 	int error;
1967 
1968 	error = fp_get_ftype(p, fd, DTYPE_VNODE, ENOTSUP, &fp);
1969 	if (error == 0) {
1970 		if (resultfp) {
1971 			*resultfp = fp;
1972 		}
1973 		if (resultvp) {
1974 			*resultvp = (struct vnode *)fp_get_data(fp);
1975 		}
1976 	}
1977 
1978 	return error;
1979 }
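
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * in-kernel caller pairs fp_getfvp() with fp_drop() so the fp_iocount taken
 * on success is always released; the vnode additionally needs its own
 * iocount before use.  Error handling is elided and names are assumptions.
 *
 *	struct fileproc *fp;
 *	struct vnode *vp;
 *
 *	if (fp_getfvp(p, fd, &fp, &vp) == 0) {
 *		if (vnode_getwithref(vp) == 0) {
 *			// ... operate on vp ...
 *			vnode_put(vp);
 *		}
 *		fp_drop(p, fd, fp, 0);
 *	}
 */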
1980 
1981 
1982 /*
1983  * fp_get_pipe_id
1984  *
1985  * Description:	Get pipe id for a given fd from the per process open file table
1986  *		of the specified process.
1987  *
1988  * Parameters:	p				Process in which fd lives
1989  *		fd				fd to get information for
1990  *		result_pipe_id			Pointer to result pipe id
1991  *
1992  * Returns:	0				Success
1993  *		EINVAL				NULL pointer arguments passed
1994  *		fp_lookup:EBADF			Bad file descriptor
1995  *		ENOTSUP				fd does not refer to a pipe
1996  *
1997  * Implicit returns:
1998  *		*result_pipe_id (modified)	pipe id
1999  *
2000  * Locks:	Internally takes and releases proc_fdlock
2001  */
2002 int
2003 fp_get_pipe_id(proc_t p, int fd, uint64_t *result_pipe_id)
2004 {
2005 	struct fileproc *fp = FILEPROC_NULL;
2006 	struct fileglob *fg = NULL;
2007 	int error = 0;
2008 
2009 	if (p == NULL || result_pipe_id == NULL) {
2010 		return EINVAL;
2011 	}
2012 
2013 	proc_fdlock(p);
2014 	if ((error = fp_lookup(p, fd, &fp, 1))) {
2015 		proc_fdunlock(p);
2016 		return error;
2017 	}
2018 	fg = fp->fp_glob;
2019 
2020 	if (FILEGLOB_DTYPE(fg) == DTYPE_PIPE) {
2021 		*result_pipe_id = pipe_id((struct pipe*)fg_get_data(fg));
2022 	} else {
2023 		error = ENOTSUP;
2024 	}
2025 
2026 	fp_drop(p, fd, fp, 1);
2027 	proc_fdunlock(p);
2028 	return error;
2029 }
2030 
2031 
2032 /*
2033  * file_vnode
2034  *
2035  * Description:	Given an fd, look it up in the current process's per process
2036  *		open file table, and return its internal vnode pointer.
2037  *
2038  * Parameters:	fd				fd to obtain vnode from
2039  *		vpp				pointer to vnode return area
2040  *
2041  * Returns:	0				Success
2042  *		EINVAL				The fd does not refer to a
2043  *						vnode fileproc entry
2044  *	fp_lookup:EBADF				Bad file descriptor
2045  *
2046  * Implicit returns:
2047  *		*vpp (modified)			Returned vnode pointer
2048  *
2049  * Locks:	This function internally takes and drops the proc_fdlock for
2050  *		the current process
2051  *
2052  * Notes:	If successful, this function increments the fp_iocount on the
2053  *		fd's corresponding fileproc.
2054  *
2055  *		The fileproc referenced is not returned; because of this, care
2056  *		must be taken to not drop the last reference (e.g. by closing
2057  *		the file).  This is inherently unsafe, since the reference may
2058  *		not be recoverable from the vnode, if there is a subsequent
2059  *		close that destroys the associated fileproc.  The caller should
2060  *		therefore retain their own reference on the fileproc so that
2061  *		the fp_iocount can be dropped subsequently.  Failure to do this
2062  *		can result in the returned pointer immediately becoming invalid
2063  *		following the call.
2064  *
2065  *		Use of this function is discouraged.
2066  */
2067 int
2068 file_vnode(int fd, struct vnode **vpp)
2069 {
2070 	return file_vnode_withvid(fd, vpp, NULL);
2071 }
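
/*
 * Usage sketch (illustrative, not part of the original source): KPI callers
 * are expected to balance the fp_iocount that file_vnode() takes with a
 * matching file_drop(); the vnode is only safe to use while that reference
 * is held and the fd is not closed.
 *
 *	struct vnode *vp;
 *
 *	if (file_vnode(fd, &vp) == 0) {
 *		// ... use vp here; do not close fd ...
 *		file_drop(fd);
 *	}
 */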
2072 
2073 
2074 /*
2075  * file_vnode_withvid
2076  *
2077  * Description:	Given an fd, look it up in the current process's per process
2078  *		open file table, and return its internal vnode pointer.
2079  *
2080  * Parameters:	fd				fd to obtain vnode from
2081  *		vpp				pointer to vnode return area
2082  *		vidp				pointer to vid of the returned vnode
2083  *
2084  * Returns:	0				Success
2085  *		EINVAL				The fd does not refer to a
2086  *						vnode fileproc entry
2087  *	fp_lookup:EBADF				Bad file descriptor
2088  *
2089  * Implicit returns:
2090  *		*vpp (modified)			Returned vnode pointer
2091  *
2092  * Locks:	This function internally takes and drops the proc_fdlock for
2093  *		the current process
2094  *
2095  * Notes:	If successful, this function increments the fp_iocount on the
2096  *		fd's corresponding fileproc.
2097  *
2098  *		The fileproc referenced is not returned; because of this, care
2099  *		must be taken to not drop the last reference (e.g. by closing
2100  *		the file).  This is inherently unsafe, since the reference may
2101  *		not be recoverable from the vnode, if there is a subsequent
2102  *		close that destroys the associated fileproc.  The caller should
2103  *		therefore retain their own reference on the fileproc so that
2104  *		the fp_iocount can be dropped subsequently.  Failure to do this
2105  *		can result in the returned pointer immediately becoming invalid
2106  *		following the call.
2107  *
2108  *		Use of this function is discouraged.
2109  */
2110 int
2111 file_vnode_withvid(int fd, struct vnode **vpp, uint32_t *vidp)
2112 {
2113 	struct fileproc *fp;
2114 	int error;
2115 
2116 	error = fp_get_ftype(current_proc(), fd, DTYPE_VNODE, EINVAL, &fp);
2117 	if (error == 0) {
2118 		if (vpp) {
2119 			*vpp = (struct vnode *)fp_get_data(fp);
2120 		}
2121 		if (vidp) {
2122 			*vidp = vnode_vid((struct vnode *)fp_get_data(fp));
2123 		}
2124 	}
2125 	return error;
2126 }
2127 
2128 /*
2129  * file_socket
2130  *
2131  * Description:	Given an fd, look it up in the current process's per process
2132  *		open file table, and return its internal socket pointer.
2133  *
2134  * Parameters:	fd				fd to obtain socket from
2135  *		sp				pointer to socket return area
2136  *
2137  * Returns:	0				Success
2138  *		ENOTSOCK			Not a socket
2139  *		fp_lookup:EBADF			Bad file descriptor
2140  *
2141  * Implicit returns:
2142  *		*sp (modified)			Returned socket pointer
2143  *
2144  * Locks:	This function internally takes and drops the proc_fdlock for
2145  *		the current process
2146  *
2147  * Notes:	If successful, this function increments the fp_iocount on the
2148  *		fd's corresponding fileproc.
2149  *
2150  *		The fileproc referenced is not returned; because of this, care
2151  *		must be taken to not drop the last reference (e.g. by closing
2152  *		the file).  This is inherently unsafe, since the reference may
2153  *		not be recoverable from the socket, if there is a subsequent
2154  *		close that destroys the associated fileproc.  The caller should
2155  *		therefore retain their own reference on the fileproc so that
2156  *		the fp_iocount can be dropped subsequently.  Failure to do this
2157  *		can result in the returned pointer immediately becoming invalid
2158  *		following the call.
2159  *
2160  *		Use of this function is discouraged.
2161  */
2162 int
2163 file_socket(int fd, struct socket **sp)
2164 {
2165 	struct fileproc *fp;
2166 	int error;
2167 
2168 	error = fp_get_ftype(current_proc(), fd, DTYPE_SOCKET, ENOTSOCK, &fp);
2169 	if (error == 0) {
2170 		if (sp) {
2171 			*sp = (struct socket *)fp_get_data(fp);
2172 		}
2173 	}
2174 	return error;
2175 }
2176 
2177 
2178 /*
2179  * file_flags
2180  *
2181  * Description:	Given an fd, look it up in the current process's per process
2182  *		open file table, and return its fileproc's flags field.
2183  *
2184  * Parameters:	fd				fd whose flags are to be
2185  *						retrieved
2186  *		flags				pointer to flags data area
2187  *
2188  * Returns:	0				Success
2189  *		EBADF				Bad file descriptor
2191  *
2192  * Implicit returns:
2193  *		*flags (modified)		Returned flags field
2194  *
2195  * Locks:	This function internally takes and drops the proc_fdlock for
2196  *		the current process
2197  */
2198 int
2199 file_flags(int fd, int *flags)
2200 {
2201 	proc_t p = current_proc();
2202 	struct fileproc *fp;
2203 	int error = EBADF;
2204 
2205 	proc_fdlock_spin(p);
2206 	fp = fp_get_noref_locked(p, fd);
2207 	if (fp) {
2208 		*flags = (int)fp->f_flag;
2209 		error = 0;
2210 	}
2211 	proc_fdunlock(p);
2212 
2213 	return error;
2214 }
2215 
2216 
2217 /*
2218  * file_drop
2219  *
2220  * Description:	Drop an iocount reference on an fd, and wake up any waiters
2221  *		for draining (i.e. blocked in fileproc_drain() called during
2222  *		the last attempt to close a file).
2223  *
2224  * Parameters:	fd				fd on which an ioreference is
2225  *						to be dropped
2226  *
2227  * Returns:	0				Success
2228  *
2229  * Description:	Given an fd, look it up in the current process's per process
2230  *		open file table, and drop its fileproc's fp_iocount by one
2231  *
2232  * Notes:	This is intended as a corresponding operation to the functions
2233  *		file_vnode() and file_socket() operations.
2234  *
2235  *		If the caller can't possibly hold an I/O reference,
2236  *		this function will panic the kernel rather than allowing
2237  *		for memory corruption.  Callers must only call this function
2238  *		when they have previously acquired an I/O reference on this file.
2239  *
2240  *		Use of this function is discouraged.
2241  */
2242 int
2243 file_drop(int fd)
2244 {
2245 	struct fileproc *fp;
2246 	proc_t p = current_proc();
2247 	struct filedesc *fdp = &p->p_fd;
2248 	int     needwakeup = 0;
2249 
2250 	proc_fdlock_spin(p);
2251 	fp = fp_get_noref_locked_with_iocount(p, fd);
2252 
2253 	if (1 == os_ref_release_locked(&fp->fp_iocount)) {
2254 		if (fp->fp_flags & FP_SELCONFLICT) {
2255 			fp->fp_flags &= ~FP_SELCONFLICT;
2256 		}
2257 
2258 		if (fdp->fd_fpdrainwait) {
2259 			fdp->fd_fpdrainwait = 0;
2260 			needwakeup = 1;
2261 		}
2262 	}
2263 	proc_fdunlock(p);
2264 
2265 	if (needwakeup) {
2266 		wakeup(&fdp->fd_fpdrainwait);
2267 	}
2268 	return 0;
2269 }
2270 
2271 
2272 #pragma mark syscalls
2273 
2274 #ifndef HFS_GET_BOOT_INFO
2275 #define HFS_GET_BOOT_INFO   (FCNTL_FS_SPECIFIC_BASE + 0x00004)
2276 #endif
2277 
2278 #ifndef HFS_SET_BOOT_INFO
2279 #define HFS_SET_BOOT_INFO   (FCNTL_FS_SPECIFIC_BASE + 0x00005)
2280 #endif
2281 
2282 #ifndef APFSIOC_REVERT_TO_SNAPSHOT
2283 #define APFSIOC_REVERT_TO_SNAPSHOT  _IOW('J', 1, u_int64_t)
2284 #endif
2285 
2286 #define CHECK_ADD_OVERFLOW_INT64L(x, y) \
2287 	        (((((x) > 0) && ((y) > 0) && ((x) > LLONG_MAX - (y))) || \
2288 	        (((x) < 0) && ((y) < 0) && ((x) < LLONG_MIN - (y)))) \
2289 	        ? 1 : 0)
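
/*
 * Illustrative example (not part of the original source): with
 * x = LLONG_MAX - 10 and y = 20 both operands are positive and
 * x > LLONG_MAX - y, so the macro evaluates to 1 (the addition would
 * overflow); with y = 5 it evaluates to 0.
 */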
2290 
2291 /*
2292  * sys_getdtablesize
2293  *
2294  * Description:	Returns the per process maximum size of the descriptor table
2295  *
2296  * Parameters:	p				Process being queried
2297  *		retval				Pointer to the call return area
2298  *
2299  * Returns:	0				Success
2300  *
2301  * Implicit returns:
2302  *		*retval (modified)		Size of dtable
2303  */
2304 int
2305 sys_getdtablesize(proc_t p, __unused struct getdtablesize_args *uap, int32_t *retval)
2306 {
2307 	*retval = proc_limitgetcur_nofile(p);
2308 	return 0;
2309 }
2310 
2311 
2312 /*
2313  * check_file_seek_range
2314  *
2315  * Description: Checks if seek offsets are in the range of 0 to LLONG_MAX.
2316  *
2317  * Parameters:  fl		Flock structure.
2318  *		cur_file_offset	Current offset in the file.
2319  *
2320  * Returns:     0               on Success.
2321  *		EOVERFLOW	on overflow.
2322  *		EINVAL          on offset less than zero.
2323  */
2324 
2325 static int
2326 check_file_seek_range(struct flock *fl, off_t cur_file_offset)
2327 {
2328 	if (fl->l_whence == SEEK_CUR) {
2329 		/* Check if the start marker is beyond LLONG_MAX. */
2330 		if (CHECK_ADD_OVERFLOW_INT64L(fl->l_start, cur_file_offset)) {
2331 			/* Check if start marker is negative */
2332 			if (fl->l_start < 0) {
2333 				return EINVAL;
2334 			}
2335 			return EOVERFLOW;
2336 		}
2337 		/* Check if the start marker is negative. */
2338 		if (fl->l_start + cur_file_offset < 0) {
2339 			return EINVAL;
2340 		}
2341 		/* Check if end marker is beyond LLONG_MAX. */
2342 		if ((fl->l_len > 0) && (CHECK_ADD_OVERFLOW_INT64L(fl->l_start +
2343 		    cur_file_offset, fl->l_len - 1))) {
2344 			return EOVERFLOW;
2345 		}
2346 		/* Check if the end marker is negative. */
2347 		if ((fl->l_len <= 0) && (fl->l_start + cur_file_offset +
2348 		    fl->l_len < 0)) {
2349 			return EINVAL;
2350 		}
2351 	} else if (fl->l_whence == SEEK_SET) {
2352 		/* Check if the start marker is negative. */
2353 		if (fl->l_start < 0) {
2354 			return EINVAL;
2355 		}
2356 		/* Check if the end marker is beyond LLONG_MAX. */
2357 		if ((fl->l_len > 0) &&
2358 		    CHECK_ADD_OVERFLOW_INT64L(fl->l_start, fl->l_len - 1)) {
2359 			return EOVERFLOW;
2360 		}
2361 		/* Check if the end marker is negative. */
2362 		if ((fl->l_len < 0) && fl->l_start + fl->l_len < 0) {
2363 			return EINVAL;
2364 		}
2365 	}
2366 	return 0;
2367 }
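
/*
 * Worked example (illustrative, not part of the original source): for a
 * lock request with l_whence == SEEK_CUR, l_start == -200 and a current
 * file offset of 100, the sum (-100) does not overflow but the resulting
 * start marker is negative, so EINVAL is returned.  With l_start == 10
 * and a current offset of LLONG_MAX the addition would overflow and
 * EOVERFLOW is returned instead.
 */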
2368 
2369 
2370 /*
2371  * sys_dup
2372  *
2373  * Description:	Duplicate a file descriptor.
2374  *
2375  * Parameters:	p				Process performing the dup
2376  *		uap->fd				The fd to dup
2377  *		retval				Pointer to the call return area
2378  *
2379  * Returns:	0				Success
2380  *		!0				Errno
2381  *
2382  * Implicit returns:
2383  *		*retval (modified)		The new descriptor
2384  */
2385 int
2386 sys_dup(proc_t p, struct dup_args *uap, int32_t *retval)
2387 {
2388 	int old = uap->fd;
2389 	int new, error;
2390 	struct fileproc *fp;
2391 	kauth_cred_t p_cred;
2392 
2393 	proc_fdlock(p);
2394 	if ((error = fp_lookup(p, old, &fp, 1))) {
2395 		proc_fdunlock(p);
2396 		return error;
2397 	}
2398 	if (fp_isguarded(fp, GUARD_DUP)) {
2399 		error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
2400 		(void) fp_drop(p, old, fp, 1);
2401 		proc_fdunlock(p);
2402 		return error;
2403 	}
2404 	if ((error = fdalloc(p, 0, &new))) {
2405 		fp_drop(p, old, fp, 1);
2406 		proc_fdunlock(p);
2407 		return error;
2408 	}
2409 	p_cred = current_cached_proc_cred(p);
2410 	error = finishdup(p, p_cred, old, new, 0, retval);
2411 
2412 	if (ENTR_SHOULDTRACE && FILEGLOB_DTYPE(fp->fp_glob) == DTYPE_SOCKET) {
2413 		KERNEL_ENERGYTRACE(kEnTrActKernSocket, DBG_FUNC_START,
2414 		    new, 0, (int64_t)VM_KERNEL_ADDRPERM(fp_get_data(fp)));
2415 	}
2416 
2417 	fp_drop(p, old, fp, 1);
2418 	proc_fdunlock(p);
2419 
2420 	return error;
2421 }
2422 
2423 /*
2424  * sys_dup2
2425  *
2426  * Description:	Duplicate a file descriptor to a particular value.
2427  *
2428  * Parameters:	p				Process performing the dup
2429  *		uap->from			The fd to dup
2430  *		uap->to				The fd to dup it to
2431  *		retval				Pointer to the call return area
2432  *
2433  * Returns:	0				Success
2434  *		!0				Errno
2435  *
2436  * Implicit returns:
2437  *		*retval (modified)		The new descriptor
2438  */
2439 int
2440 sys_dup2(proc_t p, struct dup2_args *uap, int32_t *retval)
2441 {
2442 	kauth_cred_t p_cred = current_cached_proc_cred(p);
2443 
2444 	return dup2(p, p_cred, uap->from, uap->to, retval);
2445 }
2446 
2447 int
2448 dup2(proc_t p, kauth_cred_t p_cred, int old, int new, int *retval)
2449 {
2450 	struct filedesc *fdp = &p->p_fd;
2451 	struct fileproc *fp, *nfp;
2452 	int i, error;
2453 
2454 	proc_fdlock(p);
2455 
2456 startover:
2457 	if ((error = fp_lookup(p, old, &fp, 1))) {
2458 		proc_fdunlock(p);
2459 		return error;
2460 	}
2461 	if (fp_isguarded(fp, GUARD_DUP)) {
2462 		error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
2463 		(void) fp_drop(p, old, fp, 1);
2464 		proc_fdunlock(p);
2465 		return error;
2466 	}
2467 	if (new < 0 || new >= proc_limitgetcur_nofile(p)) {
2468 		fp_drop(p, old, fp, 1);
2469 		proc_fdunlock(p);
2470 		return EBADF;
2471 	}
2472 	if (old == new) {
2473 		fp_drop(p, old, fp, 1);
2474 		*retval = new;
2475 		proc_fdunlock(p);
2476 		return 0;
2477 	}
2478 	if (new < 0 || new >= fdp->fd_nfiles) {
2479 		if ((error = fdalloc(p, new, &i))) {
2480 			fp_drop(p, old, fp, 1);
2481 			proc_fdunlock(p);
2482 			return error;
2483 		}
2484 		if (new != i) {
2485 			fdrelse(p, i);
2486 			goto closeit;
2487 		}
2488 	} else {
2489 closeit:
2490 		if ((fdp->fd_ofileflags[new] & UF_RESERVED) == UF_RESERVED) {
2491 			fp_drop(p, old, fp, 1);
2492 			procfdtbl_waitfd(p, new);
2493 #if DIAGNOSTIC
2494 			proc_fdlock_assert(p, LCK_MTX_ASSERT_OWNED);
2495 #endif
2496 			goto startover;
2497 		}
2498 
2499 		if ((nfp = fdp->fd_ofiles[new]) != NULL) {
2500 			if (fp_isguarded(nfp, GUARD_CLOSE)) {
2501 				fp_drop(p, old, fp, 1);
2502 				error = fp_guard_exception(p,
2503 				    new, nfp, kGUARD_EXC_CLOSE);
2504 				proc_fdunlock(p);
2505 				return error;
2506 			}
2507 			(void)fp_close_and_unlock(p, p_cred, new, nfp, FD_DUP2RESV);
2508 			proc_fdlock(p);
2509 			assert(fdp->fd_ofileflags[new] & UF_RESERVED);
2510 		} else {
2511 #if DIAGNOSTIC
2512 			if (fdp->fd_ofiles[new] != NULL) {
2513 				panic("dup2: no ref on fileproc %d", new);
2514 			}
2515 #endif
2516 			procfdtbl_reservefd(p, new);
2517 		}
2518 	}
2519 #if DIAGNOSTIC
2520 	if (fdp->fd_ofiles[new] != 0) {
2521 		panic("dup2: overwriting fd_ofiles with new %d", new);
2522 	}
2523 	if ((fdp->fd_ofileflags[new] & UF_RESERVED) == 0) {
2524 		panic("dup2: unreserved fileflags with new %d", new);
2525 	}
2526 #endif
2527 	error = finishdup(p, p_cred, old, new, 0, retval);
2528 	fp_drop(p, old, fp, 1);
2529 	proc_fdunlock(p);
2530 
2531 	return error;
2532 }
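
/*
 * Usage sketch (illustrative, not part of the original source): the
 * userspace-visible semantics implemented above.  Redirecting stdout to a
 * log file closes whatever descriptor 1 referred to and leaves both fds
 * referring to the same open file.  The path and flags are assumptions
 * chosen for the example.
 *
 *	int logfd = open("/tmp/log", O_WRONLY | O_CREAT | O_APPEND, 0644);
 *	if (logfd >= 0 && dup2(logfd, STDOUT_FILENO) == STDOUT_FILENO) {
 *		// writes to fd 1 now reach /tmp/log
 *	}
 */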
2533 
2534 
2535 /*
2536  * fcntl
2537  *
2538  * Description:	The file control system call.
2539  *
2540  * Parameters:	p				Process performing the fcntl
2541  *		uap->fd				The fd to operate against
2542  *		uap->cmd			The command to perform
2543  *		uap->arg			Pointer to the command argument
2544  *		retval				Pointer to the call return area
2545  *
2546  * Returns:	0				Success
2547  *		!0				Errno (see fcntl_nocancel)
2548  *
2549  * Implicit returns:
2550  *		*retval (modified)		fcntl return value (if any)
2551  *
2552  * Notes:	This system call differs from fcntl_nocancel() in that it
2553  *		tests for cancellation prior to performing a potentially
2554  *		blocking operation.
2555  */
2556 int
2557 sys_fcntl(proc_t p, struct fcntl_args *uap, int32_t *retval)
2558 {
2559 	__pthread_testcancel(1);
2560 	return sys_fcntl_nocancel(p, (struct fcntl_nocancel_args *)uap, retval);
2561 }
2562 
2563 #define ACCOUNT_OPENFROM_ENTITLEMENT \
2564 	"com.apple.private.vfs.role-account-openfrom"
2565 
2566 /*
2567  * sys_fcntl_nocancel
2568  *
2569  * Description:	A non-cancel-testing file control system call.
2570  *
2571  * Parameters:	p				Process performing the fcntl
2572  *		uap->fd				The fd to operate against
2573  *		uap->cmd			The command to perform
2574  *		uap->arg			Pointer to the command argument
2575  *		retval				Pointer to the call return area
2576  *
2577  * Returns:	0				Success
2578  *		EINVAL
2579  *	fp_lookup:EBADF				Bad file descriptor
2580  * [F_DUPFD]
2581  *	fdalloc:EMFILE
2582  *	fdalloc:ENOMEM
2583  *	finishdup:EBADF
2584  *	finishdup:ENOMEM
2585  * [F_SETOWN]
2586  *		ESRCH
2587  * [F_SETLK]
2588  *		EBADF
2589  *		EOVERFLOW
2590  *	copyin:EFAULT
2591  *	vnode_getwithref:???
2592  *	VNOP_ADVLOCK:???
2593  *	msleep:ETIMEDOUT
2594  * [F_GETLK]
2595  *		EBADF
2596  *		EOVERFLOW
2597  *	copyin:EFAULT
2598  *	copyout:EFAULT
2599  *	vnode_getwithref:???
2600  *	VNOP_ADVLOCK:???
2601  * [F_PREALLOCATE]
2602  *		EBADF
2603  *		EFBIG
2604  *		EINVAL
2605  *		ENOSPC
2606  *	copyin:EFAULT
2607  *	copyout:EFAULT
2608  *	vnode_getwithref:???
2609  *	VNOP_ALLOCATE:???
2610  * [F_SETSIZE,F_RDADVISE]
2611  *		EBADF
2612  *		EINVAL
2613  *	copyin:EFAULT
2614  *	vnode_getwithref:???
2615  * [F_RDAHEAD,F_NOCACHE]
2616  *		EBADF
2617  *	vnode_getwithref:???
2618  * [???]
2619  *
2620  * Implicit returns:
2621  *		*retval (modified)		fcntl return value (if any)
2622  */
2623 #define SYS_FCNTL_DECLARE_VFS_CONTEXT(context) \
2624 	struct vfs_context context = { \
2625 	    .vc_thread = current_thread(), \
2626 	    .vc_ucred = fp->f_cred, \
2627 	}
2628 
2629 static user_addr_t
2630 sys_fnctl_parse_arg(proc_t p, user_long_t arg)
2631 {
2632 	/*
2633 	 * Since the arg parameter is defined as a long but may be
2634 	 * either a long or a pointer we must take care to handle
2635 	 * sign extension issues.  Our sys call munger will sign
2636 	 * extend a long when we are called from a 32-bit process.
2637 	 * Since we can never have an address greater than 32-bits
2638 	 * from a 32-bit process we lop off the top 32-bits to avoid
2639 	 * getting the wrong address
2640 	 */
2641 	return proc_is64bit(p) ? arg : CAST_USER_ADDR_T((uint32_t)arg);
2642 }
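
/*
 * Illustrative example (not part of the original source): a 32-bit process
 * passing the pointer 0x90000000 arrives here sign-extended by the syscall
 * munger to the 64-bit value 0xffffffff90000000; truncating to uint32_t
 * before CAST_USER_ADDR_T() recovers the intended 0x90000000 user address,
 * while a 64-bit process's argument is passed through unchanged.
 */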
2643 
2644 /* cleanup code common to fcntl handlers, for when the fdlock is still held */
2645 static int
2646 sys_fcntl_out(proc_t p, int fd, struct fileproc *fp, int error)
2647 {
2648 	fp_drop(p, fd, fp, 1);
2649 	proc_fdunlock(p);
2650 	return error;
2651 }
2652 
2653 /* cleanup code common to fcntl handlers acting on vnodes, once they have unlocked the fdlock */
2654 static int
2655 sys_fcntl_outdrop(proc_t p, int fd, struct fileproc *fp, struct vnode *vp, int error)
2656 {
2657 #pragma unused(vp)
2658 
2659 	AUDIT_ARG(vnpath_withref, vp, ARG_VNODE1);
2660 	fp_drop(p, fd, fp, 0);
2661 	return error;
2662 }
2663 
2664 typedef int (*sys_fnctl_handler_t)(proc_t p, int fd, int cmd, user_long_t arg,
2665     struct fileproc *fp, int32_t *retval);
2666 
2667 typedef int (*sys_fnctl_vnode_handler_t)(proc_t p, int fd, int cmd,
2668     user_long_t arg, struct fileproc *fp, struct vnode *vp, int32_t *retval);
2669 
2670 /*
2671  * SPI (private) for opening a file starting from a dir fd
2672  *
2673  * Note: do not inline to keep stack usage under control.
2674  */
2675 __attribute__((noinline))
2676 static int
2677 sys_fcntl__OPENFROM(proc_t p, int fd, int cmd, user_long_t arg,
2678     struct fileproc *fp, struct vnode *vp, int32_t *retval)
2679 {
2680 #pragma unused(cmd)
2681 
2682 	user_addr_t argp = sys_fnctl_parse_arg(p, arg);
2683 	struct user_fopenfrom fopen;
2684 	struct vnode_attr *va;
2685 	struct nameidata *nd;
2686 	int error, cmode;
2687 	bool has_entitlement;
2688 
2689 	/* The source directory fd must have been opened for reading */
2690 	if ((fp->f_flag & FREAD) == 0) {
2691 		return sys_fcntl_out(p, fd, fp, EBADF);
2692 	}
2693 	proc_fdunlock(p);
2694 
2695 	if (vnode_getwithref(vp)) {
2696 		error = ENOENT;
2697 		goto outdrop;
2698 	}
2699 
2700 	/* Only valid for directories */
2701 	if (vp->v_type != VDIR) {
2702 		vnode_put(vp);
2703 		error = ENOTDIR;
2704 		goto outdrop;
2705 	}
2706 
2707 	/*
2708 	 * Only entitled apps may use the credentials of the thread
2709 	 * that opened the file descriptor.
2710 	 * Non-entitled threads will use their own context.
2711 	 */
2712 	has_entitlement = IOCurrentTaskHasEntitlement(ACCOUNT_OPENFROM_ENTITLEMENT);
2713 
2714 	/* Get flags, mode and pathname arguments. */
2715 	if (IS_64BIT_PROCESS(p)) {
2716 		error = copyin(argp, &fopen, sizeof(fopen));
2717 	} else {
2718 		struct user32_fopenfrom fopen32;
2719 
2720 		error = copyin(argp, &fopen32, sizeof(fopen32));
2721 		fopen.o_flags = fopen32.o_flags;
2722 		fopen.o_mode = fopen32.o_mode;
2723 		fopen.o_pathname = CAST_USER_ADDR_T(fopen32.o_pathname);
2724 	}
2725 	if (error) {
2726 		vnode_put(vp);
2727 		goto outdrop;
2728 	}
2729 
2730 	/* open1() can have really deep stacks, so allocate those */
2731 	va = kalloc_type(struct vnode_attr, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2732 	nd = kalloc_type(struct nameidata, Z_WAITOK | Z_ZERO | Z_NOFAIL);
2733 
2734 	AUDIT_ARG(fflags, fopen.o_flags);
2735 	AUDIT_ARG(mode, fopen.o_mode);
2736 	VATTR_INIT(va);
2737 	/* Mask off all but regular access permissions */
2738 	cmode = ((fopen.o_mode & ~p->p_fd.fd_cmask) & ALLPERMS) & ~S_ISTXT;
2739 	VATTR_SET(va, va_mode, cmode & ACCESSPERMS);
2740 
2741 	SYS_FCNTL_DECLARE_VFS_CONTEXT(context);
2742 
2743 	/* Start the lookup relative to the file descriptor's vnode. */
2744 	NDINIT(nd, LOOKUP, OP_OPEN, USEDVP | FOLLOW | AUDITVNPATH1, UIO_USERSPACE,
2745 	    fopen.o_pathname, has_entitlement ? &context : vfs_context_current());
2746 	nd->ni_dvp = vp;
2747 
2748 	error = open1(has_entitlement ? &context : vfs_context_current(),
2749 	    nd, fopen.o_flags, va, NULL, NULL, retval, AUTH_OPEN_NOAUTHFD);
2750 
2751 	kfree_type(struct vnode_attr, va);
2752 	kfree_type(struct nameidata, nd);
2753 
2754 	vnode_put(vp);
2755 
2756 outdrop:
2757 	return sys_fcntl_outdrop(p, fd, fp, vp, error);
2758 }
2759 
2760 int
2761 sys_fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
2762 {
2763 	int fd = uap->fd;
2764 	int cmd = uap->cmd;
2765 	struct fileproc *fp;
2766 	struct vnode *vp = NULLVP;      /* for AUDIT_ARG() at end */
2767 	unsigned int oflags, nflags;
2768 	int i, tmp, error, error2, flg = 0;
2769 	struct flock fl = {};
2770 	struct flocktimeout fltimeout;
2771 	struct timespec *timeout = NULL;
2772 	off_t offset;
2773 	int newmin;
2774 	daddr64_t lbn, bn;
2775 	unsigned int fflag;
2776 	user_addr_t argp;
2777 	boolean_t is64bit;
2778 	int has_entitlement = 0;
2779 	kauth_cred_t p_cred;
2780 	cs_blob_add_flags_t csblob_add_flags = 0;
2781 
2782 	AUDIT_ARG(fd, uap->fd);
2783 	AUDIT_ARG(cmd, uap->cmd);
2784 
2785 	proc_fdlock(p);
2786 	if ((error = fp_lookup(p, fd, &fp, 1))) {
2787 		proc_fdunlock(p);
2788 		return error;
2789 	}
2790 
2791 	SYS_FCNTL_DECLARE_VFS_CONTEXT(context);
2792 
2793 	is64bit = proc_is64bit(p);
2794 	if (is64bit) {
2795 		argp = uap->arg;
2796 	} else {
2797 		/*
2798 		 * Since the arg parameter is defined as a long but may be
2799 		 * either a long or a pointer we must take care to handle
2800 		 * sign extension issues.  Our sys call munger will sign
2801 		 * extend a long when we are called from a 32-bit process.
2802 		 * Since we can never have an address greater than 32-bits
2803 		 * from a 32-bit process we lop off the top 32-bits to avoid
2804 		 * getting the wrong address
2805 		 */
2806 		argp = CAST_USER_ADDR_T((uint32_t)uap->arg);
2807 	}
2808 
2809 #if CONFIG_MACF
2810 	error = mac_file_check_fcntl(kauth_cred_get(), fp->fp_glob, cmd, uap->arg);
2811 	if (error) {
2812 		goto out;
2813 	}
2814 #endif
2815 
2816 	switch (cmd) {
2817 	case F_DUPFD:
2818 	case F_DUPFD_CLOEXEC:
2819 		if (fp_isguarded(fp, GUARD_DUP)) {
2820 			error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
2821 			goto out;
2822 		}
2823 		newmin = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
2824 		AUDIT_ARG(value32, newmin);
2825 		if (newmin < 0 || newmin >= proc_limitgetcur_nofile(p)) {
2826 			error = EINVAL;
2827 			goto out;
2828 		}
2829 		if ((error = fdalloc(p, newmin, &i))) {
2830 			goto out;
2831 		}
2832 		p_cred = current_cached_proc_cred(p);
2833 		error = finishdup(p, p_cred, fd, i,
2834 		    cmd == F_DUPFD_CLOEXEC ? FP_CLOEXEC : 0, retval);
2835 		goto out;
2836 
2837 	case F_GETFD:
2838 		*retval = (fp->fp_flags & FP_CLOEXEC) ? FD_CLOEXEC : 0;
2839 		error = 0;
2840 		goto out;
2841 
2842 	case F_SETFD:
2843 		AUDIT_ARG(value32, (uint32_t)uap->arg);
2844 		if (uap->arg & FD_CLOEXEC) {
2845 			fp->fp_flags |= FP_CLOEXEC;
2846 			error = 0;
2847 		} else if (!fp->fp_guard_attrs) {
2848 			fp->fp_flags &= ~FP_CLOEXEC;
2849 			error = 0;
2850 		} else {
2851 			error = fp_guard_exception(p,
2852 			    fd, fp, kGUARD_EXC_NOCLOEXEC);
2853 		}
2854 		goto out;
2855 
2856 	case F_GETFL:
2857 		fflag = fp->f_flag;
2858 		if ((fflag & O_EVTONLY) && proc_disallow_rw_for_o_evtonly(p)) {
2859 			/*
2860 			 * We insert FREAD back so that conversion back to open flags with
2861 			 * OFLAGS() will come out right. We only need to set FREAD, as
2862 			 * O_RDONLY is always implied.
2863 			 */
2864 			fflag |= FREAD;
2865 		}
2866 		*retval = OFLAGS(fflag);
2867 		error = 0;
2868 		goto out;
2869 
2870 	case F_SETFL:
2871 		// FIXME (rdar://54898652)
2872 		//
2873 		// this code is broken if fcntl(F_SETFL) and ioctl() are
2874 		// called concurrently for the same fileglob.
2875 
2876 		tmp = CAST_DOWN_EXPLICIT(int, uap->arg); /* arg is an int, so we won't lose bits */
2877 		AUDIT_ARG(value32, tmp);
2878 
2879 		os_atomic_rmw_loop(&fp->f_flag, oflags, nflags, relaxed, {
2880 			nflags  = oflags & ~FCNTLFLAGS;
2881 			nflags |= FFLAGS(tmp) & FCNTLFLAGS;
2882 		});
2883 		tmp = nflags & FNONBLOCK;
2884 		error = fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
2885 		if (error) {
2886 			goto out;
2887 		}
2888 		tmp = nflags & FASYNC;
2889 		error = fo_ioctl(fp, FIOASYNC, (caddr_t)&tmp, &context);
2890 		if (!error) {
2891 			goto out;
2892 		}
2893 		os_atomic_andnot(&fp->f_flag, FNONBLOCK, relaxed);
2894 		tmp = 0;
2895 		(void)fo_ioctl(fp, FIONBIO, (caddr_t)&tmp, &context);
2896 		goto out;
2897 
2898 	case F_GETOWN:
2899 		if (fp->f_type == DTYPE_SOCKET) {
2900 			*retval = ((struct socket *)fp_get_data(fp))->so_pgid;
2901 			error = 0;
2902 			goto out;
2903 		}
2904 		error = fo_ioctl(fp, TIOCGPGRP, (caddr_t)retval, &context);
2905 		*retval = -*retval;
2906 		goto out;
2907 
2908 	case F_SETOWN:
2909 		tmp = CAST_DOWN_EXPLICIT(pid_t, uap->arg); /* arg is an int, so we won't lose bits */
2910 		AUDIT_ARG(value32, tmp);
2911 		if (fp->f_type == DTYPE_SOCKET) {
2912 			((struct socket *)fp_get_data(fp))->so_pgid = tmp;
2913 			error = 0;
2914 			goto out;
2915 		}
2916 		if (fp->f_type == DTYPE_PIPE) {
2917 			error =  fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
2918 			goto out;
2919 		}
2920 
2921 		if (tmp <= 0) {
2922 			tmp = -tmp;
2923 		} else {
2924 			proc_t p1 = proc_find(tmp);
2925 			if (p1 == 0) {
2926 				error = ESRCH;
2927 				goto out;
2928 			}
2929 			tmp = (int)p1->p_pgrpid;
2930 			proc_rele(p1);
2931 		}
2932 		error =  fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
2933 		goto out;
2934 
2935 	case F_SETNOSIGPIPE:
2936 		tmp = CAST_DOWN_EXPLICIT(int, uap->arg);
2937 		if (fp->f_type == DTYPE_SOCKET) {
2938 #if SOCKETS
2939 			error = sock_setsockopt((struct socket *)fp_get_data(fp),
2940 			    SOL_SOCKET, SO_NOSIGPIPE, &tmp, sizeof(tmp));
2941 #else
2942 			error = EINVAL;
2943 #endif
2944 		} else {
2945 			struct fileglob *fg = fp->fp_glob;
2946 
2947 			lck_mtx_lock_spin(&fg->fg_lock);
2948 			if (tmp) {
2949 				fg->fg_lflags |= FG_NOSIGPIPE;
2950 			} else {
2951 				fg->fg_lflags &= ~FG_NOSIGPIPE;
2952 			}
2953 			lck_mtx_unlock(&fg->fg_lock);
2954 			error = 0;
2955 		}
2956 		goto out;
2957 
2958 	case F_GETNOSIGPIPE:
2959 		if (fp->f_type == DTYPE_SOCKET) {
2960 #if SOCKETS
2961 			int retsize = sizeof(*retval);
2962 			error = sock_getsockopt((struct socket *)fp_get_data(fp),
2963 			    SOL_SOCKET, SO_NOSIGPIPE, retval, &retsize);
2964 #else
2965 			error = EINVAL;
2966 #endif
2967 		} else {
2968 			*retval = (fp->fp_glob->fg_lflags & FG_NOSIGPIPE) ?
2969 			    1 : 0;
2970 			error = 0;
2971 		}
2972 		goto out;
2973 
2974 	case F_SETCONFINED:
2975 		/*
2976 		 * If this is the only reference to this fglob in the process
2977 		 * and it's already marked as close-on-fork then mark it as
2978 		 * (immutably) "confined" i.e. any fd that points to it will
2979 		 * forever be close-on-fork, and attempts to use an IPC
2980 		 * mechanism to move the descriptor elsewhere will fail.
2981 		 */
2982 		if (CAST_DOWN_EXPLICIT(int, uap->arg)) {
2983 			struct fileglob *fg = fp->fp_glob;
2984 
2985 			lck_mtx_lock_spin(&fg->fg_lock);
2986 			if (fg->fg_lflags & FG_CONFINED) {
2987 				error = 0;
2988 			} else if (1 != os_ref_get_count_raw(&fg->fg_count)) {
2989 				error = EAGAIN; /* go close the dup .. */
2990 			} else if (fp->fp_flags & FP_CLOFORK) {
2991 				fg->fg_lflags |= FG_CONFINED;
2992 				error = 0;
2993 			} else {
2994 				error = EBADF;  /* open without O_CLOFORK? */
2995 			}
2996 			lck_mtx_unlock(&fg->fg_lock);
2997 		} else {
2998 			/*
2999 			 * Other subsystems may have built on the immutability
3000 			 * of FG_CONFINED; clearing it may be tricky.
3001 			 */
3002 			error = EPERM;          /* immutable */
3003 		}
3004 		goto out;
3005 
3006 	case F_GETCONFINED:
3007 		*retval = (fp->fp_glob->fg_lflags & FG_CONFINED) ? 1 : 0;
3008 		error = 0;
3009 		goto out;
3010 
3011 	case F_SETLKWTIMEOUT:
3012 	case F_SETLKW:
3013 	case F_OFD_SETLKWTIMEOUT:
3014 	case F_OFD_SETLKW:
3015 		flg |= F_WAIT;
3016 		OS_FALLTHROUGH;
3017 
3018 	case F_SETLK:
3019 	case F_OFD_SETLK:
3020 		if (fp->f_type != DTYPE_VNODE) {
3021 			error = EBADF;
3022 			goto out;
3023 		}
3024 		vp = (struct vnode *)fp_get_data(fp);
3025 
3026 		fflag = fp->f_flag;
3027 		offset = fp->f_offset;
3028 		proc_fdunlock(p);
3029 
3030 		/* Copy in the lock structure */
3031 		if (F_SETLKWTIMEOUT == cmd || F_OFD_SETLKWTIMEOUT == cmd) {
3032 			error = copyin(argp, (caddr_t) &fltimeout, sizeof(fltimeout));
3033 			if (error) {
3034 				goto outdrop;
3035 			}
3036 			fl = fltimeout.fl;
3037 			timeout = &fltimeout.timeout;
3038 		} else {
3039 			error = copyin(argp, (caddr_t)&fl, sizeof(fl));
3040 			if (error) {
3041 				goto outdrop;
3042 			}
3043 		}
3044 
3045 		/* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
3046 		/* and ending byte for EOVERFLOW in SEEK_SET */
3047 		error = check_file_seek_range(&fl, offset);
3048 		if (error) {
3049 			goto outdrop;
3050 		}
3051 
3052 		if ((error = vnode_getwithref(vp))) {
3053 			goto outdrop;
3054 		}
3055 		if (fl.l_whence == SEEK_CUR) {
3056 			fl.l_start += offset;
3057 		}
3058 
3059 #if CONFIG_MACF
3060 		error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob,
3061 		    F_SETLK, &fl);
3062 		if (error) {
3063 			(void)vnode_put(vp);
3064 			goto outdrop;
3065 		}
3066 #endif
3067 
3068 #if CONFIG_FILE_LEASES
3069 		(void)vnode_breaklease(vp, O_WRONLY, vfs_context_current());
3070 #endif
3071 
3072 		switch (cmd) {
3073 		case F_OFD_SETLK:
3074 		case F_OFD_SETLKW:
3075 		case F_OFD_SETLKWTIMEOUT:
3076 			flg |= F_OFD_LOCK;
3077 			if (fp->fp_glob->fg_lflags & FG_CONFINED) {
3078 				flg |= F_CONFINED;
3079 			}
3080 			switch (fl.l_type) {
3081 			case F_RDLCK:
3082 				if ((fflag & FREAD) == 0) {
3083 					error = EBADF;
3084 					break;
3085 				}
3086 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3087 				    F_SETLK, &fl, flg, &context, timeout);
3088 				break;
3089 			case F_WRLCK:
3090 				if ((fflag & FWRITE) == 0) {
3091 					error = EBADF;
3092 					break;
3093 				}
3094 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3095 				    F_SETLK, &fl, flg, &context, timeout);
3096 				break;
3097 			case F_UNLCK:
3098 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3099 				    F_UNLCK, &fl, F_OFD_LOCK, &context,
3100 				    timeout);
3101 				break;
3102 			default:
3103 				error = EINVAL;
3104 				break;
3105 			}
3106 			if (0 == error &&
3107 			    (F_RDLCK == fl.l_type || F_WRLCK == fl.l_type)) {
3108 				struct fileglob *fg = fp->fp_glob;
3109 
3110 				/*
3111 				 * arrange F_UNLCK on last close (once
3112 				 * set, FG_HAS_OFDLOCK is immutable)
3113 				 */
3114 				if ((fg->fg_lflags & FG_HAS_OFDLOCK) == 0) {
3115 					lck_mtx_lock_spin(&fg->fg_lock);
3116 					fg->fg_lflags |= FG_HAS_OFDLOCK;
3117 					lck_mtx_unlock(&fg->fg_lock);
3118 				}
3119 			}
3120 			break;
3121 		default:
3122 			flg |= F_POSIX;
3123 			switch (fl.l_type) {
3124 			case F_RDLCK:
3125 				if ((fflag & FREAD) == 0) {
3126 					error = EBADF;
3127 					break;
3128 				}
3129 				// XXX UInt32 unsafe for LP64 kernel
3130 				os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
3131 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3132 				    F_SETLK, &fl, flg, &context, timeout);
3133 				break;
3134 			case F_WRLCK:
3135 				if ((fflag & FWRITE) == 0) {
3136 					error = EBADF;
3137 					break;
3138 				}
3139 				// XXX UInt32 unsafe for LP64 kernel
3140 				os_atomic_or(&p->p_ladvflag, P_LADVLOCK, relaxed);
3141 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3142 				    F_SETLK, &fl, flg, &context, timeout);
3143 				break;
3144 			case F_UNLCK:
3145 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3146 				    F_UNLCK, &fl, F_POSIX, &context, timeout);
3147 				break;
3148 			default:
3149 				error = EINVAL;
3150 				break;
3151 			}
3152 			break;
3153 		}
3154 		(void) vnode_put(vp);
3155 		goto outdrop;
3156 
3157 	case F_GETLK:
3158 	case F_OFD_GETLK:
3159 	case F_GETLKPID:
3160 	case F_OFD_GETLKPID:
3161 		if (fp->f_type != DTYPE_VNODE) {
3162 			error = EBADF;
3163 			goto out;
3164 		}
3165 		vp = (struct vnode *)fp_get_data(fp);
3166 
3167 		offset = fp->f_offset;
3168 		proc_fdunlock(p);
3169 
3170 		/* Copy in the lock structure */
3171 		error = copyin(argp, (caddr_t)&fl, sizeof(fl));
3172 		if (error) {
3173 			goto outdrop;
3174 		}
3175 
3176 		/* Check starting byte and ending byte for EOVERFLOW in SEEK_CUR */
3177 		/* and ending byte for EOVERFLOW in SEEK_SET */
3178 		error = check_file_seek_range(&fl, offset);
3179 		if (error) {
3180 			goto outdrop;
3181 		}
3182 
3183 		if ((fl.l_whence == SEEK_SET) && (fl.l_start < 0)) {
3184 			error = EINVAL;
3185 			goto outdrop;
3186 		}
3187 
3188 		switch (fl.l_type) {
3189 		case F_RDLCK:
3190 		case F_UNLCK:
3191 		case F_WRLCK:
3192 			break;
3193 		default:
3194 			error = EINVAL;
3195 			goto outdrop;
3196 		}
3197 
3198 		switch (fl.l_whence) {
3199 		case SEEK_CUR:
3200 		case SEEK_SET:
3201 		case SEEK_END:
3202 			break;
3203 		default:
3204 			error = EINVAL;
3205 			goto outdrop;
3206 		}
3207 
3208 		if ((error = vnode_getwithref(vp)) == 0) {
3209 			if (fl.l_whence == SEEK_CUR) {
3210 				fl.l_start += offset;
3211 			}
3212 
3213 #if CONFIG_MACF
3214 			error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob,
3215 			    cmd, &fl);
3216 			if (error == 0)
3217 #endif
3218 			switch (cmd) {
3219 			case F_OFD_GETLK:
3220 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3221 				    F_GETLK, &fl, F_OFD_LOCK, &context, NULL);
3222 				break;
3223 			case F_OFD_GETLKPID:
3224 				error = VNOP_ADVLOCK(vp, ofd_to_id(fp->fp_glob),
3225 				    F_GETLKPID, &fl, F_OFD_LOCK, &context, NULL);
3226 				break;
3227 			default:
3228 				error = VNOP_ADVLOCK(vp, (caddr_t)p,
3229 				    cmd, &fl, F_POSIX, &context, NULL);
3230 				break;
3231 			}
3232 
3233 			(void)vnode_put(vp);
3234 
3235 			if (error == 0) {
3236 				error = copyout((caddr_t)&fl, argp, sizeof(fl));
3237 			}
3238 		}
3239 		goto outdrop;
3240 
3241 	case F_PREALLOCATE: {
3242 		fstore_t alloc_struct;    /* structure for allocate command */
3243 		u_int32_t alloc_flags = 0;
3244 
3245 		if (fp->f_type != DTYPE_VNODE) {
3246 			error = EBADF;
3247 			goto out;
3248 		}
3249 
3250 		vp = (struct vnode *)fp_get_data(fp);
3251 		proc_fdunlock(p);
3252 
3253 		/* make sure that we have write permission */
3254 		if ((fp->f_flag & FWRITE) == 0) {
3255 			error = EBADF;
3256 			goto outdrop;
3257 		}
3258 
3259 		error = copyin(argp, (caddr_t)&alloc_struct, sizeof(alloc_struct));
3260 		if (error) {
3261 			goto outdrop;
3262 		}
3263 
3264 		/* now set the space allocated to 0 */
3265 		alloc_struct.fst_bytesalloc = 0;
3266 
3267 		/*
3268 		 * Do some simple parameter checking
3269 		 */
3270 
3271 		/* set up the flags */
3272 
3273 		alloc_flags |= PREALLOCATE;
3274 
3275 		if (alloc_struct.fst_flags & F_ALLOCATECONTIG) {
3276 			alloc_flags |= ALLOCATECONTIG;
3277 		}
3278 
3279 		if (alloc_struct.fst_flags & F_ALLOCATEALL) {
3280 			alloc_flags |= ALLOCATEALL;
3281 		}
3282 
3283 		if (alloc_struct.fst_flags & F_ALLOCATEPERSIST) {
3284 			alloc_flags |= ALLOCATEPERSIST;
3285 		}
3286 
3287 		/*
3288 		 * Do any position mode specific stuff.  The only
3289 		 * position mode  supported now is PEOFPOSMODE
3290 		 */
3291 
3292 		switch (alloc_struct.fst_posmode) {
3293 		case F_PEOFPOSMODE:
3294 			if (alloc_struct.fst_offset != 0) {
3295 				error = EINVAL;
3296 				goto outdrop;
3297 			}
3298 
3299 			alloc_flags |= ALLOCATEFROMPEOF;
3300 			break;
3301 
3302 		case F_VOLPOSMODE:
3303 			if (alloc_struct.fst_offset <= 0) {
3304 				error = EINVAL;
3305 				goto outdrop;
3306 			}
3307 
3308 			alloc_flags |= ALLOCATEFROMVOL;
3309 			break;
3310 
3311 		default: {
3312 			error = EINVAL;
3313 			goto outdrop;
3314 		}
3315 		}
3316 		if ((error = vnode_getwithref(vp)) == 0) {
3317 			/*
3318 			 * call allocate to get the space
3319 			 */
3320 			error = VNOP_ALLOCATE(vp, alloc_struct.fst_length, alloc_flags,
3321 			    &alloc_struct.fst_bytesalloc, alloc_struct.fst_offset,
3322 			    &context);
3323 			(void)vnode_put(vp);
3324 
3325 			error2 = copyout((caddr_t)&alloc_struct, argp, sizeof(alloc_struct));
3326 
3327 			if (error == 0) {
3328 				error = error2;
3329 			}
3330 		}
3331 		goto outdrop;
3332 	}
3333 	case F_PUNCHHOLE: {
3334 		fpunchhole_t args;
3335 
3336 		if (fp->f_type != DTYPE_VNODE) {
3337 			error = EBADF;
3338 			goto out;
3339 		}
3340 
3341 		vp = (struct vnode *)fp_get_data(fp);
3342 		proc_fdunlock(p);
3343 
3344 		/* need write permissions */
3345 		if ((fp->f_flag & FWRITE) == 0) {
3346 			error = EPERM;
3347 			goto outdrop;
3348 		}
3349 
3350 		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
3351 			goto outdrop;
3352 		}
3353 
3354 		if ((error = vnode_getwithref(vp))) {
3355 			goto outdrop;
3356 		}
3357 
3358 #if CONFIG_MACF
3359 		if ((error = mac_vnode_check_write(&context, fp->fp_glob->fg_cred, vp))) {
3360 			(void)vnode_put(vp);
3361 			goto outdrop;
3362 		}
3363 #endif
3364 
3365 		error = VNOP_IOCTL(vp, F_PUNCHHOLE, (caddr_t)&args, 0, &context);
3366 		(void)vnode_put(vp);
3367 
3368 		goto outdrop;
3369 	}
3370 	case F_TRIM_ACTIVE_FILE: {
3371 		ftrimactivefile_t args;
3372 
3373 		if (priv_check_cred(kauth_cred_get(), PRIV_TRIM_ACTIVE_FILE, 0)) {
3374 			error = EACCES;
3375 			goto out;
3376 		}
3377 
3378 		if (fp->f_type != DTYPE_VNODE) {
3379 			error = EBADF;
3380 			goto out;
3381 		}
3382 
3383 		vp = (struct vnode *)fp_get_data(fp);
3384 		proc_fdunlock(p);
3385 
3386 		/* need write permissions */
3387 		if ((fp->f_flag & FWRITE) == 0) {
3388 			error = EPERM;
3389 			goto outdrop;
3390 		}
3391 
3392 		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
3393 			goto outdrop;
3394 		}
3395 
3396 		if ((error = vnode_getwithref(vp))) {
3397 			goto outdrop;
3398 		}
3399 
3400 		error = VNOP_IOCTL(vp, F_TRIM_ACTIVE_FILE, (caddr_t)&args, 0, &context);
3401 		(void)vnode_put(vp);
3402 
3403 		goto outdrop;
3404 	}
3405 	case F_SPECULATIVE_READ: {
3406 		fspecread_t args;
3407 		off_t temp_length = 0;
3408 
3409 		if (fp->f_type != DTYPE_VNODE) {
3410 			error = EBADF;
3411 			goto out;
3412 		}
3413 
3414 		vp = (struct vnode *)fp_get_data(fp);
3415 		proc_fdunlock(p);
3416 
3417 		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
3418 			goto outdrop;
3419 		}
3420 
3421 		/* Discard invalid offsets or lengths */
3422 		if ((args.fsr_offset < 0) || (args.fsr_length < 0)) {
3423 			error = EINVAL;
3424 			goto outdrop;
3425 		}
3426 
3427 		/*
3428 		 * Round the file offset down to a page-size boundary (or to 0).
3429 		 * The filesystem will need to round the length up to the end of the page boundary
3430 		 * or to the EOF of the file.
3431 		 */
3432 		uint64_t foff = (((uint64_t)args.fsr_offset) & ~((uint64_t)PAGE_MASK));
3433 		uint64_t foff_delta = args.fsr_offset - foff;
3434 		args.fsr_offset = (off_t) foff;
3435 
3436 		/*
3437 		 * Now add in the delta to the supplied length. Since we may have adjusted the
3438 		 * offset, increase it by the amount that we adjusted.
3439 		 */
3440 		if (os_add_overflow(args.fsr_length, foff_delta, &args.fsr_length)) {
3441 			error = EOVERFLOW;
3442 			goto outdrop;
3443 		}
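		/*
		 * Illustrative example (not part of the original source),
		 * assuming a 4K page (PAGE_MASK == 0xfff): fsr_offset == 10000
		 * and fsr_length == 4096 become fsr_offset == 8192 with
		 * foff_delta == 1808, and the length grows to 5904 so the
		 * originally requested byte range stays covered.
		 */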
3444 
3445 		/*
3446 		 * Make sure (fsr_offset + fsr_length) does not overflow.
3447 		 */
3448 		if (os_add_overflow(args.fsr_offset, args.fsr_length, &temp_length)) {
3449 			error = EOVERFLOW;
3450 			goto outdrop;
3451 		}
3452 
3453 		if ((error = vnode_getwithref(vp))) {
3454 			goto outdrop;
3455 		}
3456 		error = VNOP_IOCTL(vp, F_SPECULATIVE_READ, (caddr_t)&args, 0, &context);
3457 		(void)vnode_put(vp);
3458 
3459 		goto outdrop;
3460 	}
3461 	case F_ATTRIBUTION_TAG: {
3462 		fattributiontag_t args;
3463 
3464 		if (fp->f_type != DTYPE_VNODE) {
3465 			error = EBADF;
3466 			goto out;
3467 		}
3468 
3469 		vp = (struct vnode *)fp_get_data(fp);
3470 		proc_fdunlock(p);
3471 
3472 		if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
3473 			goto outdrop;
3474 		}
3475 
3476 		if ((error = vnode_getwithref(vp))) {
3477 			goto outdrop;
3478 		}
3479 
3480 		error = VNOP_IOCTL(vp, F_ATTRIBUTION_TAG, (caddr_t)&args, 0, &context);
3481 		(void)vnode_put(vp);
3482 
3483 		if (error == 0) {
3484 			error = copyout((caddr_t)&args, argp, sizeof(args));
3485 		}
3486 
3487 		goto outdrop;
3488 	}
3489 	case F_SETSIZE:
3490 		if (fp->f_type != DTYPE_VNODE) {
3491 			error = EBADF;
3492 			goto out;
3493 		}
3494 
3495 		if ((fp->fp_glob->fg_flag & FWRITE) == 0) {
3496 			error = EBADF;
3497 			goto out;
3498 		}
3499 		vp = (struct vnode *)fp_get_data(fp);
3500 		proc_fdunlock(p);
3501 
3502 		error = copyin(argp, (caddr_t)&offset, sizeof(off_t));
3503 		if (error) {
3504 			goto outdrop;
3505 		}
3506 		AUDIT_ARG(value64, offset);
3507 
3508 		error = vnode_getwithref(vp);
3509 		if (error) {
3510 			goto outdrop;
3511 		}
3512 
3513 #if CONFIG_MACF
3514 		error = mac_vnode_check_truncate(&context,
3515 		    fp->fp_glob->fg_cred, vp);
3516 		if (error) {
3517 			(void)vnode_put(vp);
3518 			goto outdrop;
3519 		}
3520 #endif
3521 		/*
3522 		 * Make sure that we are root.  Growing a file
3523 		 * without zero filling the data is a security hole.
3524 		 */
3525 		if (!kauth_cred_issuser(kauth_cred_get())) {
3526 			error = EACCES;
3527 		} else {
3528 			/*
3529 			 * Require privilege to change file size without zerofill,
3530 			 * else will change the file size and zerofill it.
3531 			 */
3532 			error = priv_check_cred(kauth_cred_get(), PRIV_VFS_SETSIZE, 0);
3533 			if (error == 0) {
3534 				error = vnode_setsize(vp, offset, IO_NOZEROFILL, &context);
3535 			} else {
3536 				error = vnode_setsize(vp, offset, 0, &context);
3537 			}
3538 
3539 #if CONFIG_MACF
3540 			if (error == 0) {
3541 				mac_vnode_notify_truncate(&context, fp->fp_glob->fg_cred, vp);
3542 			}
3543 #endif
3544 		}
3545 
3546 		(void)vnode_put(vp);
3547 		goto outdrop;
3548 
3549 	case F_RDAHEAD:
3550 		if (fp->f_type != DTYPE_VNODE) {
3551 			error = EBADF;
3552 			goto out;
3553 		}
3554 		if (uap->arg) {
3555 			os_atomic_andnot(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
3556 		} else {
3557 			os_atomic_or(&fp->fp_glob->fg_flag, FNORDAHEAD, relaxed);
3558 		}
3559 		goto out;
3560 
3561 	case F_NOCACHE:
3562 	case F_NOCACHE_EXT:
3563 		if ((fp->f_type != DTYPE_VNODE) || (cmd == F_NOCACHE_EXT &&
3564 		    (vnode_vtype((struct vnode *)fp_get_data(fp)) != VREG))) {
3565 			error = EBADF;
3566 			goto out;
3567 		}
3568 		if (uap->arg) {
3569 			os_atomic_or(&fp->fp_glob->fg_flag, FNOCACHE, relaxed);
3570 			if (cmd == F_NOCACHE_EXT) {
3571 				/*
3572 				 * We're reusing the O_NOCTTY bit for this purpose as it is only
3573 				 * used for open(2) and is mutually exclusive with a regular file.
3574 				 */
3575 				os_atomic_or(&fp->fp_glob->fg_flag, O_NOCTTY, relaxed);
3576 			}
3577 		} else {
3578 			os_atomic_andnot(&fp->fp_glob->fg_flag, FNOCACHE | O_NOCTTY, relaxed);
3579 		}
3580 		goto out;
3581 
3582 	case F_NODIRECT:
3583 		if (fp->f_type != DTYPE_VNODE) {
3584 			error = EBADF;
3585 			goto out;
3586 		}
3587 		if (uap->arg) {
3588 			os_atomic_or(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
3589 		} else {
3590 			os_atomic_andnot(&fp->fp_glob->fg_flag, FNODIRECT, relaxed);
3591 		}
3592 		goto out;
3593 
3594 	case F_SINGLE_WRITER:
3595 		if (fp->f_type != DTYPE_VNODE) {
3596 			error = EBADF;
3597 			goto out;
3598 		}
3599 		if (uap->arg) {
3600 			os_atomic_or(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
3601 		} else {
3602 			os_atomic_andnot(&fp->fp_glob->fg_flag, FSINGLE_WRITER, relaxed);
3603 		}
3604 		goto out;
3605 
3606 	case F_GLOBAL_NOCACHE:
3607 		if (fp->f_type != DTYPE_VNODE) {
3608 			error = EBADF;
3609 			goto out;
3610 		}
3611 		vp = (struct vnode *)fp_get_data(fp);
3612 		proc_fdunlock(p);
3613 
3614 		if ((error = vnode_getwithref(vp)) == 0) {
3615 			*retval = vnode_isnocache(vp);
3616 
3617 			if (uap->arg) {
3618 				vnode_setnocache(vp);
3619 			} else {
3620 				vnode_clearnocache(vp);
3621 			}
3622 
3623 			(void)vnode_put(vp);
3624 		}
3625 		goto outdrop;
3626 
3627 	case F_CHECK_OPENEVT:
3628 		if (fp->f_type != DTYPE_VNODE) {
3629 			error = EBADF;
3630 			goto out;
3631 		}
3632 		vp = (struct vnode *)fp_get_data(fp);
3633 		proc_fdunlock(p);
3634 
3635 		if ((error = vnode_getwithref(vp)) == 0) {
3636 			*retval = vnode_is_openevt(vp);
3637 
3638 			if (uap->arg) {
3639 				vnode_set_openevt(vp);
3640 			} else {
3641 				vnode_clear_openevt(vp);
3642 			}
3643 
3644 			(void)vnode_put(vp);
3645 		}
3646 		goto outdrop;
3647 
3648 	case F_RDADVISE: {
3649 		struct radvisory ra_struct;
3650 
3651 		if (fp->f_type != DTYPE_VNODE) {
3652 			error = EBADF;
3653 			goto out;
3654 		}
3655 		vp = (struct vnode *)fp_get_data(fp);
3656 		proc_fdunlock(p);
3657 
3658 		if ((error = copyin(argp, (caddr_t)&ra_struct, sizeof(ra_struct)))) {
3659 			goto outdrop;
3660 		}
3661 		if (ra_struct.ra_offset < 0 || ra_struct.ra_count < 0) {
3662 			error = EINVAL;
3663 			goto outdrop;
3664 		}
3665 		if ((error = vnode_getwithref(vp)) == 0) {
3666 			error = VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, &context);
3667 
3668 			(void)vnode_put(vp);
3669 		}
3670 		goto outdrop;
3671 	}
3672 
3673 	case F_FLUSH_DATA:
3674 
3675 		if (fp->f_type != DTYPE_VNODE) {
3676 			error = EBADF;
3677 			goto out;
3678 		}
3679 		vp = (struct vnode *)fp_get_data(fp);
3680 		proc_fdunlock(p);
3681 
3682 		if ((error = vnode_getwithref(vp)) == 0) {
3683 			error = VNOP_FSYNC(vp, MNT_NOWAIT, &context);
3684 
3685 			(void)vnode_put(vp);
3686 		}
3687 		goto outdrop;
3688 
3689 	case F_LOG2PHYS:
3690 	case F_LOG2PHYS_EXT: {
3691 		struct log2phys l2p_struct = {};    /* structure for allocate command */
3692 		int devBlockSize;
3693 
3694 		off_t file_offset = 0;
3695 		size_t a_size = 0;
3696 		size_t run = 0;
3697 
3698 		if (cmd == F_LOG2PHYS_EXT) {
3699 			error = copyin(argp, (caddr_t)&l2p_struct, sizeof(l2p_struct));
3700 			if (error) {
3701 				goto out;
3702 			}
3703 			file_offset = l2p_struct.l2p_devoffset;
3704 		} else {
3705 			file_offset = fp->f_offset;
3706 		}
3707 		if (fp->f_type != DTYPE_VNODE) {
3708 			error = EBADF;
3709 			goto out;
3710 		}
3711 		vp = (struct vnode *)fp_get_data(fp);
3712 		proc_fdunlock(p);
3713 		if ((error = vnode_getwithref(vp))) {
3714 			goto outdrop;
3715 		}
3716 		error = VNOP_OFFTOBLK(vp, file_offset, &lbn);
3717 		if (error) {
3718 			(void)vnode_put(vp);
3719 			goto outdrop;
3720 		}
3721 		error = VNOP_BLKTOOFF(vp, lbn, &offset);
3722 		if (error) {
3723 			(void)vnode_put(vp);
3724 			goto outdrop;
3725 		}
3726 		devBlockSize = vfs_devblocksize(vnode_mount(vp));
3727 		if (cmd == F_LOG2PHYS_EXT) {
3728 			if (l2p_struct.l2p_contigbytes < 0) {
3729 				vnode_put(vp);
3730 				error = EINVAL;
3731 				goto outdrop;
3732 			}
3733 
3734 			a_size = (size_t)MIN((uint64_t)l2p_struct.l2p_contigbytes, SIZE_MAX);
3735 		} else {
3736 			a_size = devBlockSize;
3737 		}
3738 
3739 		error = VNOP_BLOCKMAP(vp, offset, a_size, &bn, &run, NULL, 0, &context);
3740 
3741 		(void)vnode_put(vp);
3742 
3743 		if (!error) {
3744 			l2p_struct.l2p_flags = 0;       /* for now */
3745 			if (cmd == F_LOG2PHYS_EXT) {
3746 				l2p_struct.l2p_contigbytes = run - (file_offset - offset);
3747 			} else {
3748 				l2p_struct.l2p_contigbytes = 0; /* for now */
3749 			}
3750 
3751 			/*
3752 			 * The block number being -1 suggests that the file offset is not backed
3753 			 * by any real blocks on-disk.  As a result, just let it be passed back up wholesale.
3754 			 */
3755 			if (bn == -1) {
3756 				/* Don't multiply it by the block size */
3757 				l2p_struct.l2p_devoffset = bn;
3758 			} else {
3759 				l2p_struct.l2p_devoffset = bn * devBlockSize;
3760 				l2p_struct.l2p_devoffset += file_offset - offset;
3761 			}
3762 			error = copyout((caddr_t)&l2p_struct, argp, sizeof(l2p_struct));
3763 		}
3764 		goto outdrop;
3765 	}
3766 	case F_GETPATH:
3767 	case F_GETPATH_NOFIRMLINK: {
3768 		char *pathbufp;
3769 		size_t pathlen;
3770 
3771 		if (fp->f_type != DTYPE_VNODE) {
3772 			error = EBADF;
3773 			goto out;
3774 		}
3775 		vp = (struct vnode *)fp_get_data(fp);
3776 		proc_fdunlock(p);
3777 
3778 		pathlen = MAXPATHLEN;
3779 		pathbufp = zalloc(ZV_NAMEI);
3780 
3781 		if ((error = vnode_getwithref(vp)) == 0) {
3782 			error = vn_getpath_ext(vp, NULL, pathbufp,
3783 			    &pathlen, cmd == F_GETPATH_NOFIRMLINK ?
3784 			    VN_GETPATH_NO_FIRMLINK : 0);
3785 			(void)vnode_put(vp);
3786 
3787 			if (error == 0) {
3788 				error = copyout((caddr_t)pathbufp, argp, pathlen);
3789 			}
3790 		}
3791 		zfree(ZV_NAMEI, pathbufp);
3792 		goto outdrop;
3793 	}
3794 
3795 	case F_PATHPKG_CHECK: {
3796 		char *pathbufp;
3797 		size_t pathlen;
3798 
3799 		if (fp->f_type != DTYPE_VNODE) {
3800 			error = EBADF;
3801 			goto out;
3802 		}
3803 		vp = (struct vnode *)fp_get_data(fp);
3804 		proc_fdunlock(p);
3805 
3806 		pathlen = MAXPATHLEN;
3807 		pathbufp = zalloc(ZV_NAMEI);
3808 
3809 		if ((error = copyinstr(argp, pathbufp, MAXPATHLEN, &pathlen)) == 0) {
3810 			if ((error = vnode_getwithref(vp)) == 0) {
3811 				AUDIT_ARG(text, pathbufp);
3812 				error = vn_path_package_check(vp, pathbufp, (int)pathlen, retval);
3813 
3814 				(void)vnode_put(vp);
3815 			}
3816 		}
3817 		zfree(ZV_NAMEI, pathbufp);
3818 		goto outdrop;
3819 	}
3820 
3821 	case F_CHKCLEAN:   // used by regression tests to see if all dirty pages got cleaned by fsync()
3822 	case F_FULLFSYNC:  // fsync + flush the journal + DKIOCSYNCHRONIZE
3823 	case F_BARRIERFSYNC:  // fsync + barrier
3824 	case F_FREEZE_FS:  // freeze all other fs operations for the fs of this fd
3825 	case F_THAW_FS: {  // thaw all frozen fs operations for the fs of this fd
3826 		if (fp->f_type != DTYPE_VNODE) {
3827 			error = EBADF;
3828 			goto out;
3829 		}
3830 		vp = (struct vnode *)fp_get_data(fp);
3831 		proc_fdunlock(p);
3832 
3833 		if ((error = vnode_getwithref(vp)) == 0) {
3834 			if ((cmd == F_BARRIERFSYNC) &&
3835 			    (vp->v_mount->mnt_supl_kern_flag & MNTK_SUPL_USE_FULLSYNC)) {
3836 				cmd = F_FULLFSYNC;
3837 			}
3838 			error = VNOP_IOCTL(vp, cmd, (caddr_t)NULL, 0, &context);
3839 
3840 			/*
3841 			 * Promote F_BARRIERFSYNC to F_FULLFSYNC if the underlying
3842 			 * filesystem doesn't support it.
3843 			 */
3844 			if ((error == ENOTTY || error == ENOTSUP || error == EINVAL) &&
3845 			    (cmd == F_BARRIERFSYNC)) {
3846 				os_atomic_or(&vp->v_mount->mnt_supl_kern_flag,
3847 				    MNTK_SUPL_USE_FULLSYNC, relaxed);
3848 
3849 				error = VNOP_IOCTL(vp, F_FULLFSYNC, (caddr_t)NULL, 0, &context);
3850 			}
3851 
3852 			(void)vnode_put(vp);
3853 		}
3854 		break;
3855 	}
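	/*
	 * Hedged usage sketch for the fsync-family selectors above.  F_FULLFSYNC
	 * is the documented fcntl(2) command; F_BARRIERFSYNC ("fsync + barrier")
	 * is promoted to F_FULLFSYNC by the code above when the filesystem does
	 * not support it, so a conservative caller might do:
	 *
	 *	#include <fcntl.h>
	 *	#include <unistd.h>
	 *
	 *	if (fcntl(fd, F_BARRIERFSYNC) == -1 &&
	 *	    fcntl(fd, F_FULLFSYNC) == -1) {
	 *		(void)fsync(fd);    // last resort
	 *	}
	 */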
3856 
3857 	/*
3858 	 * SPI (private) for opening a file starting from a dir fd
3859 	 */
3860 	case F_OPENFROM: {
3861 		/* Check if this isn't a valid file descriptor */
3862 		if (fp->f_type != DTYPE_VNODE) {
3863 			error = EBADF;
3864 			goto out;
3865 		}
3866 		vp = (struct vnode *)fp_get_data(fp);
3867 
3868 		return sys_fcntl__OPENFROM(p, fd, cmd, uap->arg, fp, vp, retval);
3869 	}
3870 
3871 	/*
3872 	 * SPI (private) for unlinking a file starting from a dir fd
3873 	 */
3874 	case F_UNLINKFROM: {
3875 		user_addr_t pathname;
3876 
3877 		/* Check if this isn't a valid file descriptor */
3878 		if ((fp->f_type != DTYPE_VNODE) ||
3879 		    (fp->f_flag & FREAD) == 0) {
3880 			error = EBADF;
3881 			goto out;
3882 		}
3883 		vp = (struct vnode *)fp_get_data(fp);
3884 		proc_fdunlock(p);
3885 
3886 		if (vnode_getwithref(vp)) {
3887 			error = ENOENT;
3888 			goto outdrop;
3889 		}
3890 
3891 		/* Only valid for directories */
3892 		if (vp->v_type != VDIR) {
3893 			vnode_put(vp);
3894 			error = ENOTDIR;
3895 			goto outdrop;
3896 		}
3897 
3898 		/*
3899 		 * Only entitled apps may use the credentials of the thread
3900 		 * that opened the file descriptor.
3901 		 * Non-entitled threads will use their own context.
3902 		 */
3903 		if (IOCurrentTaskHasEntitlement(ACCOUNT_OPENFROM_ENTITLEMENT)) {
3904 			has_entitlement = 1;
3905 		}
3906 
3907 		/* Get flags, mode and pathname arguments. */
3908 		if (IS_64BIT_PROCESS(p)) {
3909 			pathname = (user_addr_t)argp;
3910 		} else {
3911 			pathname = CAST_USER_ADDR_T(argp);
3912 		}
3913 
3914 		/* Start the lookup relative to the file descriptor's vnode. */
3915 		error = unlink1(has_entitlement ? &context : vfs_context_current(),
3916 		    vp, pathname, UIO_USERSPACE, 0);
3917 
3918 		vnode_put(vp);
3919 		break;
3920 	}
3921 
3922 #if DEVELOPMENT || DEBUG
3923 	case F_ADDSIGS_MAIN_BINARY:
3924 		csblob_add_flags |= CS_BLOB_ADD_ALLOW_MAIN_BINARY;
3925 		OS_FALLTHROUGH;
3926 #endif
3927 	case F_ADDSIGS:
3928 	case F_ADDFILESIGS:
3929 	case F_ADDFILESIGS_FOR_DYLD_SIM:
3930 	case F_ADDFILESIGS_RETURN:
3931 	case F_ADDFILESIGS_INFO:
3932 	{
3933 		struct cs_blob *blob = NULL;
3934 		struct user_fsignatures fs;
3935 		kern_return_t kr;
3936 		vm_offset_t kernel_blob_addr;
3937 		vm_size_t kernel_blob_size;
3938 		int blob_add_flags = 0;
3939 		const size_t sizeof_fs = (cmd == F_ADDFILESIGS_INFO ?
3940 		    offsetof(struct user_fsignatures, fs_cdhash /* first output element */) :
3941 		    offsetof(struct user_fsignatures, fs_fsignatures_size /* compat */));
3942 
3943 		if (fp->f_type != DTYPE_VNODE) {
3944 			error = EBADF;
3945 			goto out;
3946 		}
3947 		vp = (struct vnode *)fp_get_data(fp);
3948 		proc_fdunlock(p);
3949 
3950 		if (cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
3951 			blob_add_flags |= MAC_VNODE_CHECK_DYLD_SIM;
3952 			if ((proc_getcsflags(p) & CS_KILL) == 0) {
3953 				proc_lock(p);
3954 				proc_csflags_set(p, CS_KILL);
3955 				proc_unlock(p);
3956 			}
3957 		}
3958 
3959 		error = vnode_getwithref(vp);
3960 		if (error) {
3961 			goto outdrop;
3962 		}
3963 
3964 		if (IS_64BIT_PROCESS(p)) {
3965 			error = copyin(argp, &fs, sizeof_fs);
3966 		} else {
3967 			if (cmd == F_ADDFILESIGS_INFO) {
3968 				error = EINVAL;
3969 				vnode_put(vp);
3970 				goto outdrop;
3971 			}
3972 
3973 			struct user32_fsignatures fs32;
3974 
3975 			error = copyin(argp, &fs32, sizeof(fs32));
3976 			fs.fs_file_start = fs32.fs_file_start;
3977 			fs.fs_blob_start = CAST_USER_ADDR_T(fs32.fs_blob_start);
3978 			fs.fs_blob_size = fs32.fs_blob_size;
3979 		}
3980 
3981 		if (error) {
3982 			vnode_put(vp);
3983 			goto outdrop;
3984 		}
3985 
3986 		/*
3987 		 * First check if we have something loaded at this offset
3988 		 */
3989 		blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, fs.fs_file_start);
3990 		if (blob != NULL) {
3991 			/* If this is for dyld_sim revalidate the blob */
3992 			if (cmd == F_ADDFILESIGS_FOR_DYLD_SIM) {
3993 				error = ubc_cs_blob_revalidate(vp, blob, NULL, blob_add_flags, proc_platform(p));
3994 				if (error) {
3995 					blob = NULL;
3996 					if (error != EAGAIN) {
3997 						vnode_put(vp);
3998 						goto outdrop;
3999 					}
4000 				}
4001 			}
4002 		}
4003 
4004 		if (blob == NULL) {
4005 			/*
4006 			 * An arbitrary limit, to prevent someone from mapping in a 20GB blob.  This should cover
4007 			 * our use cases for the immediate future, but note that at the time of this commit, some
4008 			 * platforms are nearing 2MB blob sizes (with a prior soft limit of 2.5MB).
4009 			 *
4010 			 * We should consider how we can manage this more effectively; the above means that some
4011 			 * platforms are using megabytes of memory for signing data; it merely hasn't crossed the
4012 			 * threshold considered ridiculous at the time of this change.
4013 			 */
4014 #define CS_MAX_BLOB_SIZE (40ULL * 1024ULL * 1024ULL)
4015 			if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
4016 				error = E2BIG;
4017 				vnode_put(vp);
4018 				goto outdrop;
4019 			}
4020 
4021 			kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
4022 			kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
4023 			if (kr != KERN_SUCCESS || kernel_blob_size < fs.fs_blob_size) {
4024 				error = ENOMEM;
4025 				vnode_put(vp);
4026 				goto outdrop;
4027 			}
4028 
4029 			if (cmd == F_ADDSIGS || cmd == F_ADDSIGS_MAIN_BINARY) {
4030 				error = copyin(fs.fs_blob_start,
4031 				    (void *) kernel_blob_addr,
4032 				    fs.fs_blob_size);
4033 			} else { /* F_ADDFILESIGS || F_ADDFILESIGS_RETURN || F_ADDFILESIGS_FOR_DYLD_SIM || F_ADDFILESIGS_INFO */
4034 				int resid;
4035 
4036 				error = vn_rdwr(UIO_READ,
4037 				    vp,
4038 				    (caddr_t) kernel_blob_addr,
4039 				    (int)kernel_blob_size,
4040 				    fs.fs_file_start + fs.fs_blob_start,
4041 				    UIO_SYSSPACE,
4042 				    0,
4043 				    kauth_cred_get(),
4044 				    &resid,
4045 				    p);
4046 				if ((error == 0) && resid) {
4047 					/* kernel_blob_size rounded to a page size, but signature may be at end of file */
4048 					memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
4049 				}
4050 			}
4051 
4052 			if (error) {
4053 				ubc_cs_blob_deallocate(kernel_blob_addr,
4054 				    kernel_blob_size);
4055 				vnode_put(vp);
4056 				goto outdrop;
4057 			}
4058 
4059 			blob = NULL;
4060 			error = ubc_cs_blob_add(vp,
4061 			    proc_platform(p),
4062 			    CPU_TYPE_ANY,                       /* not for a specific architecture */
4063 			    CPU_SUBTYPE_ANY,
4064 			    fs.fs_file_start,
4065 			    &kernel_blob_addr,
4066 			    kernel_blob_size,
4067 			    NULL,
4068 			    blob_add_flags,
4069 			    &blob,
4070 			    csblob_add_flags);
4071 
4072 			/* ubc_cs_blob_add() has consumed "kernel_blob_addr" if it is zeroed */
4073 			if (error) {
4074 				if (kernel_blob_addr) {
4075 					ubc_cs_blob_deallocate(kernel_blob_addr,
4076 					    kernel_blob_size);
4077 				}
4078 				vnode_put(vp);
4079 				goto outdrop;
4080 			} else {
4081 #if CHECK_CS_VALIDATION_BITMAP
4082 				ubc_cs_validation_bitmap_allocate( vp );
4083 #endif
4084 			}
4085 		}
4086 
4087 		if (cmd == F_ADDFILESIGS_RETURN || cmd == F_ADDFILESIGS_FOR_DYLD_SIM ||
4088 		    cmd == F_ADDFILESIGS_INFO) {
4089 			/*
4090 			 * The first element of the structure is an
4091 			 * off_t that happens to have the same size for
4092 			 * all archs. Let's overwrite that.
4093 			 */
4094 			off_t end_offset = 0;
4095 			if (blob) {
4096 				end_offset = blob->csb_end_offset;
4097 			}
4098 			error = copyout(&end_offset, argp, sizeof(end_offset));
4099 
4100 			if (error) {
4101 				vnode_put(vp);
4102 				goto outdrop;
4103 			}
4104 		}
4105 
4106 		if (cmd == F_ADDFILESIGS_INFO) {
4107 			/* Return information. What we copy out depends on the size of the
4108 			 * passed in structure, to keep binary compatibility. */
4109 
4110 			if (fs.fs_fsignatures_size >= sizeof(struct user_fsignatures)) {
4111 				// enough room for fs_cdhash[20]+fs_hash_type
4112 
4113 				if (blob != NULL) {
4114 					error = copyout(blob->csb_cdhash,
4115 					    (vm_address_t)argp + offsetof(struct user_fsignatures, fs_cdhash),
4116 					    USER_FSIGNATURES_CDHASH_LEN);
4117 					if (error) {
4118 						vnode_put(vp);
4119 						goto outdrop;
4120 					}
4121 					int hashtype = cs_hash_type(blob->csb_hashtype);
4122 					error = copyout(&hashtype,
4123 					    (vm_address_t)argp + offsetof(struct user_fsignatures, fs_hash_type),
4124 					    sizeof(int));
4125 					if (error) {
4126 						vnode_put(vp);
4127 						goto outdrop;
4128 					}
4129 				}
4130 			}
4131 		}
4132 
4133 		(void) vnode_put(vp);
4134 		break;
4135 	}
4136 #if CONFIG_SUPPLEMENTAL_SIGNATURES
4137 	case F_ADDFILESUPPL:
4138 	{
4139 		struct vnode *ivp;
4140 		struct cs_blob *blob = NULL;
4141 		struct user_fsupplement fs;
4142 		int orig_fd;
4143 		struct fileproc* orig_fp = NULL;
4144 		kern_return_t kr;
4145 		vm_offset_t kernel_blob_addr;
4146 		vm_size_t kernel_blob_size;
4147 
4148 		if (!IS_64BIT_PROCESS(p)) {
4149 			error = EINVAL;
4150 			goto out; // drop fp and unlock fds
4151 		}
4152 
4153 		if (fp->f_type != DTYPE_VNODE) {
4154 			error = EBADF;
4155 			goto out;
4156 		}
4157 
4158 		error = copyin(argp, &fs, sizeof(fs));
4159 		if (error) {
4160 			goto out;
4161 		}
4162 
4163 		orig_fd = fs.fs_orig_fd;
4164 		if ((error = fp_lookup(p, orig_fd, &orig_fp, 1))) {
4165 			printf("CODE SIGNING: Failed to find original file for supplemental signature attachment\n");
4166 			goto out;
4167 		}
4168 
4169 		if (orig_fp->f_type != DTYPE_VNODE) {
4170 			error = EBADF;
4171 			fp_drop(p, orig_fd, orig_fp, 1);
4172 			goto out;
4173 		}
4174 
4175 		ivp = (struct vnode *)fp_get_data(orig_fp);
4176 
4177 		vp = (struct vnode *)fp_get_data(fp);
4178 
4179 		proc_fdunlock(p);
4180 
4181 		error = vnode_getwithref(ivp);
4182 		if (error) {
4183 			fp_drop(p, orig_fd, orig_fp, 0);
4184 			goto outdrop; //drop fp
4185 		}
4186 
4187 		error = vnode_getwithref(vp);
4188 		if (error) {
4189 			vnode_put(ivp);
4190 			fp_drop(p, orig_fd, orig_fp, 0);
4191 			goto outdrop;
4192 		}
4193 
4194 		if (fs.fs_blob_size > CS_MAX_BLOB_SIZE) {
4195 			error = E2BIG;
4196 			goto dropboth; // drop iocounts on vp and ivp, drop orig_fp then drop fp via outdrop
4197 		}
4198 
4199 		kernel_blob_size = CAST_DOWN(vm_size_t, fs.fs_blob_size);
4200 		kr = ubc_cs_blob_allocate(&kernel_blob_addr, &kernel_blob_size);
4201 		if (kr != KERN_SUCCESS) {
4202 			error = ENOMEM;
4203 			goto dropboth;
4204 		}
4205 
4206 		int resid;
4207 		error = vn_rdwr(UIO_READ, vp,
4208 		    (caddr_t)kernel_blob_addr, (int)kernel_blob_size,
4209 		    fs.fs_file_start + fs.fs_blob_start,
4210 		    UIO_SYSSPACE, 0,
4211 		    kauth_cred_get(), &resid, p);
4212 		if ((error == 0) && resid) {
4213 			/* kernel_blob_size rounded to a page size, but signature may be at end of file */
4214 			memset((void *)(kernel_blob_addr + (kernel_blob_size - resid)), 0x0, resid);
4215 		}
4216 
4217 		if (error) {
4218 			ubc_cs_blob_deallocate(kernel_blob_addr,
4219 			    kernel_blob_size);
4220 			goto dropboth;
4221 		}
4222 
4223 		error = ubc_cs_blob_add_supplement(vp, ivp, fs.fs_file_start,
4224 		    &kernel_blob_addr, kernel_blob_size, &blob);
4225 
4226 		/* ubc_cs_blob_add_supplement() has consumed kernel_blob_addr if it is zeroed */
4227 		if (error) {
4228 			if (kernel_blob_addr) {
4229 				ubc_cs_blob_deallocate(kernel_blob_addr,
4230 				    kernel_blob_size);
4231 			}
4232 			goto dropboth;
4233 		}
4234 		vnode_put(ivp);
4235 		vnode_put(vp);
4236 		fp_drop(p, orig_fd, orig_fp, 0);
4237 		break;
4238 
4239 dropboth:
4240 		vnode_put(ivp);
4241 		vnode_put(vp);
4242 		fp_drop(p, orig_fd, orig_fp, 0);
4243 		goto outdrop;
4244 	}
4245 #endif
4246 	case F_GETCODEDIR:
4247 	case F_FINDSIGS: {
4248 		error = ENOTSUP;
4249 		goto out;
4250 	}
4251 	case F_CHECK_LV: {
4252 		struct fileglob *fg;
4253 		fchecklv_t lv = {};
4254 
4255 		if (fp->f_type != DTYPE_VNODE) {
4256 			error = EBADF;
4257 			goto out;
4258 		}
4259 		fg = fp->fp_glob;
4260 		proc_fdunlock(p);
4261 
4262 		if (IS_64BIT_PROCESS(p)) {
4263 			error = copyin(argp, &lv, sizeof(lv));
4264 		} else {
4265 			struct user32_fchecklv lv32 = {};
4266 
4267 			error = copyin(argp, &lv32, sizeof(lv32));
4268 			lv.lv_file_start = lv32.lv_file_start;
4269 			lv.lv_error_message = (void *)(uintptr_t)lv32.lv_error_message;
4270 			lv.lv_error_message_size = lv32.lv_error_message_size;
4271 		}
4272 		if (error) {
4273 			goto outdrop;
4274 		}
4275 
4276 #if CONFIG_MACF
4277 		error = mac_file_check_library_validation(p, fg, lv.lv_file_start,
4278 		    (user_long_t)lv.lv_error_message, lv.lv_error_message_size);
4279 #endif
4280 
4281 		break;
4282 	}
4283 	case F_GETSIGSINFO: {
4284 		struct cs_blob *blob = NULL;
4285 		fgetsigsinfo_t sigsinfo = {};
4286 
4287 		if (fp->f_type != DTYPE_VNODE) {
4288 			error = EBADF;
4289 			goto out;
4290 		}
4291 		vp = (struct vnode *)fp_get_data(fp);
4292 		proc_fdunlock(p);
4293 
4294 		error = vnode_getwithref(vp);
4295 		if (error) {
4296 			goto outdrop;
4297 		}
4298 
4299 		error = copyin(argp, &sigsinfo, sizeof(sigsinfo));
4300 		if (error) {
4301 			vnode_put(vp);
4302 			goto outdrop;
4303 		}
4304 
4305 		blob = ubc_cs_blob_get(vp, CPU_TYPE_ANY, CPU_SUBTYPE_ANY, sigsinfo.fg_file_start);
4306 		if (blob == NULL) {
4307 			error = ENOENT;
4308 			vnode_put(vp);
4309 			goto outdrop;
4310 		}
4311 		switch (sigsinfo.fg_info_request) {
4312 		case GETSIGSINFO_PLATFORM_BINARY:
4313 			sigsinfo.fg_sig_is_platform = blob->csb_platform_binary;
4314 			error = copyout(&sigsinfo.fg_sig_is_platform,
4315 			    (vm_address_t)argp + offsetof(struct fgetsigsinfo, fg_sig_is_platform),
4316 			    sizeof(sigsinfo.fg_sig_is_platform));
4317 			if (error) {
4318 				vnode_put(vp);
4319 				goto outdrop;
4320 			}
4321 			break;
4322 		default:
4323 			error = EINVAL;
4324 			vnode_put(vp);
4325 			goto outdrop;
4326 		}
4327 		vnode_put(vp);
4328 		break;
4329 	}
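	/*
	 * Hedged sketch of a caller of the F_GETSIGSINFO handling above, using
	 * the fgetsigsinfo_t field names seen in this file (header placement is
	 * an assumption):
	 *
	 *	fgetsigsinfo_t info = {
	 *		.fg_file_start   = 0,  // offset of the Mach-O of interest
	 *		.fg_info_request = GETSIGSINFO_PLATFORM_BINARY,
	 *	};
	 *	if (fcntl(fd, F_GETSIGSINFO, &info) == 0) {
	 *		// info.fg_sig_is_platform is nonzero for platform binaries
	 *	}
	 */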
4330 #if CONFIG_PROTECT
4331 	case F_GETPROTECTIONCLASS: {
4332 		if (fp->f_type != DTYPE_VNODE) {
4333 			error = EBADF;
4334 			goto out;
4335 		}
4336 		vp = (struct vnode *)fp_get_data(fp);
4337 
4338 		proc_fdunlock(p);
4339 
4340 		if (vnode_getwithref(vp)) {
4341 			error = ENOENT;
4342 			goto outdrop;
4343 		}
4344 
4345 		struct vnode_attr va;
4346 
4347 		VATTR_INIT(&va);
4348 		VATTR_WANTED(&va, va_dataprotect_class);
4349 		error = VNOP_GETATTR(vp, &va, &context);
4350 		if (!error) {
4351 			if (VATTR_IS_SUPPORTED(&va, va_dataprotect_class)) {
4352 				*retval = va.va_dataprotect_class;
4353 			} else {
4354 				error = ENOTSUP;
4355 			}
4356 		}
4357 
4358 		vnode_put(vp);
4359 		break;
4360 	}
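	/*
	 * Hedged sketch of reading the data-protection class via the selector
	 * handled above; the class is returned as the fcntl() return value
	 * (*retval), so no argument structure is needed:
	 *
	 *	int cls = fcntl(fd, F_GETPROTECTIONCLASS, 0);
	 *	if (cls == -1) {
	 *		// ENOTSUP if the filesystem reports no protection class
	 *	}
	 */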
4361 
4362 	case F_SETPROTECTIONCLASS: {
4363 		/* tmp must be a valid PROTECTION_CLASS_* */
4364 		tmp = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
4365 
4366 		if (fp->f_type != DTYPE_VNODE) {
4367 			error = EBADF;
4368 			goto out;
4369 		}
4370 		vp = (struct vnode *)fp_get_data(fp);
4371 
4372 		proc_fdunlock(p);
4373 
4374 		if (vnode_getwithref(vp)) {
4375 			error = ENOENT;
4376 			goto outdrop;
4377 		}
4378 
4379 		/* Only go forward if you have write access */
4380 		vfs_context_t ctx = vfs_context_current();
4381 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4382 			vnode_put(vp);
4383 			error = EBADF;
4384 			goto outdrop;
4385 		}
4386 
4387 		struct vnode_attr va;
4388 
4389 		VATTR_INIT(&va);
4390 		VATTR_SET(&va, va_dataprotect_class, tmp);
4391 
4392 		error = VNOP_SETATTR(vp, &va, ctx);
4393 
4394 		vnode_put(vp);
4395 		break;
4396 	}
4397 
4398 	case F_TRANSCODEKEY: {
4399 		if (fp->f_type != DTYPE_VNODE) {
4400 			error = EBADF;
4401 			goto out;
4402 		}
4403 
4404 		vp = (struct vnode *)fp_get_data(fp);
4405 		proc_fdunlock(p);
4406 
4407 		if (vnode_getwithref(vp)) {
4408 			error = ENOENT;
4409 			goto outdrop;
4410 		}
4411 
4412 		cp_key_t k = {
4413 			.len = CP_MAX_WRAPPEDKEYSIZE,
4414 		};
4415 
4416 		k.key = kalloc_data(CP_MAX_WRAPPEDKEYSIZE, Z_WAITOK | Z_ZERO);
4417 		if (k.key == NULL) {
4418 			error = ENOMEM;
4419 		} else {
4420 			error = VNOP_IOCTL(vp, F_TRANSCODEKEY, (caddr_t)&k, 1, &context);
4421 		}
4422 
4423 		vnode_put(vp);
4424 
4425 		if (error == 0) {
4426 			error = copyout(k.key, argp, k.len);
4427 			*retval = k.len;
4428 		}
4429 		kfree_data(k.key, CP_MAX_WRAPPEDKEYSIZE);
4430 
4431 		break;
4432 	}
4433 
4434 	case F_GETPROTECTIONLEVEL:  {
4435 		if (fp->f_type != DTYPE_VNODE) {
4436 			error = EBADF;
4437 			goto out;
4438 		}
4439 
4440 		vp = (struct vnode*)fp_get_data(fp);
4441 		proc_fdunlock(p);
4442 
4443 		if (vnode_getwithref(vp)) {
4444 			error = ENOENT;
4445 			goto outdrop;
4446 		}
4447 
4448 		error = VNOP_IOCTL(vp, F_GETPROTECTIONLEVEL, (caddr_t)retval, 0, &context);
4449 
4450 		vnode_put(vp);
4451 		break;
4452 	}
4453 
4454 	case F_GETDEFAULTPROTLEVEL:  {
4455 		if (fp->f_type != DTYPE_VNODE) {
4456 			error = EBADF;
4457 			goto out;
4458 		}
4459 
4460 		vp = (struct vnode*)fp_get_data(fp);
4461 		proc_fdunlock(p);
4462 
4463 		if (vnode_getwithref(vp)) {
4464 			error = ENOENT;
4465 			goto outdrop;
4466 		}
4467 
4468 		/*
4469 		 * if cp_get_major_vers fails, error will be set to proper errno
4470 		 * and cp_version will still be 0.
4471 		 */
4472 
4473 		error = VNOP_IOCTL(vp, F_GETDEFAULTPROTLEVEL, (caddr_t)retval, 0, &context);
4474 
4475 		vnode_put(vp);
4476 		break;
4477 	}
4478 
4479 #endif /* CONFIG_PROTECT */
4480 
4481 	case F_MOVEDATAEXTENTS: {
4482 		struct fileproc *fp2 = NULL;
4483 		struct vnode *src_vp = NULLVP;
4484 		struct vnode *dst_vp = NULLVP;
4485 		/* We need to grab the 2nd FD out of the arguments before moving on. */
4486 		int fd2 = CAST_DOWN_EXPLICIT(int32_t, uap->arg);
4487 
4488 		error = priv_check_cred(kauth_cred_get(), PRIV_VFS_MOVE_DATA_EXTENTS, 0);
4489 		if (error) {
4490 			goto out;
4491 		}
4492 
4493 		if (fp->f_type != DTYPE_VNODE) {
4494 			error = EBADF;
4495 			goto out;
4496 		}
4497 
4498 		/*
4499 		 * For now, special case HFS+ and APFS only, since this
4500 		 * is SPI.
4501 		 */
4502 		src_vp = (struct vnode *)fp_get_data(fp);
4503 		if (src_vp->v_tag != VT_HFS && src_vp->v_tag != VT_APFS) {
4504 			error = ENOTSUP;
4505 			goto out;
4506 		}
4507 
4508 		/*
4509 		 * Get the references before we start acquiring iocounts on the vnodes,
4510 		 * while we still hold the proc fd lock
4511 		 */
4512 		if ((error = fp_lookup(p, fd2, &fp2, 1))) {
4513 			error = EBADF;
4514 			goto out;
4515 		}
4516 		if (fp2->f_type != DTYPE_VNODE) {
4517 			fp_drop(p, fd2, fp2, 1);
4518 			error = EBADF;
4519 			goto out;
4520 		}
4521 		dst_vp = (struct vnode *)fp_get_data(fp2);
4522 		if (dst_vp->v_tag != VT_HFS && dst_vp->v_tag != VT_APFS) {
4523 			fp_drop(p, fd2, fp2, 1);
4524 			error = ENOTSUP;
4525 			goto out;
4526 		}
4527 
4528 #if CONFIG_MACF
4529 		/* Re-do MAC checks against the new FD, pass in a fake argument */
4530 		error = mac_file_check_fcntl(kauth_cred_get(), fp2->fp_glob, cmd, 0);
4531 		if (error) {
4532 			fp_drop(p, fd2, fp2, 1);
4533 			goto out;
4534 		}
4535 #endif
4536 		/* Audit the 2nd FD */
4537 		AUDIT_ARG(fd, fd2);
4538 
4539 		proc_fdunlock(p);
4540 
4541 		if (vnode_getwithref(src_vp)) {
4542 			fp_drop(p, fd2, fp2, 0);
4543 			error = ENOENT;
4544 			goto outdrop;
4545 		}
4546 		if (vnode_getwithref(dst_vp)) {
4547 			vnode_put(src_vp);
4548 			fp_drop(p, fd2, fp2, 0);
4549 			error = ENOENT;
4550 			goto outdrop;
4551 		}
4552 
4553 		/*
4554 		 * Basic asserts; validate they are not the same and that
4555 		 * both live on the same filesystem.
4556 		 */
4557 		if (dst_vp == src_vp) {
4558 			vnode_put(src_vp);
4559 			vnode_put(dst_vp);
4560 			fp_drop(p, fd2, fp2, 0);
4561 			error = EINVAL;
4562 			goto outdrop;
4563 		}
4564 
4565 		if (dst_vp->v_mount != src_vp->v_mount) {
4566 			vnode_put(src_vp);
4567 			vnode_put(dst_vp);
4568 			fp_drop(p, fd2, fp2, 0);
4569 			error = EXDEV;
4570 			goto outdrop;
4571 		}
4572 
4573 		/* Now we have a legit pair of FDs.  Go to work */
4574 
4575 		/* Now check for write access to the target files */
4576 		if (vnode_authorize(src_vp, NULLVP,
4577 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4578 			vnode_put(src_vp);
4579 			vnode_put(dst_vp);
4580 			fp_drop(p, fd2, fp2, 0);
4581 			error = EBADF;
4582 			goto outdrop;
4583 		}
4584 
4585 		if (vnode_authorize(dst_vp, NULLVP,
4586 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4587 			vnode_put(src_vp);
4588 			vnode_put(dst_vp);
4589 			fp_drop(p, fd2, fp2, 0);
4590 			error = EBADF;
4591 			goto outdrop;
4592 		}
4593 
4594 		/* Verify that both vps point to files and not directories */
4595 		if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
4596 			error = EINVAL;
4597 			vnode_put(src_vp);
4598 			vnode_put(dst_vp);
4599 			fp_drop(p, fd2, fp2, 0);
4600 			goto outdrop;
4601 		}
4602 
4603 		/*
4604 		 * The exchangedata syscall handler passes in 0 for the flags to VNOP_EXCHANGE.
4605 		 * We'll pass in our special bit indicating that the new behavior is expected
4606 		 */
4607 
4608 		error = VNOP_EXCHANGE(src_vp, dst_vp, FSOPT_EXCHANGE_DATA_ONLY, &context);
4609 
4610 		vnode_put(src_vp);
4611 		vnode_put(dst_vp);
4612 		fp_drop(p, fd2, fp2, 0);
4613 		break;
4614 	}
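	/*
	 * Hedged sketch of how this SPI is driven from userspace (the caller
	 * needs PRIV_VFS_MOVE_DATA_EXTENTS): the argument is simply the
	 * destination file descriptor, and both regular files must live on the
	 * same HFS+ or APFS volume with write access to each:
	 *
	 *	if (fcntl(src_fd, F_MOVEDATAEXTENTS, dst_fd) == -1) {
	 *		// e.g. EXDEV if the files are on different volumes
	 *	}
	 */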
4615 
4616 	case F_TRANSFEREXTENTS: {
4617 		struct fileproc *fp2 = NULL;
4618 		struct vnode *src_vp = NULLVP;
4619 		struct vnode *dst_vp = NULLVP;
4620 
4621 		/* Get 2nd FD out of the arguments. */
4622 		int fd2 = CAST_DOWN_EXPLICIT(int, uap->arg);
4623 		if (fd2 < 0) {
4624 			error = EINVAL;
4625 			goto out;
4626 		}
4627 
4628 		if (fp->f_type != DTYPE_VNODE) {
4629 			error = EBADF;
4630 			goto out;
4631 		}
4632 
4633 		/*
4634 		 * Only allow this for APFS
4635 		 */
4636 		src_vp = (struct vnode *)fp_get_data(fp);
4637 		if (src_vp->v_tag != VT_APFS) {
4638 			error = ENOTSUP;
4639 			goto out;
4640 		}
4641 
4642 		/*
4643 		 * Get the references before we start acquiring iocounts on the vnodes,
4644 		 * while we still hold the proc fd lock
4645 		 */
4646 		if ((error = fp_lookup(p, fd2, &fp2, 1))) {
4647 			error = EBADF;
4648 			goto out;
4649 		}
4650 		if (fp2->f_type != DTYPE_VNODE) {
4651 			fp_drop(p, fd2, fp2, 1);
4652 			error = EBADF;
4653 			goto out;
4654 		}
4655 		dst_vp = (struct vnode *)fp_get_data(fp2);
4656 		if (dst_vp->v_tag != VT_APFS) {
4657 			fp_drop(p, fd2, fp2, 1);
4658 			error = ENOTSUP;
4659 			goto out;
4660 		}
4661 
4662 #if CONFIG_MACF
4663 		/* Re-do MAC checks against the new FD, pass in a fake argument */
4664 		error = mac_file_check_fcntl(kauth_cred_get(), fp2->fp_glob, cmd, 0);
4665 		if (error) {
4666 			fp_drop(p, fd2, fp2, 1);
4667 			goto out;
4668 		}
4669 #endif
4670 		/* Audit the 2nd FD */
4671 		AUDIT_ARG(fd, fd2);
4672 
4673 		proc_fdunlock(p);
4674 
4675 		if (vnode_getwithref(src_vp)) {
4676 			fp_drop(p, fd2, fp2, 0);
4677 			error = ENOENT;
4678 			goto outdrop;
4679 		}
4680 		if (vnode_getwithref(dst_vp)) {
4681 			vnode_put(src_vp);
4682 			fp_drop(p, fd2, fp2, 0);
4683 			error = ENOENT;
4684 			goto outdrop;
4685 		}
4686 
4687 		/*
4688 		 * Validate they are not the same and that
4689 		 * both live on the same filesystem.
4690 		 */
4691 		if (dst_vp == src_vp) {
4692 			vnode_put(src_vp);
4693 			vnode_put(dst_vp);
4694 			fp_drop(p, fd2, fp2, 0);
4695 			error = EINVAL;
4696 			goto outdrop;
4697 		}
4698 		if (dst_vp->v_mount != src_vp->v_mount) {
4699 			vnode_put(src_vp);
4700 			vnode_put(dst_vp);
4701 			fp_drop(p, fd2, fp2, 0);
4702 			error = EXDEV;
4703 			goto outdrop;
4704 		}
4705 
4706 		/* Verify that both vps point to files and not directories */
4707 		if (!vnode_isreg(src_vp) || !vnode_isreg(dst_vp)) {
4708 			error = EINVAL;
4709 			vnode_put(src_vp);
4710 			vnode_put(dst_vp);
4711 			fp_drop(p, fd2, fp2, 0);
4712 			goto outdrop;
4713 		}
4714 
4715 
4716 		/*
4717 		 * Okay, vps are legit. Check access.  We'll require write access
4718 		 * to both files.
4719 		 */
4720 		if (vnode_authorize(src_vp, NULLVP,
4721 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4722 			vnode_put(src_vp);
4723 			vnode_put(dst_vp);
4724 			fp_drop(p, fd2, fp2, 0);
4725 			error = EBADF;
4726 			goto outdrop;
4727 		}
4728 		if (vnode_authorize(dst_vp, NULLVP,
4729 		    (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), &context) != 0) {
4730 			vnode_put(src_vp);
4731 			vnode_put(dst_vp);
4732 			fp_drop(p, fd2, fp2, 0);
4733 			error = EBADF;
4734 			goto outdrop;
4735 		}
4736 
4737 		/* Pass it on through to the fs */
4738 		error = VNOP_IOCTL(src_vp, cmd, (caddr_t)dst_vp, 0, &context);
4739 
4740 		vnode_put(src_vp);
4741 		vnode_put(dst_vp);
4742 		fp_drop(p, fd2, fp2, 0);
4743 		break;
4744 	}
4745 
4746 	/*
4747 	 * SPI for making a file compressed.
4748 	 */
4749 	case F_MAKECOMPRESSED: {
4750 		uint32_t gcounter = CAST_DOWN_EXPLICIT(uint32_t, uap->arg);
4751 
4752 		if (fp->f_type != DTYPE_VNODE) {
4753 			error = EBADF;
4754 			goto out;
4755 		}
4756 
4757 		vp = (struct vnode*)fp_get_data(fp);
4758 		proc_fdunlock(p);
4759 
4760 		/* get the vnode */
4761 		if (vnode_getwithref(vp)) {
4762 			error = ENOENT;
4763 			goto outdrop;
4764 		}
4765 
4766 		/* Is it a file? */
4767 		if ((vnode_isreg(vp) == 0) && (vnode_islnk(vp) == 0)) {
4768 			vnode_put(vp);
4769 			error = EBADF;
4770 			goto outdrop;
4771 		}
4772 
4773 		/* invoke ioctl to pass off to FS */
4774 		/* Only go forward if you have write access */
4775 		vfs_context_t ctx = vfs_context_current();
4776 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4777 			vnode_put(vp);
4778 			error = EBADF;
4779 			goto outdrop;
4780 		}
4781 
4782 		error = VNOP_IOCTL(vp, cmd, (caddr_t)&gcounter, 0, &context);
4783 
4784 		vnode_put(vp);
4785 		break;
4786 	}
4787 
4788 	/*
4789 	 * SPI (private) for indicating to a filesystem that subsequent writes to
4790 	 * the open FD will be written to the Fastflow.
4791 	 */
4792 	case F_SET_GREEDY_MODE:
4793 	/* Intentionally drop through to the same handler as F_SETSTATICCONTENT.
4794 	 * Both fcntls should pass the argument and their selector into VNOP_IOCTL.
4795 	 */
4796 
4797 	/*
4798 	 * SPI (private) for indicating to a filesystem that subsequent writes to
4799 	 * the open FD will represent static content.
4800 	 */
4801 	case F_SETSTATICCONTENT: {
4802 		caddr_t ioctl_arg = NULL;
4803 
4804 		if (uap->arg) {
4805 			ioctl_arg = (caddr_t) 1;
4806 		}
4807 
4808 		if (fp->f_type != DTYPE_VNODE) {
4809 			error = EBADF;
4810 			goto out;
4811 		}
4812 		vp = (struct vnode *)fp_get_data(fp);
4813 		proc_fdunlock(p);
4814 
4815 		error = vnode_getwithref(vp);
4816 		if (error) {
4817 			error = ENOENT;
4818 			goto outdrop;
4819 		}
4820 
4821 		/* Only go forward if you have write access */
4822 		vfs_context_t ctx = vfs_context_current();
4823 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4824 			vnode_put(vp);
4825 			error = EBADF;
4826 			goto outdrop;
4827 		}
4828 
4829 		error = VNOP_IOCTL(vp, cmd, ioctl_arg, 0, &context);
4830 		(void)vnode_put(vp);
4831 
4832 		break;
4833 	}
4834 
4835 	/*
4836 	 * SPI (private) for indicating to the lower level storage driver that the
4837 	 * subsequent writes should be of a particular IO type (burst, greedy, static),
4838 	 * or other flavors that may be necessary.
4839 	 */
4840 	case F_SETIOTYPE: {
4841 		caddr_t param_ptr;
4842 		uint32_t param;
4843 
4844 		if (uap->arg) {
4845 			/* extract 32 bits of flags from userland */
4846 			param_ptr = (caddr_t) uap->arg;
4847 			param = (uint32_t) param_ptr;
4848 		} else {
4849 			/* If no argument is specified, error out */
4850 			error = EINVAL;
4851 			goto out;
4852 		}
4853 
4854 		/*
4855 		 * Validate the different types of flags that can be specified:
4856 		 * all of them are mutually exclusive for now.
4857 		 */
4858 		switch (param) {
4859 		case F_IOTYPE_ISOCHRONOUS:
4860 			break;
4861 
4862 		default:
4863 			error = EINVAL;
4864 			goto out;
4865 		}
4866 
4867 
4868 		if (fp->f_type != DTYPE_VNODE) {
4869 			error = EBADF;
4870 			goto out;
4871 		}
4872 		vp = (struct vnode *)fp_get_data(fp);
4873 		proc_fdunlock(p);
4874 
4875 		error = vnode_getwithref(vp);
4876 		if (error) {
4877 			error = ENOENT;
4878 			goto outdrop;
4879 		}
4880 
4881 		/* Only go forward if you have write access */
4882 		vfs_context_t ctx = vfs_context_current();
4883 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4884 			vnode_put(vp);
4885 			error = EBADF;
4886 			goto outdrop;
4887 		}
4888 
4889 		error = VNOP_IOCTL(vp, cmd, param_ptr, 0, &context);
4890 		(void)vnode_put(vp);
4891 
4892 		break;
4893 	}
4894 
4895 	/*
4896 	 * Set the vnode pointed to by 'fd'
4897 	 * and tag it as the (potentially future) backing store
4898 	 * for another filesystem
4899 	 */
4900 	case F_SETBACKINGSTORE: {
4901 		if (fp->f_type != DTYPE_VNODE) {
4902 			error = EBADF;
4903 			goto out;
4904 		}
4905 
4906 		vp = (struct vnode *)fp_get_data(fp);
4907 
4908 		if (vp->v_tag != VT_HFS) {
4909 			error = EINVAL;
4910 			goto out;
4911 		}
4912 		proc_fdunlock(p);
4913 
4914 		if (vnode_getwithref(vp)) {
4915 			error = ENOENT;
4916 			goto outdrop;
4917 		}
4918 
4919 		/* only proceed if you have write access */
4920 		vfs_context_t ctx = vfs_context_current();
4921 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_WRITE_DATA), ctx) != 0) {
4922 			vnode_put(vp);
4923 			error = EBADF;
4924 			goto outdrop;
4925 		}
4926 
4927 
4928 		/* If arg != 0, set, otherwise unset */
4929 		if (uap->arg) {
4930 			error = VNOP_IOCTL(vp, cmd, (caddr_t)1, 0, &context);
4931 		} else {
4932 			error = VNOP_IOCTL(vp, cmd, (caddr_t)NULL, 0, &context);
4933 		}
4934 
4935 		vnode_put(vp);
4936 		break;
4937 	}
4938 
4939 	/*
4940 	 * like F_GETPATH, but special semantics for
4941 	 * the mobile time machine handler.
4942 	 */
4943 	case F_GETPATH_MTMINFO: {
4944 		char *pathbufp;
4945 		int pathlen;
4946 
4947 		if (fp->f_type != DTYPE_VNODE) {
4948 			error = EBADF;
4949 			goto out;
4950 		}
4951 		vp = (struct vnode *)fp_get_data(fp);
4952 		proc_fdunlock(p);
4953 
4954 		pathlen = MAXPATHLEN;
4955 		pathbufp = zalloc(ZV_NAMEI);
4956 
4957 		if ((error = vnode_getwithref(vp)) == 0) {
4958 			int backingstore = 0;
4959 
4960 			/* Check for error from vn_getpath before moving on */
4961 			if ((error = vn_getpath(vp, pathbufp, &pathlen)) == 0) {
4962 				if (vp->v_tag == VT_HFS) {
4963 					error = VNOP_IOCTL(vp, cmd, (caddr_t) &backingstore, 0, &context);
4964 				}
4965 				(void)vnode_put(vp);
4966 
4967 				if (error == 0) {
4968 					error = copyout((caddr_t)pathbufp, argp, pathlen);
4969 				}
4970 				if (error == 0) {
4971 					/*
4972 					 * If the copyout was successful, now check to ensure
4973 					 * that this vnode is not a BACKINGSTORE vnode.  mtmd
4974 					 * wants the path regardless.
4975 					 */
4976 					if (backingstore) {
4977 						error = EBUSY;
4978 					}
4979 				}
4980 			} else {
4981 				(void)vnode_put(vp);
4982 			}
4983 		}
4984 
4985 		zfree(ZV_NAMEI, pathbufp);
4986 		goto outdrop;
4987 	}
4988 
4989 	case F_RECYCLE: {
4990 #if !DEBUG && !DEVELOPMENT
4991 		bool allowed = false;
4992 
4993 		//
4994 		// non-debug and non-development kernels have restrictions
4995 		// on who can call this fcntl.  the process has to be marked
4996 		// with the dataless-manipulator entitlement and either the
4997 		// process or thread has to be marked rapid-aging.
4998 		//
4999 		if (!vfs_context_is_dataless_manipulator(&context)) {
5000 			error = EPERM;
5001 			goto out;
5002 		}
5003 
5004 		proc_t proc = vfs_context_proc(&context);
5005 		if (proc && (proc->p_lflag & P_LRAGE_VNODES)) {
5006 			allowed = true;
5007 		} else {
5008 			thread_t thr = vfs_context_thread(&context);
5009 			if (thr) {
5010 				struct uthread *ut = get_bsdthread_info(thr);
5011 
5012 				if (ut && (ut->uu_flag & UT_RAGE_VNODES)) {
5013 					allowed = true;
5014 				}
5015 			}
5016 		}
5017 		if (!allowed) {
5018 			error = EPERM;
5019 			goto out;
5020 		}
5021 #endif
5022 
5023 		if (fp->f_type != DTYPE_VNODE) {
5024 			error = EBADF;
5025 			goto out;
5026 		}
5027 		vp = (struct vnode *)fp_get_data(fp);
5028 		proc_fdunlock(p);
5029 
5030 		vnode_recycle(vp);
5031 		break;
5032 	}
5033 
5034 #if CONFIG_FILE_LEASES
5035 	case F_SETLEASE: {
5036 		struct fileglob *fg;
5037 		int fl_type;
5038 		int expcounts;
5039 
5040 		if (fp->f_type != DTYPE_VNODE) {
5041 			error = EBADF;
5042 			goto out;
5043 		}
5044 		vp = (struct vnode *)fp_get_data(fp);
5045 		fg = fp->fp_glob;
5046 		proc_fdunlock(p);
5047 
5048 		/*
5049 		 * In order to allow a process to avoid breaking
5050 		 * its own leases, the expected open count needs
5051 		 * to be provided to F_SETLEASE when placing write lease.
5052 		 * Similarly, in order to allow a process to place a read lease
5053 		 * after opening the file multiple times in RW mode, the expected
5054 		 * write count needs to be provided to F_SETLEASE when placing a
5055 		 * read lease.
5056 		 *
5057 		 * We use the upper 30 bits of the integer argument (way more than
5058 		 * enough) as the expected open/write count.
5059 		 *
5060 		 * If the caller passed 0 for the expected open count,
5061 		 * assume 1.
5062 		 */
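		/*
		 * For illustration only (hedged): a caller of this SPI would
		 * pack the argument exactly as described above, e.g.
		 *
		 *	// place a write lease, expecting one open on this file
		 *	int arg = (1 << 2) | F_WRLCK;
		 *	(void)fcntl(fd, F_SETLEASE, arg);
		 *
		 * which the lines below decode back into fl_type / expcounts.
		 */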
5063 		fl_type = CAST_DOWN_EXPLICIT(int, uap->arg);
5064 		expcounts = (unsigned int)fl_type >> 2;
5065 		fl_type &= 3;
5066 
5067 		if (fl_type == F_WRLCK && expcounts == 0) {
5068 			expcounts = 1;
5069 		}
5070 
5071 		AUDIT_ARG(value32, fl_type);
5072 
5073 		if ((error = vnode_getwithref(vp))) {
5074 			goto outdrop;
5075 		}
5076 
5077 		/*
5078 		 * Only support for regular file/dir mounted on local-based filesystem.
5079 		 */
5080 		if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
5081 		    !(vfs_flags(vnode_mount(vp)) & MNT_LOCAL)) {
5082 			error = EBADF;
5083 			vnode_put(vp);
5084 			goto outdrop;
5085 		}
5086 
5087 		/* For directory, we only support read lease. */
5088 		if (vnode_vtype(vp) == VDIR && fl_type == F_WRLCK) {
5089 			error = ENOTSUP;
5090 			vnode_put(vp);
5091 			goto outdrop;
5092 		}
5093 
5094 		switch (fl_type) {
5095 		case F_RDLCK:
5096 		case F_WRLCK:
5097 		case F_UNLCK:
5098 			error = vnode_setlease(vp, fg, fl_type, expcounts,
5099 			    vfs_context_current());
5100 			break;
5101 		default:
5102 			error = EINVAL;
5103 			break;
5104 		}
5105 
5106 		vnode_put(vp);
5107 		goto outdrop;
5108 	}
5109 
5110 	case F_GETLEASE: {
5111 		if (fp->f_type != DTYPE_VNODE) {
5112 			error = EBADF;
5113 			goto out;
5114 		}
5115 		vp = (struct vnode *)fp_get_data(fp);
5116 		proc_fdunlock(p);
5117 
5118 		if ((error = vnode_getwithref(vp))) {
5119 			goto outdrop;
5120 		}
5121 
5122 		if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
5123 		    !(vfs_flags(vnode_mount(vp)) & MNT_LOCAL)) {
5124 			error = EBADF;
5125 			vnode_put(vp);
5126 			goto outdrop;
5127 		}
5128 
5129 		error = 0;
5130 		*retval = vnode_getlease(vp);
5131 		vnode_put(vp);
5132 		goto outdrop;
5133 	}
5134 #endif /* CONFIG_FILE_LEASES */
5135 
5136 	/* SPI (private) for asserting background access to a file */
5137 	case F_ASSERT_BG_ACCESS:
5138 	/* SPI (private) for releasing background access to a file */
5139 	case F_RELEASE_BG_ACCESS: {
5140 		/*
5141 		 * Check if the process is platform code, which means
5142 		 * that it is considered part of the Operating System.
5143 		 */
5144 		if (!csproc_get_platform_binary(p)) {
5145 			error = EPERM;
5146 			goto out;
5147 		}
5148 
5149 		if (fp->f_type != DTYPE_VNODE) {
5150 			error = EBADF;
5151 			goto out;
5152 		}
5153 
5154 		vp = (struct vnode *)fp_get_data(fp);
5155 		proc_fdunlock(p);
5156 
5157 		if (vnode_getwithref(vp)) {
5158 			error = ENOENT;
5159 			goto outdrop;
5160 		}
5161 
5162 		/* Verify that vp points to a file and not a directory */
5163 		if (!vnode_isreg(vp)) {
5164 			vnode_put(vp);
5165 			error = EINVAL;
5166 			goto outdrop;
5167 		}
5168 
5169 		/* Only proceed if you have read access */
5170 		if (vnode_authorize(vp, NULLVP, (KAUTH_VNODE_ACCESS | KAUTH_VNODE_READ_DATA), &context) != 0) {
5171 			vnode_put(vp);
5172 			error = EBADF;
5173 			goto outdrop;
5174 		}
5175 
5176 		if (cmd == F_ASSERT_BG_ACCESS) {
5177 			fassertbgaccess_t args;
5178 
5179 			if ((error = copyin(argp, (caddr_t)&args, sizeof(args)))) {
5180 				vnode_put(vp);
5181 				goto outdrop;
5182 			}
5183 
5184 			error = VNOP_IOCTL(vp, F_ASSERT_BG_ACCESS, (caddr_t)&args, 0, &context);
5185 		} else {
5186 			// cmd == F_RELEASE_BG_ACCESS
5187 			error = VNOP_IOCTL(vp, F_RELEASE_BG_ACCESS, (caddr_t)NULL, 0, &context);
5188 		}
5189 
5190 		vnode_put(vp);
5191 
5192 		goto outdrop;
5193 	}
5194 
5195 	default:
5196 		/*
5197 		 * This is an fcntl() that we do not recognize at this level;
5198 		 * if this is a vnode, we send it down into the VNOP_IOCTL
5199 		 * for this vnode; this can include special devices, and will
5200 		 * effectively overload fcntl() to send ioctl()'s.
5201 		 */
5202 		if ((cmd & IOC_VOID) && (cmd & IOC_INOUT)) {
5203 			error = EINVAL;
5204 			goto out;
5205 		}
5206 
5207 		/*
5208 		 * Catch any now-invalid fcntl() selectors.
5209 		 * (When adding a selector to this list, it may be prudent
5210 		 * to consider adding it to the list in fsctl_internal() as well.)
5211 		 */
5212 		switch (cmd) {
5213 		case (int)APFSIOC_REVERT_TO_SNAPSHOT:
5214 		case (int)FSIOC_FIOSEEKHOLE:
5215 		case (int)FSIOC_FIOSEEKDATA:
5216 		case (int)FSIOC_CAS_BSDFLAGS:
5217 		case (int)FSIOC_KERNEL_ROOTAUTH:
5218 		case (int)FSIOC_GRAFT_FS:
5219 		case (int)FSIOC_UNGRAFT_FS:
5220 		case (int)FSIOC_AUTH_FS:
5221 		case HFS_GET_BOOT_INFO:
5222 		case HFS_SET_BOOT_INFO:
5223 		case FIOPINSWAP:
5224 		case F_MARKDEPENDENCY:
5225 		case TIOCREVOKE:
5226 		case TIOCREVOKECLEAR:
5227 			error = EINVAL;
5228 			goto out;
5229 		default:
5230 			break;
5231 		}
5232 
5233 		if (fp->f_type != DTYPE_VNODE) {
5234 			error = EBADF;
5235 			goto out;
5236 		}
5237 		vp = (struct vnode *)fp_get_data(fp);
5238 		proc_fdunlock(p);
5239 
5240 		if ((error = vnode_getwithref(vp)) == 0) {
5241 #define STK_PARAMS 128
5242 			char stkbuf[STK_PARAMS] = {0};
5243 			unsigned int size;
5244 			caddr_t data, memp;
5245 			/*
5246 			 * For this to work properly, we have to copy in the
5247 			 * ioctl() cmd argument if there is one; we must also
5248 			 * check that a command parameter, if present, does
5249 			 * not exceed the maximum command length dictated by
5250 			 * the number of bits we have available in the command
5251 			 * to represent a structure length.  Finally, we have
5252 			 * to copy the results back out, if it is that type of
5253 			 * ioctl().
5254 			 */
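			/*
			 * Illustrative note (hedged): IOCPARM_LEN() pulls the
			 * parameter length out of bits 16..28 of the command
			 * word (see <sys/ioccom.h>), so a selector defined in
			 * a filesystem header as, say,
			 *
			 *	#define MYFSIOC_EXAMPLE _IOWR('z', 1, struct my_args)
			 *
			 * arrives here with size == sizeof(struct my_args) and
			 * both IOC_IN and IOC_OUT set.  MYFSIOC_EXAMPLE and
			 * struct my_args are hypothetical names.
			 */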
5255 			size = IOCPARM_LEN(cmd);
5256 			if (size > IOCPARM_MAX) {
5257 				(void)vnode_put(vp);
5258 				error = EINVAL;
5259 				break;
5260 			}
5261 
5262 			memp = NULL;
5263 			if (size > sizeof(stkbuf)) {
5264 				memp = (caddr_t)kalloc_data(size, Z_WAITOK);
5265 				if (memp == 0) {
5266 					(void)vnode_put(vp);
5267 					error = ENOMEM;
5268 					goto outdrop;
5269 				}
5270 				data = memp;
5271 			} else {
5272 				data = &stkbuf[0];
5273 			}
5274 
5275 			if (cmd & IOC_IN) {
5276 				if (size) {
5277 					/* structure */
5278 					error = copyin(argp, data, size);
5279 					if (error) {
5280 						(void)vnode_put(vp);
5281 						if (memp) {
5282 							kfree_data(memp, size);
5283 						}
5284 						goto outdrop;
5285 					}
5286 
5287 					/* Bzero the section beyond that which was needed */
5288 					if (size <= sizeof(stkbuf)) {
5289 						bzero((((uint8_t*)data) + size), (sizeof(stkbuf) - size));
5290 					}
5291 				} else {
5292 					/* int */
5293 					if (is64bit) {
5294 						*(user_addr_t *)data = argp;
5295 					} else {
5296 						*(uint32_t *)data = (uint32_t)argp;
5297 					}
5298 				}
5299 			} else if ((cmd & IOC_OUT) && size) {
5300 				/*
5301 				 * Zero the buffer so the user always
5302 				 * gets back something deterministic.
5303 				 */
5304 				bzero(data, size);
5305 			} else if (cmd & IOC_VOID) {
5306 				if (is64bit) {
5307 					*(user_addr_t *)data = argp;
5308 				} else {
5309 					*(uint32_t *)data = (uint32_t)argp;
5310 				}
5311 			}
5312 
5313 			error = VNOP_IOCTL(vp, cmd, CAST_DOWN(caddr_t, data), 0, &context);
5314 
5315 			(void)vnode_put(vp);
5316 
5317 			/* Copy any output data to user */
5318 			if (error == 0 && (cmd & IOC_OUT) && size) {
5319 				error = copyout(data, argp, size);
5320 			}
5321 			if (memp) {
5322 				kfree_data(memp, size);
5323 			}
5324 		}
5325 		break;
5326 	}
5327 
5328 outdrop:
5329 	return sys_fcntl_outdrop(p, fd, fp, vp, error);
5330 
5331 out:
5332 	return sys_fcntl_out(p, fd, fp, error);
5333 }
5334 
5335 
5336 /*
5337  * sys_close
5338  *
5339  * Description:	The implementation of the close(2) system call
5340  *
5341  * Parameters:	p			Process in whose per process file table
5342  *					the close is to occur
5343  *		uap->fd			fd to be closed
5344  *		retval			<unused>
5345  *
5346  * Returns:	0			Success
5347  *	fp_lookup:EBADF			Bad file descriptor
5348  *      fp_guard_exception:???          Guarded file descriptor
5349  *	close_internal:EBADF
5350  *	close_internal:???              Anything returnable by a per-fileops
5351  *					close function
5352  */
5353 int
5354 sys_close(proc_t p, struct close_args *uap, __unused int32_t *retval)
5355 {
5356 	kauth_cred_t p_cred = current_cached_proc_cred(p);
5357 
5358 	__pthread_testcancel(1);
5359 	return close_nocancel(p, p_cred, uap->fd);
5360 }
5361 
5362 int
5363 sys_close_nocancel(proc_t p, struct close_nocancel_args *uap, __unused int32_t *retval)
5364 {
5365 	kauth_cred_t p_cred = current_cached_proc_cred(p);
5366 
5367 	return close_nocancel(p, p_cred, uap->fd);
5368 }
5369 
5370 int
5371 close_nocancel(proc_t p, kauth_cred_t p_cred, int fd)
5372 {
5373 	struct fileproc *fp;
5374 
5375 	AUDIT_SYSCLOSE(p, fd);
5376 
5377 	proc_fdlock(p);
5378 	if ((fp = fp_get_noref_locked(p, fd)) == NULL) {
5379 		proc_fdunlock(p);
5380 		return EBADF;
5381 	}
5382 
5383 	if (fp_isguarded(fp, GUARD_CLOSE)) {
5384 		int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
5385 		proc_fdunlock(p);
5386 		return error;
5387 	}
5388 
5389 	return fp_close_and_unlock(p, p_cred, fd, fp, 0);
5390 }
5391 
5392 
5393 /*
5394  * fstat
5395  *
5396  * Description:	Return status information about a file descriptor.
5397  *
5398  * Parameters:	p				The process doing the fstat
5399  *		fd				The fd to stat
5400  *		ub				The user stat buffer
5401  *		xsecurity			The user extended security
5402  *						buffer, or 0 if none
5403  *		xsecurity_size			The size of xsecurity, or 0
5404  *						if no xsecurity
5405  *		isstat64			Flag to indicate 64 bit version
5406  *						for inode size, etc.
5407  *
5408  * Returns:	0				Success
5409  *		EBADF
5410  *		EFAULT
5411  *	fp_lookup:EBADF				Bad file descriptor
5412  *	vnode_getwithref:???
5413  *	copyout:EFAULT
5414  *	vnode_getwithref:???
5415  *	vn_stat:???
5416  *	soo_stat:???
5417  *	pipe_stat:???
5418  *	pshm_stat:???
5419  *	kqueue_stat:???
5420  *
5421  * Notes:	Internal implementation for all other fstat() related
5422  *		functions
5423  *
5424  *		XXX switch on node type is bogus; need a stat in struct
5425  *		XXX fileops instead.
5426  */
5427 static int
5428 fstat(proc_t p, int fd, user_addr_t ub, user_addr_t xsecurity,
5429     user_addr_t xsecurity_size, int isstat64)
5430 {
5431 	struct fileproc *fp;
5432 	union {
5433 		struct stat sb;
5434 		struct stat64 sb64;
5435 	} source;
5436 	union {
5437 		struct user64_stat user64_sb;
5438 		struct user32_stat user32_sb;
5439 		struct user64_stat64 user64_sb64;
5440 		struct user32_stat64 user32_sb64;
5441 	} dest;
5442 	int error, my_size;
5443 	file_type_t type;
5444 	caddr_t data;
5445 	kauth_filesec_t fsec;
5446 	user_size_t xsecurity_bufsize;
5447 	vfs_context_t ctx = vfs_context_current();
5448 	void * sbptr;
5449 
5450 
5451 	AUDIT_ARG(fd, fd);
5452 
5453 	if ((error = fp_lookup(p, fd, &fp, 0)) != 0) {
5454 		return error;
5455 	}
5456 	type = fp->f_type;
5457 	data = (caddr_t)fp_get_data(fp);
5458 	fsec = KAUTH_FILESEC_NONE;
5459 
5460 	sbptr = (void *)&source;
5461 
5462 	switch (type) {
5463 	case DTYPE_VNODE:
5464 		if ((error = vnode_getwithref((vnode_t)data)) == 0) {
5465 			/*
5466 			 * If the caller has the file open, and is not
5467 			 * requesting extended security information, we are
5468 			 * going to let them get the basic stat information.
5469 			 */
5470 			if (xsecurity == USER_ADDR_NULL) {
5471 				error = vn_stat_noauth((vnode_t)data, sbptr, NULL, isstat64, 0, ctx,
5472 				    fp->fp_glob->fg_cred);
5473 			} else {
5474 				error = vn_stat((vnode_t)data, sbptr, &fsec, isstat64, 0, ctx);
5475 			}
5476 
5477 			AUDIT_ARG(vnpath, (struct vnode *)data, ARG_VNODE1);
5478 			(void)vnode_put((vnode_t)data);
5479 		}
5480 		break;
5481 
5482 #if SOCKETS
5483 	case DTYPE_SOCKET:
5484 		error = soo_stat((struct socket *)data, sbptr, isstat64);
5485 		break;
5486 #endif /* SOCKETS */
5487 
5488 	case DTYPE_PIPE:
5489 		error = pipe_stat((void *)data, sbptr, isstat64);
5490 		break;
5491 
5492 	case DTYPE_PSXSHM:
5493 		error = pshm_stat((void *)data, sbptr, isstat64);
5494 		break;
5495 
5496 	case DTYPE_KQUEUE:
5497 		error = kqueue_stat((void *)data, sbptr, isstat64, p);
5498 		break;
5499 
5500 	default:
5501 		error = EBADF;
5502 		goto out;
5503 	}
5504 	if (error == 0) {
5505 		caddr_t sbp;
5506 
5507 		if (isstat64 != 0) {
5508 			source.sb64.st_lspare = 0;
5509 			source.sb64.st_qspare[0] = 0LL;
5510 			source.sb64.st_qspare[1] = 0LL;
5511 
5512 			if (IS_64BIT_PROCESS(p)) {
5513 				munge_user64_stat64(&source.sb64, &dest.user64_sb64);
5514 				my_size = sizeof(dest.user64_sb64);
5515 				sbp = (caddr_t)&dest.user64_sb64;
5516 			} else {
5517 				munge_user32_stat64(&source.sb64, &dest.user32_sb64);
5518 				my_size = sizeof(dest.user32_sb64);
5519 				sbp = (caddr_t)&dest.user32_sb64;
5520 			}
5521 		} else {
5522 			source.sb.st_lspare = 0;
5523 			source.sb.st_qspare[0] = 0LL;
5524 			source.sb.st_qspare[1] = 0LL;
5525 			if (IS_64BIT_PROCESS(p)) {
5526 				munge_user64_stat(&source.sb, &dest.user64_sb);
5527 				my_size = sizeof(dest.user64_sb);
5528 				sbp = (caddr_t)&dest.user64_sb;
5529 			} else {
5530 				munge_user32_stat(&source.sb, &dest.user32_sb);
5531 				my_size = sizeof(dest.user32_sb);
5532 				sbp = (caddr_t)&dest.user32_sb;
5533 			}
5534 		}
5535 
5536 		error = copyout(sbp, ub, my_size);
5537 	}
5538 
5539 	/* caller wants extended security information? */
5540 	if (xsecurity != USER_ADDR_NULL) {
5541 		/* did we get any? */
5542 		if (fsec == KAUTH_FILESEC_NONE) {
5543 			if (susize(xsecurity_size, 0) != 0) {
5544 				error = EFAULT;
5545 				goto out;
5546 			}
5547 		} else {
5548 			/* find the user buffer size */
5549 			xsecurity_bufsize = fusize(xsecurity_size);
5550 
5551 			/* copy out the actual data size */
5552 			if (susize(xsecurity_size, KAUTH_FILESEC_COPYSIZE(fsec)) != 0) {
5553 				error = EFAULT;
5554 				goto out;
5555 			}
5556 
5557 			/* if the caller supplied enough room, copy out to it */
5558 			if (xsecurity_bufsize >= KAUTH_FILESEC_COPYSIZE(fsec)) {
5559 				error = copyout(fsec, xsecurity, KAUTH_FILESEC_COPYSIZE(fsec));
5560 			}
5561 		}
5562 	}
5563 out:
5564 	fp_drop(p, fd, fp, 0);
5565 	if (fsec != NULL) {
5566 		kauth_filesec_free(fsec);
5567 	}
5568 	return error;
5569 }
5570 
5571 
5572 /*
5573  * sys_fstat_extended
5574  *
5575  * Description:	Extended version of fstat supporting returning extended
5576  *		security information
5577  *
5578  * Parameters:	p				The process doing the fstat
5579  *		uap->fd				The fd to stat
5580  *		uap->ub				The user stat buffer
5581  *		uap->xsecurity			The user extended security
5582  *						buffer, or 0 if none
5583  *		uap->xsecurity_size		The size of xsecurity, or 0
5584  *
5585  * Returns:	0				Success
5586  *		!0				Errno (see fstat)
5587  */
5588 int
5589 sys_fstat_extended(proc_t p, struct fstat_extended_args *uap, __unused int32_t *retval)
5590 {
5591 	return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 0);
5592 }
5593 
5594 
5595 /*
5596  * sys_fstat
5597  *
5598  * Description:	Get file status for the file associated with fd
5599  *
5600  * Parameters:	p				The process doing the fstat
5601  *		uap->fd				The fd to stat
5602  *		uap->ub				The user stat buffer
5603  *
5604  * Returns:	0				Success
5605  *		!0				Errno (see fstat)
5606  */
5607 int
5608 sys_fstat(proc_t p, struct fstat_args *uap, __unused int32_t *retval)
5609 {
5610 	return fstat(p, uap->fd, uap->ub, 0, 0, 0);
5611 }
5612 
5613 
5614 /*
5615  * sys_fstat64_extended
5616  *
5617  * Description:	Extended version of fstat64 supporting returning extended
5618  *		security information
5619  *
5620  * Parameters:	p				The process doing the fstat
5621  *		uap->fd				The fd to stat
5622  *		uap->ub				The user stat buffer
5623  *		uap->xsecurity			The user extended security
5624  *						buffer, or 0 if none
5625  *		uap->xsecurity_size		The size of xsecurity, or 0
5626  *
5627  * Returns:	0				Success
5628  *		!0				Errno (see fstat)
5629  */
5630 int
5631 sys_fstat64_extended(proc_t p, struct fstat64_extended_args *uap, __unused int32_t *retval)
5632 {
5633 	return fstat(p, uap->fd, uap->ub, uap->xsecurity, uap->xsecurity_size, 1);
5634 }
5635 
5636 
5637 /*
5638  * sys_fstat64
5639  *
5640  * Description:	Get 64 bit version of the file status for the file associated
5641  *		with fd
5642  *
5643  * Parameters:	p				The process doing the fstat
5644  *		uap->fd				The fd to stat
5645  *		uap->ub				The user stat buffer
5646  *
5647  * Returns:	0				Success
5648  *		!0				Errno (see fstat)
5649  */
5650 int
5651 sys_fstat64(proc_t p, struct fstat64_args *uap, __unused int32_t *retval)
5652 {
5653 	return fstat(p, uap->fd, uap->ub, 0, 0, 1);
5654 }
5655 
5656 
5657 /*
5658  * sys_fpathconf
5659  *
5660  * Description:	Return pathconf information about a file descriptor.
5661  *
5662  * Parameters:	p				Process making the request
5663  *		uap->fd				fd to get information about
5664  *		uap->name			Name of information desired
5665  *		retval				Pointer to the call return area
5666  *
5667  * Returns:	0				Success
5668  *		EINVAL
5669  *	fp_lookup:EBADF				Bad file descriptor
5670  *	vnode_getwithref:???
5671  *	vn_pathconf:???
5672  *
5673  * Implicit returns:
5674  *		*retval (modified)		Returned information (numeric)
5675  */
5676 int
5677 sys_fpathconf(proc_t p, struct fpathconf_args *uap, int32_t *retval)
5678 {
5679 	int fd = uap->fd;
5680 	struct fileproc *fp;
5681 	struct vnode *vp;
5682 	int error = 0;
5683 	file_type_t type;
5684 
5685 
5686 	AUDIT_ARG(fd, uap->fd);
5687 	if ((error = fp_lookup(p, fd, &fp, 0))) {
5688 		return error;
5689 	}
5690 	type = fp->f_type;
5691 
5692 	switch (type) {
5693 	case DTYPE_SOCKET:
5694 		if (uap->name != _PC_PIPE_BUF) {
5695 			error = EINVAL;
5696 			goto out;
5697 		}
5698 		*retval = PIPE_BUF;
5699 		error = 0;
5700 		goto out;
5701 
5702 	case DTYPE_PIPE:
5703 		if (uap->name != _PC_PIPE_BUF) {
5704 			error = EINVAL;
5705 			goto out;
5706 		}
5707 		*retval = PIPE_BUF;
5708 		error = 0;
5709 		goto out;
5710 
5711 	case DTYPE_VNODE:
5712 		vp = (struct vnode *)fp_get_data(fp);
5713 
5714 		if ((error = vnode_getwithref(vp)) == 0) {
5715 			AUDIT_ARG(vnpath, vp, ARG_VNODE1);
5716 
5717 			error = vn_pathconf(vp, uap->name, retval, vfs_context_current());
5718 
5719 			(void)vnode_put(vp);
5720 		}
5721 		goto out;
5722 
5723 	default:
5724 		error = EINVAL;
5725 		goto out;
5726 	}
5727 	/*NOTREACHED*/
5728 out:
5729 	fp_drop(p, fd, fp, 0);
5730 	return error;
5731 }
5732 
5733 /*
5734  * sys_flock
5735  *
5736  * Description:	Apply an advisory lock on a file descriptor.
5737  *
5738  * Parameters:	p				Process making request
5739  *		uap->fd				fd on which the lock is to be
5740  *						attempted
5741  *		uap->how			(Un)Lock bits, including type
5742  *		retval				Pointer to the call return area
5743  *
5744  * Returns:	0				Success
5745  *	fp_getfvp:EBADF				Bad file descriptor
5746  *	fp_getfvp:ENOTSUP			fd does not refer to a vnode
5747  *	vnode_getwithref:???
5748  *	VNOP_ADVLOCK:???
5749  *
5750  * Implicit returns:
5751  *		*retval (modified)		Size of dtable
5752  *		*retval				<unused>
5753  * Notes:	Just attempt to get a record lock of the requested type on
5754  *		the entire file (l_whence = SEEK_SET, l_start = 0, l_len = 0).
5755  */
5756 int
5757 sys_flock(proc_t p, struct flock_args *uap, __unused int32_t *retval)
5758 {
5759 	int fd = uap->fd;
5760 	int how = uap->how;
5761 	struct fileproc *fp;
5762 	struct vnode *vp;
5763 	struct flock lf;
5764 	vfs_context_t ctx = vfs_context_current();
5765 	int error = 0;
5766 
5767 	AUDIT_ARG(fd, uap->fd);
5768 	if ((error = fp_getfvp(p, fd, &fp, &vp))) {
5769 		return error;
5770 	}
5771 	if ((error = vnode_getwithref(vp))) {
5772 		goto out1;
5773 	}
5774 	AUDIT_ARG(vnpath, vp, ARG_VNODE1);
5775 
5776 	lf.l_whence = SEEK_SET;
5777 	lf.l_start = 0;
5778 	lf.l_len = 0;
5779 	if (how & LOCK_UN) {
5780 		lf.l_type = F_UNLCK;
5781 		error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_UNLCK, &lf, F_FLOCK, ctx, NULL);
5782 		goto out;
5783 	}
5784 	if (how & LOCK_EX) {
5785 		lf.l_type = F_WRLCK;
5786 	} else if (how & LOCK_SH) {
5787 		lf.l_type = F_RDLCK;
5788 	} else {
5789 		error = EBADF;
5790 		goto out;
5791 	}
5792 #if CONFIG_MACF
5793 	error = mac_file_check_lock(kauth_cred_get(), fp->fp_glob, F_SETLK, &lf);
5794 	if (error) {
5795 		goto out;
5796 	}
5797 #endif
5798 	error = VNOP_ADVLOCK(vp, (caddr_t)fp->fp_glob, F_SETLK, &lf,
5799 	    (how & LOCK_NB ? F_FLOCK : F_FLOCK | F_WAIT),
5800 	    ctx, NULL);
5801 	if (!error) {
5802 		os_atomic_or(&fp->fp_glob->fg_flag, FWASLOCKED, relaxed);
5803 	}
5804 out:
5805 	(void)vnode_put(vp);
5806 out1:
5807 	fp_drop(p, fd, fp, 0);
5808 	return error;
5809 }
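
/*
 * Hedged usage sketch for sys_flock() above, via the public flock(2) wrapper;
 * LOCK_NB makes the request non-blocking instead of passing F_WAIT down to
 * VNOP_ADVLOCK():
 *
 *	#include <sys/file.h>
 *	#include <errno.h>
 *
 *	if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK) {
 *		// someone else holds a conflicting advisory lock
 *	}
 *	...
 *	(void)flock(fd, LOCK_UN);
 */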
5810 
5811 /*
5812  * sys_fileport_makeport
5813  *
5814  * Description: Obtain a Mach send right for a given file descriptor.
5815  *
5816  * Parameters:	p		Process calling fileport
5817  *              uap->fd		The fd to reference
5818  *              uap->portnamep  User address at which to place port name.
5819  *
5820  * Returns:	0		Success.
5821  *              EBADF		Bad file descriptor.
5822  *              EINVAL		File descriptor had type that cannot be sent, misc. other errors.
5823  *              EFAULT		Address at which to store port name is not valid.
5824  *              EAGAIN		Resource shortage.
5825  *
5826  * Implicit returns:
5827  *		On success, name of send right is stored at user-specified address.
5828  */
5829 int
5830 sys_fileport_makeport(proc_t p, struct fileport_makeport_args *uap,
5831     __unused int *retval)
5832 {
5833 	int err;
5834 	int fd = uap->fd;
5835 	user_addr_t user_portaddr = uap->portnamep;
5836 	struct fileproc *fp = FILEPROC_NULL;
5837 	struct fileglob *fg = NULL;
5838 	ipc_port_t fileport;
5839 	mach_port_name_t name = MACH_PORT_NULL;
5840 
5841 	proc_fdlock(p);
5842 	err = fp_lookup(p, fd, &fp, 1);
5843 	if (err != 0) {
5844 		goto out_unlock;
5845 	}
5846 
5847 	fg = fp->fp_glob;
5848 	if (!fg_sendable(fg)) {
5849 		err = EINVAL;
5850 		goto out_unlock;
5851 	}
5852 
5853 	if (fp_isguarded(fp, GUARD_FILEPORT)) {
5854 		err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
5855 		goto out_unlock;
5856 	}
5857 
5858 	/* Dropped when port is deallocated */
5859 	fg_ref(p, fg);
5860 
5861 	proc_fdunlock(p);
5862 
5863 	/* Allocate and initialize a port */
5864 	fileport = fileport_alloc(fg);
5865 	if (fileport == IPC_PORT_NULL) {
5866 		fg_drop_live(fg);
5867 		err = EAGAIN;
5868 		goto out;
5869 	}
5870 
5871 	/* Add an entry.  Deallocates port on failure. */
5872 	name = ipc_port_copyout_send(fileport, get_task_ipcspace(proc_task(p)));
5873 	if (!MACH_PORT_VALID(name)) {
5874 		err = EINVAL;
5875 		goto out;
5876 	}
5877 
5878 	err = copyout(&name, user_portaddr, sizeof(mach_port_name_t));
5879 	if (err != 0) {
5880 		goto out;
5881 	}
5882 
5883 	/* Tag the fileglob for debugging purposes */
5884 	lck_mtx_lock_spin(&fg->fg_lock);
5885 	fg->fg_lflags |= FG_PORTMADE;
5886 	lck_mtx_unlock(&fg->fg_lock);
5887 
5888 	fp_drop(p, fd, fp, 0);
5889 
5890 	return 0;
5891 
5892 out_unlock:
5893 	proc_fdunlock(p);
5894 out:
5895 	if (MACH_PORT_VALID(name)) {
5896 		/* Don't care if another thread races us to deallocate the entry */
5897 		(void) mach_port_deallocate(get_task_ipcspace(proc_task(p)), name);
5898 	}
5899 
5900 	if (fp != FILEPROC_NULL) {
5901 		fp_drop(p, fd, fp, 0);
5902 	}
5903 
5904 	return err;
5905 }
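/*
 * Example (sketch): the sending side as seen from userspace, assuming
 * the Libsyscall wrapper generated for this syscall (the prototype is
 * written out by hand here and may not appear in a public header).
 *
 *	#include <mach/mach.h>
 *
 *	extern int fileport_makeport(int fd, mach_port_t *portp);
 *
 *	mach_port_t fileport = MACH_PORT_NULL;
 *	if (fileport_makeport(fd, &fileport) == 0) {
 *		// fileport is a send right naming fd's fileglob and can be
 *		// carried in a Mach message to another task
 *	}
 */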
5906 
5907 void
5908 fileport_releasefg(struct fileglob *fg)
5909 {
5910 	(void)fg_drop(FG_NOPROC, fg);
5911 }
5912 
5913 /*
5914  * fileport_makefd
5915  *
5916  * Description: Obtain the file descriptor for a given Mach send right.
5917  *
5918  * Returns:	0		Success
5919  *		EINVAL		Invalid Mach port name, or port is not for a file.
5920  *	fdalloc:EMFILE
5921  *	fdalloc:ENOMEM		Unable to allocate fileproc or extend file table.
5922  *
5923  * Implicit returns:
5924  *		*retval (modified)		The new descriptor
5925  */
5926 int
5927 fileport_makefd(proc_t p, ipc_port_t port, fileproc_flags_t fp_flags, int *retval)
5928 {
5929 	struct fileglob *fg;
5930 	struct fileproc *fp = FILEPROC_NULL;
5931 	int fd;
5932 	int err;
5933 
5934 	fg = fileport_port_to_fileglob(port);
5935 	if (fg == NULL) {
5936 		err = EINVAL;
5937 		goto out;
5938 	}
5939 
5940 	fp = fileproc_alloc_init();
5941 
5942 	proc_fdlock(p);
5943 	err = fdalloc(p, 0, &fd);
5944 	if (err != 0) {
5945 		proc_fdunlock(p);
5946 		goto out;
5947 	}
5948 	if (fp_flags) {
5949 		fp->fp_flags |= fp_flags;
5950 	}
5951 
5952 	fp->fp_glob = fg;
5953 	fg_ref(p, fg);
5954 
5955 	procfdtbl_releasefd(p, fd, fp);
5956 	proc_fdunlock(p);
5957 
5958 	*retval = fd;
5959 	err = 0;
5960 out:
5961 	if ((fp != NULL) && (0 != err)) {
5962 		fileproc_free(fp);
5963 	}
5964 
5965 	return err;
5966 }
5967 
5968 /*
5969  * sys_fileport_makefd
5970  *
5971  * Description: Obtain the file descriptor for a given Mach send right.
5972  *
5973  * Parameters:	p		Process calling fileport
5974  *              uap->port	Name of send right to file port.
5975  *
5976  * Returns:	0		Success
5977  *		EINVAL		Invalid Mach port name, or port is not for a file.
5978  *	fdalloc:EMFILE
5979  *	fdalloc:ENOMEM		Unable to allocate fileproc or extend file table.
5980  *
5981  * Implicit returns:
5982  *		*retval (modified)		The new descriptor
5983  */
5984 int
5985 sys_fileport_makefd(proc_t p, struct fileport_makefd_args *uap, int32_t *retval)
5986 {
5987 	ipc_port_t port = IPC_PORT_NULL;
5988 	mach_port_name_t send = uap->port;
5989 	kern_return_t res;
5990 	int err;
5991 
5992 	res = ipc_object_copyin(get_task_ipcspace(proc_task(p)),
5993 	    send, MACH_MSG_TYPE_COPY_SEND, &port, 0, NULL, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
5994 
5995 	if (res == KERN_SUCCESS) {
5996 		err = fileport_makefd(p, port, FP_CLOEXEC, retval);
5997 	} else {
5998 		err = EINVAL;
5999 	}
6000 
6001 	if (IPC_PORT_NULL != port) {
6002 		ipc_port_release_send(port);
6003 	}
6004 
6005 	return err;
6006 }
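/*
 * Example (sketch): the receiving side, continuing the fileport sketch
 * above.  Once the send right has been received into this task's IPC
 * space, the assumed wrapper turns it back into a descriptor; because
 * sys_fileport_makefd passes FP_CLOEXEC, the new fd is close-on-exec.
 *
 *	extern int fileport_makefd(mach_port_t port);
 *
 *	int newfd = fileport_makefd(received_port);
 *	if (newfd >= 0) {
 *		// newfd shares the sender's fileglob (offset, flags, ...)
 *	}
 */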
6007 
6008 
6009 #pragma mark fileops wrappers
6010 
6011 /*
6012  * fo_read
6013  *
6014  * Description:	Generic fileops read indirected through the fileops pointer
6015  *		in the fileproc structure
6016  *
6017  * Parameters:	fp				fileproc structure pointer
6018  *		uio				user I/O structure pointer
6019  *		flags				FOF_ flags
6020  *		ctx				VFS context for operation
6021  *
6022  * Returns:	0				Success
6023  *		!0				Errno from read
6024  */
6025 int
6026 fo_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6027 {
6028 	return (*fp->f_ops->fo_read)(fp, uio, flags, ctx);
6029 }
6030 
6031 int
6032 fo_no_read(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6033 {
6034 #pragma unused(fp, uio, flags, ctx)
6035 	return ENXIO;
6036 }
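/*
 * Example (sketch): how a file type wires these wrappers up.  The
 * fo_no_* variants act as defaults for operations the type does not
 * support; the struct fields follow the pattern used by other XNU file
 * types, and the example_* names are illustrative only.
 *
 *	static const struct fileops examplefileops = {
 *		.fo_type     = DTYPE_PIPE,	// whichever DTYPE_* applies
 *		.fo_read     = example_read,
 *		.fo_write    = fo_no_write,	// writes not supported
 *		.fo_ioctl    = example_ioctl,
 *		.fo_select   = fo_no_select,
 *		.fo_close    = example_close,
 *		.fo_drain    = fo_no_drain,
 *		.fo_kqfilter = fo_no_kqfilter,
 *	};
 *
 * A call such as fo_read(fp, uio, flags, ctx) then resolves through
 * fp->f_ops->fo_read to example_read().
 */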
6037 
6038 
6039 /*
6040  * fo_write
6041  *
6042  * Description:	Generic fileops write indirected through the fileops pointer
6043  *		in the fileproc structure
6044  *
6045  * Parameters:	fp				fileproc structure pointer
6046  *		uio				user I/O structure pointer
6047  *		flags				FOF_ flags
6048  *		ctx				VFS context for operation
6049  *
6050  * Returns:	0				Success
6051  *		!0				Errno from write
6052  */
6053 int
6054 fo_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6055 {
6056 	return (*fp->f_ops->fo_write)(fp, uio, flags, ctx);
6057 }
6058 
6059 int
6060 fo_no_write(struct fileproc *fp, struct uio *uio, int flags, vfs_context_t ctx)
6061 {
6062 #pragma unused(fp, uio, flags, ctx)
6063 	return ENXIO;
6064 }
6065 
6066 
6067 /*
6068  * fo_ioctl
6069  *
6070  * Description:	Generic fileops ioctl indirected through the fileops pointer
6071  *		in the fileproc structure
6072  *
6073  * Parameters:	fp				fileproc structure pointer
6074  *		com				ioctl command
6075  *		data				pointer to internalized copy
6076  *						of user space ioctl command
6077  *						parameter data in kernel space
6078  *		ctx				VFS context for operation
6079  *
6080  * Returns:	0				Success
6081  *		!0				Errno from ioctl
6082  *
6083  * Locks:	The caller is assumed to have held the proc_fdlock; this
6084  *		function releases and reacquires this lock.  If the caller
6085  *		accesses data protected by this lock prior to calling this
6086  *		function, it will need to revalidate/reacquire any cached
6087  *		protected data obtained prior to the call.
6088  */
6089 int
6090 fo_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
6091 {
6092 	int error;
6093 
6094 	proc_fdunlock(vfs_context_proc(ctx));
6095 	error = (*fp->f_ops->fo_ioctl)(fp, com, data, ctx);
6096 	proc_fdlock(vfs_context_proc(ctx));
6097 	return error;
6098 }
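/*
 * Example (sketch): what the locking note above means for a caller;
 * the surrounding code is illustrative only.
 *
 *	proc_fdlock(p);
 *	// ... look up fp and cache any state derived from the fd table ...
 *	error = fo_ioctl(fp, com, data, ctx);	// drops, then retakes, proc_fdlock
 *	// any fd-table state cached above may be stale here and must be
 *	// revalidated before further use
 *	proc_fdunlock(p);
 */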
6099 
6100 int
6101 fo_no_ioctl(struct fileproc *fp, u_long com, caddr_t data, vfs_context_t ctx)
6102 {
6103 #pragma unused(fp, com, data, ctx)
6104 	return ENOTTY;
6105 }
6106 
6107 
6108 /*
6109  * fo_select
6110  *
6111  * Description:	Generic fileops select indirected through the fileops pointer
6112  *		in the fileproc structure
6113  *
6114  * Parameters:	fp				fileproc structure pointer
6115  *		which				select which
6116  *		wql				pointer to wait queue list
6117  *		ctx				VFS context for operation
6118  *
6119  * Returns:	0				Success
6120  *		!0				Errno from select
6121  */
6122 int
6123 fo_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
6124 {
6125 	return (*fp->f_ops->fo_select)(fp, which, wql, ctx);
6126 }
6127 
6128 int
6129 fo_no_select(struct fileproc *fp, int which, void *wql, vfs_context_t ctx)
6130 {
6131 #pragma unused(fp, which, wql, ctx)
6132 	return ENOTSUP;
6133 }
6134 
6135 
6136 /*
6137  * fo_close
6138  *
6139  * Description:	Generic fileops close indirected through the fileops pointer
6140  *		in the fileproc structure
6141  *
6142  * Parameters:	fp				fileproc structure pointer for
6143  *						file to close
6144  *		ctx				VFS context for operation
6145  *
6146  * Returns:	0				Success
6147  *		!0				Errno from close
6148  */
6149 int
6150 fo_close(struct fileglob *fg, vfs_context_t ctx)
6151 {
6152 	return (*fg->fg_ops->fo_close)(fg, ctx);
6153 }
6154 
6155 
6156 /*
6157  * fo_drain
6158  *
6159  * Description:	Generic fileops drain indirected through the fileops
6160  *		pointer in the fileproc structure
6161  *
6162  * Parameters:	fp				fileproc structure pointer
6163  *		ctx				VFS context for operation
6164  *
6165  * Returns:	0				Success
6166  *		!0				Errno from drain
6167  */
6168 int
6169 fo_drain(struct fileproc *fp, vfs_context_t ctx)
6170 {
6171 	return (*fp->f_ops->fo_drain)(fp, ctx);
6172 }
6173 
6174 int
6175 fo_no_drain(struct fileproc *fp, vfs_context_t ctx)
6176 {
6177 #pragma unused(fp, ctx)
6178 	return ENOTSUP;
6179 }
6180 
6181 
6182 /*
6183  * fo_kqfilter
6184  *
6185  * Description:	Generic fileops kqueue filter indirected through the fileops
6186  *		pointer in the fileproc structure
6187  *
6188  * Parameters:	fp				fileproc structure pointer
6189  *		kn				pointer to knote to filter on
6190  *
6191  * Returns:	(kn->kn_flags & EV_ERROR)	error in kn->kn_data
6192  *		0				Filter is not active
6193  *		!0				Filter is active
6194  */
6195 int
6196 fo_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6197 {
6198 	return (*fp->f_ops->fo_kqfilter)(fp, kn, kev);
6199 }
6200 
6201 int
6202 fo_no_kqfilter(struct fileproc *fp, struct knote *kn, struct kevent_qos_s *kev)
6203 {
6204 #pragma unused(fp, kev)
6205 	knote_set_error(kn, ENOTSUP);
6206 	return 0;
6207 }
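/*
 * Example (sketch): how a caller interprets the fo_kqfilter convention
 * documented above; names are illustrative.
 *
 *	int active = fo_kqfilter(fp, kn, kev);
 *	if (kn->kn_flags & EV_ERROR) {
 *		// attach failed; the errno is in kn->kn_data, which is what
 *		// fo_no_kqfilter arranges via knote_set_error()
 *	} else if (active) {
 *		// the filter is already active; the event can fire immediately
 *	}
 */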
6208