xref: /xnu-11215/bsd/vfs/vfs_subr.c (revision 4f1223e8)
/*
 *
 * Copyright (c) 2000-2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.31 (Berkeley) 5/26/95
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

/*
 * External virtual filesystem routines
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/mount_internal.h>
#include <sys/time.h>
#include <sys/lock.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h>
#include <sys/stat.h>
#include <sys/namei.h>
#include <sys/ucred.h>
#include <sys/buf_internal.h>
#include <sys/errno.h>
#include <kern/kalloc.h>
#include <sys/uio_internal.h>
#include <sys/uio.h>
#include <sys/domain.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/ubc_internal.h>
#include <sys/vm.h>
#include <sys/sysctl.h>
#include <sys/filedesc.h>
#include <sys/fcntl.h>
#include <sys/event.h>
#include <sys/kdebug.h>
#include <sys/kauth.h>
#include <sys/user.h>
#include <sys/systm.h>
#include <sys/kern_memorystatus.h>
#include <sys/lockf.h>
#include <sys/reboot.h>
#include <miscfs/fifofs/fifo.h>

#include <nfs/nfs.h>

#include <string.h>
#include <machine/machine_routines.h>

#include <kern/assert.h>
#include <mach/kern_return.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/smr.h>

#include <miscfs/specfs/specdev.h>

#include <mach/mach_types.h>
#include <mach/memory_object_types.h>
#include <mach/memory_object_control.h>

#include <kern/kalloc.h>        /* kalloc()/kfree() */
#include <kern/clock.h>         /* delay_for_interval() */
#include <libkern/coreanalytics/coreanalytics.h>
#include <libkern/OSAtomic.h>   /* OSAddAtomic() */
#include <os/atomic_private.h>
#if defined(XNU_TARGET_OS_OSX)
#include <console/video_console.h>
#endif

#ifdef CONFIG_IOCOUNT_TRACE
#include <libkern/OSDebug.h>
#endif

#include <vm/vm_protos.h>       /* vnode_pager_vrele() */
#include <vm/vm_ubc.h>
#include <vm/memory_object_xnu.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif

#include <vfs/vfs_disk_conditioner.h>
#include <libkern/section_keywords.h>

static LCK_GRP_DECLARE(vnode_lck_grp, "vnode");
static LCK_ATTR_DECLARE(vnode_lck_attr, 0, 0);

#if CONFIG_TRIGGERS
static LCK_GRP_DECLARE(trigger_vnode_lck_grp, "trigger_vnode");
static LCK_ATTR_DECLARE(trigger_vnode_lck_attr, 0, 0);
#endif

extern lck_mtx_t mnt_list_mtx_lock;

static KALLOC_TYPE_DEFINE(specinfo_zone, struct specinfo, KT_DEFAULT);

ZONE_DEFINE(vnode_zone, "vnodes",
    sizeof(struct vnode), ZC_NOGC | ZC_ZFREE_CLEARMEM);

enum vtype iftovt_tab[16] = {
	VNON, VFIFO, VCHR, VNON, VDIR, VNON, VBLK, VNON,
	VREG, VNON, VLNK, VNON, VSOCK, VNON, VNON, VBAD,
};
int     vttoif_tab[9] = {
	0, S_IFREG, S_IFDIR, S_IFBLK, S_IFCHR, S_IFLNK,
	S_IFSOCK, S_IFIFO, S_IFMT,
};
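
/*
 * Editorial example (the conversion macros live in the vnode headers, not
 * here): these tables back the classic IFTOVT()/VTTOIF() conversions
 * between st_mode file-type bits and vnode types, e.g.:
 *
 *	enum vtype vt = iftovt_tab[(S_IFDIR & S_IFMT) >> 12];	// VDIR
 *	int ifmt_bits = vttoif_tab[(int)VDIR];			// S_IFDIR
 */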

extern int paniclog_append_noflush(const char *format, ...);

/* XXX next prototype should be from <libsa/stdlib.h> but conflicts with libkern */
__private_extern__ void qsort(
	void * array,
	size_t nmembers,
	size_t member_size,
	int (*)(const void *, const void *));

__private_extern__ void vntblinit(void);
__private_extern__ int unlink1(vfs_context_t, vnode_t, user_addr_t,
    enum uio_seg, int);

static void vnode_list_add(vnode_t);
static void vnode_async_list_add(vnode_t);
static void vnode_list_remove(vnode_t);
static void vnode_list_remove_locked(vnode_t);

static void vnode_abort_advlocks(vnode_t);
static errno_t vnode_drain(vnode_t);
static void vgone(vnode_t, int flags);
static void vclean(vnode_t vp, int flag);
static void vnode_reclaim_internal(vnode_t, int, int, int);

static void vnode_dropiocount(vnode_t);

static vnode_t checkalias(vnode_t vp, dev_t nvp_rdev);
static int  vnode_reload(vnode_t);

static int unmount_callback(mount_t, __unused void *);

static void insmntque(vnode_t vp, mount_t mp);
static int mount_getvfscnt(void);
static int mount_fillfsids(fsid_t *, int);
static void vnode_iterate_setup(mount_t);
int vnode_umount_preflight(mount_t, vnode_t, int);
static int vnode_iterate_prepare(mount_t);
static int vnode_iterate_reloadq(mount_t);
static void vnode_iterate_clear(mount_t);
static mount_t vfs_getvfs_locked(fsid_t *);
static int vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp,
    struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx);
static int vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx);

errno_t rmdir_remove_orphaned_appleDouble(vnode_t, vfs_context_t, int *);

#ifdef CONFIG_IOCOUNT_TRACE
static void record_vp(vnode_t vp, int count);
static TUNABLE(int, bootarg_vnode_iocount_trace, "vnode_iocount_trace", 0);
static TUNABLE(int, bootarg_uthread_iocount_trace, "uthread_iocount_trace", 0);
#endif /* CONFIG_IOCOUNT_TRACE */

#if CONFIG_JETSAM && (DEVELOPMENT || DEBUG)
static TUNABLE(bool, bootarg_no_vnode_jetsam, "-no_vnode_jetsam", false);
#endif /* CONFIG_JETSAM && (DEVELOPMENT || DEBUG) */

static TUNABLE(bool, bootarg_no_vnode_drain, "-no_vnode_drain", false);

__options_decl(freeable_vnode_level_t, uint32_t, {
	DEALLOC_VNODE_NONE = 0,
	DEALLOC_VNODE_ONLY_OVERFLOW = 1,
	DEALLOC_VNODE_ALL = 2
});
#if XNU_TARGET_OS_OSX
static TUNABLE(freeable_vnode_level_t, bootarg_vn_dealloc_level, "vn_dealloc_level", DEALLOC_VNODE_NONE);
#else
static TUNABLE(freeable_vnode_level_t, bootarg_vn_dealloc_level, "vn_dealloc_level", DEALLOC_VNODE_ONLY_OVERFLOW);
#endif /* XNU_TARGET_OS_OSX */
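
/*
 * Editorial note (not in the original source): bootarg_vn_dealloc_level is
 * settable from the boot-args, e.g. booting with vn_dealloc_level=2
 * requests DEALLOC_VNODE_ALL. vntblinit() below copies the value into
 * vn_dealloc_level and sizes numvnodes_min/numvnodes_max accordingly.
 */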

static freeable_vnode_level_t vn_dealloc_level = DEALLOC_VNODE_NONE;

boolean_t root_is_CF_drive = FALSE;

#if CONFIG_TRIGGERS
static int vnode_resolver_create(mount_t, vnode_t, struct vnode_trigger_param *, boolean_t external);
static void vnode_resolver_detach(vnode_t);
#endif

TAILQ_HEAD(freelst, vnode) vnode_free_list;     /* vnode free list */
TAILQ_HEAD(deadlst, vnode) vnode_dead_list;     /* vnode dead list */
TAILQ_HEAD(async_work_lst, vnode) vnode_async_work_list;


TAILQ_HEAD(ragelst, vnode) vnode_rage_list;     /* vnode rapid age list */
struct timeval rage_tv;
int     rage_limit = 0;
int     ragevnodes = 0;

long  reusablevnodes_max = LONG_MAX;
long  reusablevnodes = 0;
int   deadvnodes_low = 0;
int   deadvnodes_high = 0;
int   numvnodes_min = 0;
int   numvnodes_max = 0;

uint64_t newvnode = 0;
unsigned long newvnode_nodead = 0;

static  int vfs_unmountall_started = 0;
static  int vfs_unmountall_finished = 0;
static  uint64_t vfs_shutdown_last_completion_time;

#define RAGE_LIMIT_MIN  100
#define RAGE_TIME_LIMIT 5

VFS_SMR_DECLARE;
extern uint32_t nc_smr_enabled;

/*
 * ROSV definitions
 * NOTE: These are shadowed from PlatformSupport definitions, but XNU
 * builds standalone.
 */
#define PLATFORM_DATA_VOLUME_MOUNT_POINT "/System/Volumes/Data"

/*
 * These could be in PlatformSupport but aren't yet
 */
#define PLATFORM_PREBOOT_VOLUME_MOUNT_POINT "/System/Volumes/Preboot"
#define PLATFORM_RECOVERY_VOLUME_MOUNT_POINT "/System/Volumes/Recovery"

#if CONFIG_MOUNT_VM
#define PLATFORM_VM_VOLUME_MOUNT_POINT "/System/Volumes/VM"
#endif

struct mntlist mountlist;                       /* mounted filesystem list */
static int nummounts = 0;

static int print_busy_vnodes = 0;                               /* print out busy vnodes */

#if DIAGNOSTIC
#define VLISTCHECK(fun, vp, list)       \
	if ((vp)->v_freelist.tqe_prev == (struct vnode **)0xdeadb) \
	        panic("%s: %s vnode not on %slist", (fun), (list), (list));
#else
#define VLISTCHECK(fun, vp, list)
#endif /* DIAGNOSTIC */

#define VLISTNONE(vp)   \
	do {    \
	        (vp)->v_freelist.tqe_next = (struct vnode *)0;  \
	        (vp)->v_freelist.tqe_prev = (struct vnode **)0xdeadb;   \
	} while(0)

#define VONLIST(vp)     \
	((vp)->v_freelist.tqe_prev != (struct vnode **)0xdeadb)

/* remove a vnode from free vnode list */
#define VREMFREE(fun, vp)       \
	do {    \
	        VLISTCHECK((fun), (vp), "free");        \
	        TAILQ_REMOVE(&vnode_free_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        freevnodes--;   \
	        reusablevnodes--;    \
	} while(0)


/* remove a vnode from dead vnode list */
#define VREMDEAD(fun, vp)       \
	do {    \
	        VLISTCHECK((fun), (vp), "dead");        \
	        TAILQ_REMOVE(&vnode_dead_list, (vp), v_freelist);       \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_DEAD;  \
	        deadvnodes--;   \
	        if (vp->v_listflag & VLIST_NO_REUSE) {        \
	                deadvnodes_noreuse--;        \
	        }        \
	} while(0)


/* remove a vnode from async work vnode list */
#define VREMASYNC_WORK(fun, vp) \
	do {    \
	        VLISTCHECK((fun), (vp), "async_work");  \
	        TAILQ_REMOVE(&vnode_async_work_list, (vp), v_freelist); \
	        VLISTNONE((vp));        \
	        vp->v_listflag &= ~VLIST_ASYNC_WORK;    \
	        async_work_vnodes--;    \
	        if (!(vp->v_listflag & VLIST_NO_REUSE)) {        \
	                reusablevnodes--;    \
	        }        \
	} while(0)


/* remove a vnode from rage vnode list */
#define VREMRAGE(fun, vp)       \
	do {    \
	        if ( !(vp->v_listflag & VLIST_RAGE))                    \
	                panic("VREMRAGE: vp not on rage list");         \
	        VLISTCHECK((fun), (vp), "rage");                        \
	        TAILQ_REMOVE(&vnode_rage_list, (vp), v_freelist);       \
	        VLISTNONE((vp));                \
	        vp->v_listflag &= ~VLIST_RAGE;  \
	        ragevnodes--;                   \
	        reusablevnodes--;    \
	} while(0)
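
/*
 * NB (editorial note): the VLISTNONE/VONLIST and VREM* macros above do no
 * locking of their own; callers are expected to already hold the global
 * vnode list lock while manipulating the free/dead/async-work/rage queues
 * and their counters.
 */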

static void async_work_continue(void);
static void vn_laundry_continue(void);
static void wakeup_laundry_thread(void);
static void vnode_smr_free(void *, size_t);

CA_EVENT(freeable_vnodes,
    CA_INT, numvnodes_min,
    CA_INT, numvnodes_max,
    CA_INT, desiredvnodes,
    CA_INT, numvnodes,
    CA_INT, freevnodes,
    CA_INT, deadvnodes,
    CA_INT, freeablevnodes,
    CA_INT, busyvnodes,
    CA_BOOL, threshold_crossed);
static CA_EVENT_TYPE(freeable_vnodes) freeable_vnodes_telemetry;

static bool freeablevnodes_threshold_crossed = false;

/*
 * Initialize the vnode management data structures.
 */
__private_extern__ void
vntblinit(void)
{
	thread_t        thread = THREAD_NULL;
	int desiredvnodes_one_percent = desiredvnodes / 100;

	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_rage_list);
	TAILQ_INIT(&vnode_dead_list);
	TAILQ_INIT(&vnode_async_work_list);
	TAILQ_INIT(&mountlist);

	microuptime(&rage_tv);
	rage_limit = desiredvnodes_one_percent;
	if (rage_limit < RAGE_LIMIT_MIN) {
		rage_limit = RAGE_LIMIT_MIN;
	}

	deadvnodes_low = desiredvnodes_one_percent;
	if (deadvnodes_low > 300) {
		deadvnodes_low = 300;
	}
	deadvnodes_high = deadvnodes_low * 2;

	numvnodes_min = numvnodes_max = desiredvnodes;
	if (bootarg_vn_dealloc_level == DEALLOC_VNODE_ONLY_OVERFLOW) {
		numvnodes_max = desiredvnodes * 2;
		vn_dealloc_level = bootarg_vn_dealloc_level;
	} else if (bootarg_vn_dealloc_level == DEALLOC_VNODE_ALL) {
		numvnodes_min = desiredvnodes_one_percent * 40;
		numvnodes_max = desiredvnodes * 2;
		reusablevnodes_max = (desiredvnodes_one_percent * 20) - deadvnodes_low;
		vn_dealloc_level = bootarg_vn_dealloc_level;
	}

	bzero(&freeable_vnodes_telemetry, sizeof(CA_EVENT_TYPE(freeable_vnodes)));
	freeable_vnodes_telemetry.numvnodes_min = numvnodes_min;
	freeable_vnodes_telemetry.numvnodes_max = numvnodes_max;
	freeable_vnodes_telemetry.desiredvnodes = desiredvnodes;

	if (nc_smr_enabled) {
		zone_enable_smr(vnode_zone, VFS_SMR(), &vnode_smr_free);
	}

	/*
	 * create worker threads
	 */
	kernel_thread_start((thread_continue_t)async_work_continue, NULL, &thread);
	thread_deallocate(thread);
	kernel_thread_start((thread_continue_t)vn_laundry_continue, NULL, &thread);
	thread_deallocate(thread);
}
/* the timeout is expressed in units of 10 msec */
int
vnode_waitforwrites(vnode_t vp, int output_target, int slpflag, int slptimeout, const char *msg)
{
	int error = 0;
	struct timespec ts;

	if (output_target < 0) {
		return EINVAL;
	}

	KERNEL_DEBUG(0x3010280 | DBG_FUNC_START, (int)vp, output_target, vp->v_numoutput, 0, 0);

	if (vp->v_numoutput > output_target) {
		slpflag |= PDROP;

		vnode_lock_spin(vp);

		while ((vp->v_numoutput > output_target) && error == 0) {
			if (output_target) {
				vp->v_flag |= VTHROTTLED;
			} else {
				vp->v_flag |= VBWAIT;
			}

			/* convert the 10 msec ticks into a timespec (100 ticks per second) */
			ts.tv_sec = (slptimeout / 100);
			ts.tv_nsec = (slptimeout % 100) * 10 * NSEC_PER_USEC * 1000;
			error = msleep((caddr_t)&vp->v_numoutput, &vp->v_lock, (slpflag | (PRIBIO + 1)), msg, &ts);

			vnode_lock_spin(vp);
		}
		vnode_unlock(vp);
	}
	KERNEL_DEBUG(0x3010280 | DBG_FUNC_END, (int)vp, output_target, vp->v_numoutput, error, 0);

	return error;
}
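
/*
 * Illustrative usage (a sketch, not a caller in this file): a sync path
 * could drain all pending writes on a vnode, retrying each time the
 * 1 second timeout (100 ticks of 10 msec) expires:
 *
 *	while (vnode_waitforwrites(vp, 0, 0, 100, "example_sync") != 0)
 *		;
 */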


void
vnode_startwrite(vnode_t vp)
{
	OSAddAtomic(1, &vp->v_numoutput);
}


void
vnode_writedone(vnode_t vp)
{
	if (vp) {
		int need_wakeup = 0;

		OSAddAtomic(-1, &vp->v_numoutput);

		vnode_lock_spin(vp);

		if (vp->v_numoutput < 0) {
			panic("vnode_writedone: numoutput < 0");
		}

		if ((vp->v_flag & VTHROTTLED)) {
			vp->v_flag &= ~VTHROTTLED;
			need_wakeup = 1;
		}
		if ((vp->v_flag & VBWAIT) && (vp->v_numoutput == 0)) {
			vp->v_flag &= ~VBWAIT;
			need_wakeup = 1;
		}
		vnode_unlock(vp);

		if (need_wakeup) {
			wakeup((caddr_t)&vp->v_numoutput);
		}
	}
}
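
/*
 * Editorial sketch (hypothetical caller, not from this file): each async
 * write is bracketed by vnode_startwrite()/vnode_writedone() so that
 * v_numoutput tracks I/Os in flight and vnode_waitforwrites() sleepers
 * are woken when it drains:
 *
 *	vnode_startwrite(vp);
 *	if (issue_async_write(vp, bp) != 0) {	// hypothetical helper
 *		vnode_writedone(vp);		// undo if the I/O was never queued
 *	}
 */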


int
vnode_hasdirtyblks(vnode_t vp)
{
	struct cl_writebehind *wbp;

	/*
	 * Not taking the buf_mtx as there is little
	 * point in doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be synchronization, it must be driven
	 * by the caller.
	 */
	if (vp->v_dirtyblkhd.lh_first) {
		return 1;
	}

	if (!UBCINFOEXISTS(vp)) {
		return 0;
	}

	wbp = vp->v_ubcinfo->cl_wbehind;

	if (wbp && (wbp->cl_number || wbp->cl_scmap)) {
		return 1;
	}

	return 0;
}

int
vnode_hascleanblks(vnode_t vp)
{
	/*
	 * Not taking the buf_mtx as there is little
	 * point in doing it. Even if the lock is taken the
	 * state can change right after that. If there
	 * needs to be synchronization, it must be driven
	 * by the caller.
	 */
	if (vp->v_cleanblkhd.lh_first) {
		return 1;
	}
	return 0;
}

void
vnode_iterate_setup(mount_t mp)
{
	mp->mnt_lflag |= MNT_LITER;
}

int
vnode_umount_preflight(mount_t mp, vnode_t skipvp, int flags)
{
	vnode_t vp;
	int ret = 0;

	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
		if (vp->v_type == VDIR) {
			continue;
		}
		if (vp == skipvp) {
			continue;
		}
		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) || (vp->v_flag & VNOFLUSH))) {
			continue;
		}
		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
			continue;
		}
		if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) {
			continue;
		}

		/* Look for busy vnode */
		if ((vp->v_usecount != 0) && ((vp->v_usecount - vp->v_kusecount) != 0)) {
			ret = 1;
			if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
				vprint("vnode_umount_preflight - busy vnode", vp);
			} else {
				return ret;
			}
		} else if (vp->v_iocount > 0) {
			/* Busy if iocount is > 0 for more than 3 seconds */
			tsleep(&vp->v_iocount, PVFS, "vnode_drain_network", 3 * hz);
			if (vp->v_iocount > 0) {
				ret = 1;
				if (print_busy_vnodes && ((flags & FORCECLOSE) == 0)) {
					vprint("vnode_umount_preflight - busy vnode", vp);
				} else {
					return ret;
				}
			}
			continue;
		}
	}

	return ret;
}
/*
 * This routine prepares for iteration by moving all the vnodes to the
 * worker queue. Called with the mount lock held.
 */
int
vnode_iterate_prepare(mount_t mp)
{
	vnode_t vp;

	if (TAILQ_EMPTY(&mp->mnt_vnodelist)) {
		/* nothing to do */
		return 0;
	}

	vp = TAILQ_FIRST(&mp->mnt_vnodelist);
	vp->v_mntvnodes.tqe_prev = &(mp->mnt_workerqueue.tqh_first);
	mp->mnt_workerqueue.tqh_first = mp->mnt_vnodelist.tqh_first;
	mp->mnt_workerqueue.tqh_last = mp->mnt_vnodelist.tqh_last;

	TAILQ_INIT(&mp->mnt_vnodelist);
	if (mp->mnt_newvnodes.tqh_first != NULL) {
		panic("vnode_iterate_prepare: newvnode when entering vnode");
	}
	TAILQ_INIT(&mp->mnt_newvnodes);

	return 1;
}


/* called with mount lock held */
int
vnode_iterate_reloadq(mount_t mp)
{
	int moved = 0;

	/* add the remaining entries in the worker queue to the end of the mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		struct vnode * mvp;
		mvp = TAILQ_LAST(&mp->mnt_vnodelist, vnodelst);

		/* Join the worker queue entries to the mount vnode list */
		if (mvp) {
			mvp->v_mntvnodes.tqe_next = mp->mnt_workerqueue.tqh_first;
		} else {
			mp->mnt_vnodelist.tqh_first = mp->mnt_workerqueue.tqh_first;
		}
		mp->mnt_workerqueue.tqh_first->v_mntvnodes.tqe_prev = mp->mnt_vnodelist.tqh_last;
		mp->mnt_vnodelist.tqh_last = mp->mnt_workerqueue.tqh_last;
		TAILQ_INIT(&mp->mnt_workerqueue);
	}

	/* add the newvnodes to the head of mount vnode list */
	if (!TAILQ_EMPTY(&mp->mnt_newvnodes)) {
		struct vnode * nlvp;
		nlvp = TAILQ_LAST(&mp->mnt_newvnodes, vnodelst);

		mp->mnt_newvnodes.tqh_first->v_mntvnodes.tqe_prev = &mp->mnt_vnodelist.tqh_first;
		nlvp->v_mntvnodes.tqe_next = mp->mnt_vnodelist.tqh_first;
		if (mp->mnt_vnodelist.tqh_first) {
			mp->mnt_vnodelist.tqh_first->v_mntvnodes.tqe_prev = &nlvp->v_mntvnodes.tqe_next;
		} else {
			mp->mnt_vnodelist.tqh_last = mp->mnt_newvnodes.tqh_last;
		}
		mp->mnt_vnodelist.tqh_first = mp->mnt_newvnodes.tqh_first;
		TAILQ_INIT(&mp->mnt_newvnodes);
		moved = 1;
	}

	return moved;
}


void
vnode_iterate_clear(mount_t mp)
{
	mp->mnt_lflag &= ~MNT_LITER;
}

#if defined(__x86_64__)

#include <i386/panic_hooks.h>

struct vnode_iterate_panic_hook {
	panic_hook_t hook;
	mount_t mp;
	struct vnode *vp;
};

static void
vnode_iterate_panic_hook(panic_hook_t *hook_)
{
	struct vnode_iterate_panic_hook *hook = (struct vnode_iterate_panic_hook *)hook_;
	panic_phys_range_t range;
	uint64_t phys;

	if (panic_phys_range_before(hook->mp, &phys, &range)) {
		paniclog_append_noflush("mp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->mp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("mp = %p, phys = %p, prev (!)\n", hook->mp, phys);
	}

	if (panic_phys_range_before(hook->vp, &phys, &range)) {
		paniclog_append_noflush("vp = %p, phys = %p, prev (%p: %p-%p)\n",
		    hook->vp, phys, range.type, range.phys_start,
		    range.phys_start + range.len);
	} else {
		paniclog_append_noflush("vp = %p, phys = %p, prev (!)\n", hook->vp, phys);
	}
	panic_dump_mem((void *)(((vm_offset_t)hook->mp - 4096) & ~4095), 12288);
}
#endif /* defined(__x86_64__) */

int
vnode_iterate(mount_t mp, int flags, int (*callout)(struct vnode *, void *),
    void *arg)
{
	struct vnode *vp;
	int vid, retval;
	int ret = 0;

	/*
	 * The mount iterate mutex is held for the duration of the iteration.
	 * This could be done with a state flag on the mount structure, but
	 * that can run into priority inversion issues. Using a mutex lets us
	 * benefit from the priority donation mechanisms in the kernel for
	 * locks. This mutex should never be acquired in spin mode and it
	 * should be acquired before attempting to acquire the mount lock.
	 */
	mount_iterate_lock(mp);

	mount_lock(mp);

	vnode_iterate_setup(mp);

	/* If it returns 0 then there is nothing to do */
	retval = vnode_iterate_prepare(mp);

	if (retval == 0) {
		vnode_iterate_clear(mp);
		mount_unlock(mp);
		mount_iterate_unlock(mp);
		return ret;
	}

#if defined(__x86_64__)
	struct vnode_iterate_panic_hook hook;
	hook.mp = mp;
	hook.vp = NULL;
	panic_hook(&hook.hook, vnode_iterate_panic_hook);
#endif
	/* iterate over all the vnodes */
	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
#if defined(__x86_64__)
		hook.vp = vp;
#endif
		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
		vid = vp->v_id;
		if ((vp->v_data == NULL) || (vp->v_type == VNON) || (vp->v_mount != mp)) {
			continue;
		}
		vnode_hold(vp);
		mount_unlock(mp);

		if (vget_internal(vp, vid, (flags | VNODE_NODEAD | VNODE_WITHID | VNODE_NOSUSPEND))) {
			mount_lock(mp);
			vnode_drop(vp);
			continue;
		}
		vnode_drop(vp);
		if (flags & VNODE_RELOAD) {
			/*
			 * we're reloading the filesystem
			 * cast out any inactive vnodes...
			 */
			if (vnode_reload(vp)) {
				/* vnode will be recycled on the refcount drop */
				vnode_put(vp);
				mount_lock(mp);
				continue;
			}
		}

		retval = callout(vp, arg);

		switch (retval) {
		case VNODE_RETURNED:
		case VNODE_RETURNED_DONE:
			vnode_put(vp);
			if (retval == VNODE_RETURNED_DONE) {
				mount_lock(mp);
				ret = 0;
				goto out;
			}
			break;

		case VNODE_CLAIMED_DONE:
			mount_lock(mp);
			ret = 0;
			goto out;
		case VNODE_CLAIMED:
		default:
			break;
		}
		mount_lock(mp);
	}

out:
#if defined(__x86_64__)
	panic_unhook(&hook.hook);
#endif
	(void)vnode_iterate_reloadq(mp);
	vnode_iterate_clear(mp);
	mount_unlock(mp);
	mount_iterate_unlock(mp);
	return ret;
}
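
/*
 * Example callout (a sketch, not from this file): count the regular files
 * on a mount. The callout runs with an iocount held on the vnode;
 * returning VNODE_RETURNED tells vnode_iterate() to drop that iocount on
 * our behalf:
 *
 *	static int
 *	count_vreg_callout(struct vnode *vp, void *arg)
 *	{
 *		if (vnode_vtype(vp) == VREG) {
 *			(*(int *)arg)++;
 *		}
 *		return VNODE_RETURNED;
 *	}
 *
 *	int nreg = 0;
 *	vnode_iterate(mp, 0, count_vreg_callout, &nreg);
 */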

void
mount_lock_renames(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_renamelock);
}

void
mount_unlock_renames(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_renamelock);
}

void
mount_iterate_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_iter_lock);
}

void
mount_iterate_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_iter_lock);
}

void
mount_lock(mount_t mp)
{
	lck_mtx_lock(&mp->mnt_mlock);
}

void
mount_lock_spin(mount_t mp)
{
	lck_mtx_lock_spin(&mp->mnt_mlock);
}

void
mount_unlock(mount_t mp)
{
	lck_mtx_unlock(&mp->mnt_mlock);
}


void
mount_ref(mount_t mp, int locked)
{
	if (!locked) {
		mount_lock_spin(mp);
	}

	mp->mnt_count++;

	if (!locked) {
		mount_unlock(mp);
	}
}


void
mount_drop(mount_t mp, int locked)
{
	if (!locked) {
		mount_lock_spin(mp);
	}

	mp->mnt_count--;

	if (mp->mnt_count == 0 && (mp->mnt_lflag & MNT_LDRAIN)) {
		wakeup(&mp->mnt_lflag);
	}

	if (!locked) {
		mount_unlock(mp);
	}
}


int
mount_iterref(mount_t mp, int locked)
{
	int retval = 0;

	if (!locked) {
		mount_list_lock();
	}
	if (mp->mnt_iterref < 0) {
		retval = 1;
	} else {
		mp->mnt_iterref++;
	}
	if (!locked) {
		mount_list_unlock();
	}
	return retval;
}

int
mount_isdrained(mount_t mp, int locked)
{
	int retval;

	if (!locked) {
		mount_list_lock();
	}
	if (mp->mnt_iterref < 0) {
		retval = 1;
	} else {
		retval = 0;
	}
	if (!locked) {
		mount_list_unlock();
	}
	return retval;
}

void
mount_iterdrop(mount_t mp)
{
	mount_list_lock();
	mp->mnt_iterref--;
	wakeup(&mp->mnt_iterref);
	mount_list_unlock();
}

void
mount_iterdrain(mount_t mp)
{
	mount_list_lock();
	while (mp->mnt_iterref) {
		msleep((caddr_t)&mp->mnt_iterref, &mnt_list_mtx_lock, PVFS, "mount_iterdrain", NULL);
	}
	/* mount iterations drained */
	mp->mnt_iterref = -1;
	mount_list_unlock();
}

void
mount_iterreset(mount_t mp)
{
	mount_list_lock();
	if (mp->mnt_iterref == -1) {
		mp->mnt_iterref = 0;
	}
	mount_list_unlock();
}

/* always called with mount lock held */
int
mount_refdrain(mount_t mp)
{
	if (mp->mnt_lflag & MNT_LDRAIN) {
		panic("already in drain");
	}
	mp->mnt_lflag |= MNT_LDRAIN;

	while (mp->mnt_count) {
		msleep((caddr_t)&mp->mnt_lflag, &mp->mnt_mlock, PVFS, "mount_drain", NULL);
	}

	if (mp->mnt_vnodelist.tqh_first != NULL) {
		panic("mount_refdrain: dangling vnode");
	}

	mp->mnt_lflag &= ~MNT_LDRAIN;

	return 0;
}

/* Tags the mount point as not supporting extended readdir for NFS exports */
void
mount_set_noreaddirext(mount_t mp)
{
	mount_lock(mp);
	mp->mnt_kern_flag |= MNTK_DENY_READDIREXT;
	mount_unlock(mp);
}

/*
 * Mark a mount point as busy. Used to synchronize access and to delay
 * unmounting.
 */
int
vfs_busy(mount_t mp, int flags)
{
restart:
	if (mp->mnt_lflag & MNT_LDEAD) {
		return ENOENT;
	}

	mount_lock(mp);

	if (mp->mnt_lflag & MNT_LUNMOUNT) {
		if (flags & LK_NOWAIT || mp->mnt_lflag & MNT_LDEAD) {
			mount_unlock(mp);
			return ENOENT;
		}

		/*
		 * Since all busy locks are shared except the exclusive
		 * lock granted when unmounting, the only place that a
		 * wakeup needs to be done is at the release of the
		 * exclusive lock at the end of dounmount.
		 */
		mp->mnt_lflag |= MNT_LWAIT;
		msleep((caddr_t)mp, &mp->mnt_mlock, (PVFS | PDROP), "vfsbusy", NULL);
		return ENOENT;
	}

	mount_unlock(mp);

	lck_rw_lock_shared(&mp->mnt_rwlock);

	/*
	 * Until we are granted the rwlock, it's possible for the mount point to
	 * change state, so re-evaluate before granting the vfs_busy.
	 */
	if (mp->mnt_lflag & (MNT_LDEAD | MNT_LUNMOUNT)) {
		lck_rw_done(&mp->mnt_rwlock);
		goto restart;
	}
	return 0;
}
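
/*
 * Typical usage (illustrative, not from this file): take a busy reference
 * around work that must not race an unmount, then release it with
 * vfs_unbusy():
 *
 *	if (vfs_busy(mp, LK_NOWAIT) == 0) {
 *		// ... operate on the mount ...
 *		vfs_unbusy(mp);
 *	}
 */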

/*
 * Free a busy filesystem.
 */
void
vfs_unbusy(mount_t mp)
{
	lck_rw_done(&mp->mnt_rwlock);
}



static void
vfs_rootmountfailed(mount_t mp)
{
	mount_list_lock();
	mp->mnt_vtable->vfc_refcount--;
	mount_list_unlock();

	vfs_unbusy(mp);

	if (nc_smr_enabled) {
		vfs_smr_synchronize();
	}

	mount_lock_destroy(mp);

#if CONFIG_MACF
	mac_mount_label_destroy(mp);
#endif

	zfree(mount_zone, mp);
}

/*
 * Lookup a filesystem type, and if found allocate and initialize
 * a mount structure for it.
 *
 * Devname is usually updated by mount(8) after booting.
 */
static mount_t
vfs_rootmountalloc_internal(struct vfstable *vfsp, const char *devname)
{
	mount_t mp;

	mp = zalloc_flags(mount_zone, Z_WAITOK | Z_ZERO);
	/* Initialize the default IO constraints */
	mp->mnt_maxreadcnt = mp->mnt_maxwritecnt = MAXPHYS;
	mp->mnt_segreadcnt = mp->mnt_segwritecnt = 32;
	mp->mnt_maxsegreadsize = mp->mnt_maxreadcnt;
	mp->mnt_maxsegwritesize = mp->mnt_maxwritecnt;
	mp->mnt_devblocksize = DEV_BSIZE;
	mp->mnt_alignmentmask = PAGE_MASK;
	mp->mnt_ioqueue_depth = MNT_DEFAULT_IOQUEUE_DEPTH;
	mp->mnt_ioscale = 1;
	mp->mnt_ioflags = 0;
	mp->mnt_realrootvp = NULLVP;
	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
	mp->mnt_throttle_mask = LOWPRI_MAX_NUM_DEV - 1;
	mp->mnt_devbsdunit = 0;

	mount_lock_init(mp);
	(void)vfs_busy(mp, LK_NOWAIT);

	TAILQ_INIT(&mp->mnt_vnodelist);
	TAILQ_INIT(&mp->mnt_workerqueue);
	TAILQ_INIT(&mp->mnt_newvnodes);

	mp->mnt_vtable = vfsp;
	mp->mnt_op = vfsp->vfc_vfsops;
	mp->mnt_flag = MNT_RDONLY | MNT_ROOTFS;
	mp->mnt_vnodecovered = NULLVP;
	//mp->mnt_stat.f_type = vfsp->vfc_typenum;
	mp->mnt_flag |= vfsp->vfc_flags & MNT_VISFLAGMASK;

	mount_list_lock();
	vfsp->vfc_refcount++;
	mount_list_unlock();

	strlcpy(mp->mnt_vfsstat.f_fstypename, vfsp->vfc_name, MFSTYPENAMELEN);
	mp->mnt_vfsstat.f_mntonname[0] = '/';
	/* XXX const poisoning layering violation */
	(void) copystr((const void *)devname, mp->mnt_vfsstat.f_mntfromname, MAXPATHLEN - 1, NULL);

#if CONFIG_MACF
	mac_mount_label_init(mp);
	mac_mount_label_associate(vfs_context_kernel(), mp);
#endif
	return mp;
}

errno_t
vfs_rootmountalloc(const char *fstypename, const char *devname, mount_t *mpp)
{
	struct vfstable *vfsp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (!strncmp(vfsp->vfc_name, fstypename,
		    sizeof(vfsp->vfc_name))) {
			break;
		}
	}
	if (vfsp == NULL) {
		return ENODEV;
	}

	*mpp = vfs_rootmountalloc_internal(vfsp, devname);

	if (*mpp) {
		return 0;
	}

	return ENOMEM;
}

#define DBG_MOUNTROOT (FSDBG_CODE(DBG_MOUNT, 0))

/*
 * Find an appropriate filesystem to use for the root. If a filesystem
 * has not been preselected, walk through the list of known filesystems
 * trying those that have mountroot routines, and try them until one
 * works or we have tried them all.
 */
extern int (*mountroot)(void);

int
vfs_mountroot(void)
{
#if CONFIG_MACF
	struct vnode *vp;
#endif
	struct vfstable *vfsp;
	vfs_context_t ctx = vfs_context_kernel();
	struct vfs_attr vfsattr;
	int     error;
	mount_t mp;
	vnode_t bdevvp_rootvp;

	/*
	 * Reset any prior "unmounting everything" state.  This handles the
	 * situation where we mount root, unmount everything, and then
	 * re-mount root from a new image (see bsd/kern/imageboot.c).
	 */
	vfs_unmountall_started = vfs_unmountall_finished = 0;
	OSMemoryBarrier();

	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_START);
	if (mountroot != NULL) {
		/*
		 * used for netboot which follows a different set of rules
		 */
		error = (*mountroot)();

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 0);
		return error;
	}
	if ((error = bdevvp(rootdev, &rootvp))) {
		printf("vfs_mountroot: can't setup bdevvp\n");

		KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error, 1);
		return error;
	}
	/*
	 * 4951998 - code we call in vfc_mountroot may replace rootvp
	 * so keep a local copy for some housekeeping.
	 */
	bdevvp_rootvp = rootvp;

	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
		if (vfsp->vfc_mountroot == NULL
		    && !ISSET(vfsp->vfc_vfsflags, VFC_VFSCANMOUNTROOT)) {
			continue;
		}

		mp = vfs_rootmountalloc_internal(vfsp, "root_device");
		mp->mnt_devvp = rootvp;

		if (vfsp->vfc_mountroot) {
			error = (*vfsp->vfc_mountroot)(mp, rootvp, ctx);
		} else {
			error = VFS_MOUNT(mp, rootvp, 0, ctx);
		}

		if (!error) {
			if (bdevvp_rootvp != rootvp) {
				/*
				 * rootvp changed...
				 *   bump the iocount and fix up mnt_devvp for the
				 *   new rootvp (it will already have a usecount taken)...
				 *   drop the iocount and the usecount on the original
				 *   since we are no longer going to use it...
				 */
				vnode_getwithref(rootvp);
				mp->mnt_devvp = rootvp;

				vnode_rele(bdevvp_rootvp);
				vnode_put(bdevvp_rootvp);
			}
			mp->mnt_devvp->v_specflags |= SI_MOUNTEDON;

			vfs_unbusy(mp);

			mount_list_add(mp);

			/*
			 *   cache the IO attributes for the underlying physical media...
			 *   an error return indicates the underlying driver doesn't
			 *   support all the queries necessary... however, reasonable
			 *   defaults will have been set, so no reason to bail or care
			 */
			vfs_init_io_attributes(rootvp, mp);

			if (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) {
				root_is_CF_drive = TRUE;
			}

			/*
			 * Shadow the VFC_VFSNATIVEXATTR flag to MNTK_EXTENDED_ATTRS.
			 */
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSNATIVEXATTR) {
				mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
			}
			if (mp->mnt_vtable->vfc_vfsflags & VFC_VFSPREFLIGHT) {
				mp->mnt_kern_flag |= MNTK_UNMOUNT_PREFLIGHT;
			}

#if defined(XNU_TARGET_OS_OSX)
			uint32_t speed;

			if (MNTK_VIRTUALDEV & mp->mnt_kern_flag) {
				speed = 128;
			} else if (disk_conditioner_mount_is_ssd(mp)) {
				speed = 7 * 256;
			} else {
				speed = 256;
			}
			vc_progress_setdiskspeed(speed);
#endif /* XNU_TARGET_OS_OSX */
			/*
			 * Probe root file system for additional features.
			 */
			(void)VFS_START(mp, 0, ctx);

			VFSATTR_INIT(&vfsattr);
			VFSATTR_WANTED(&vfsattr, f_capabilities);
			if (vfs_getattr(mp, &vfsattr, ctx) == 0 &&
			    VFSATTR_IS_SUPPORTED(&vfsattr, f_capabilities)) {
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_EXTENDED_ATTR)) {
					mp->mnt_kern_flag |= MNTK_EXTENDED_ATTRS;
				}
#if NAMEDSTREAMS
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_INTERFACES] & VOL_CAP_INT_NAMEDSTREAMS)) {
					mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
				}
#endif
				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_PATH_FROM_ID)) {
					mp->mnt_kern_flag |= MNTK_PATH_FROM_ID;
				}

				if ((vfsattr.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS) &&
				    (vfsattr.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] & VOL_CAP_FMT_DIR_HARDLINKS)) {
					mp->mnt_kern_flag |= MNTK_DIR_HARDLINKS;
				}
			}

			/*
			 * get rid of the iocount reference returned
			 * by bdevvp (or picked up by us on the substituted
			 * rootvp)... it (or we) will have also taken
			 * a usecount reference which we want to keep
			 */
			vnode_put(rootvp);

#if CONFIG_MACF
			if ((vfs_flags(mp) & MNT_MULTILABEL) == 0) {
				KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 2);
				return 0;
			}

			error = VFS_ROOT(mp, &vp, ctx);
			if (error) {
				printf("%s() VFS_ROOT() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
			error = vnode_label(mp, NULL, vp, NULL, 0, ctx);
			/*
			 * get rid of reference provided by VFS_ROOT
			 */
			vnode_put(vp);

			if (error) {
				printf("%s() vnode_label() returned %d\n",
				    __func__, error);
				dounmount(mp, MNT_FORCE, 0, ctx);
				goto fail;
			}
#endif
			KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, 0, 3);
			return 0;
		}
		vfs_rootmountfailed(mp);
#if CONFIG_MACF
fail:
#endif
		if (error != EINVAL) {
			printf("%s_mountroot failed: %d\n", vfsp->vfc_name, error);
		}
	}
	KDBG_RELEASE(DBG_MOUNTROOT | DBG_FUNC_END, error ? error : ENODEV, 4);
	return ENODEV;
}

static int
cache_purge_callback(mount_t mp, __unused void * arg)
{
	cache_purgevfs(mp);
	return VFS_RETURNED;
}

extern lck_rw_t rootvnode_rw_lock;
extern void set_rootvnode(vnode_t);


static int
mntonname_fixup_callback(mount_t mp, __unused void *arg)
{
	int error = 0;

	if ((strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/", sizeof("/")) == 0) ||
	    (strncmp(&mp->mnt_vfsstat.f_mntonname[0], "/dev", sizeof("/dev")) == 0)) {
		return 0;
	}

	if ((error = vfs_busy(mp, LK_NOWAIT))) {
		printf("vfs_busy failed with %d for %s\n", error, mp->mnt_vfsstat.f_mntonname);
		return -1;
	}

	size_t pathlen = MAXPATHLEN;
	if ((error = vn_getpath_ext(mp->mnt_vnodecovered, NULL, mp->mnt_vfsstat.f_mntonname, &pathlen, VN_GETPATH_FSENTER))) {
		printf("vn_getpath_ext failed with %d for mnt_vnodecovered of %s\n", error, mp->mnt_vfsstat.f_mntonname);
	}

	vfs_unbusy(mp);

	return error;
}

static int
clear_mntk_backs_root_callback(mount_t mp, __unused void *arg)
{
	lck_rw_lock_exclusive(&mp->mnt_rwlock);
	mp->mnt_kern_flag &= ~MNTK_BACKS_ROOT;
	lck_rw_done(&mp->mnt_rwlock);
	return VFS_RETURNED;
}
static int
verify_incoming_rootfs(vnode_t *incoming_rootvnodep, vfs_context_t ctx,
    vfs_switch_root_flags_t flags)
{
	mount_t mp;
	vnode_t tdp;
	vnode_t incoming_rootvnode_with_iocount = *incoming_rootvnodep;
	vnode_t incoming_rootvnode_with_usecount = NULLVP;
	int error = 0;

	if (vnode_vtype(incoming_rootvnode_with_iocount) != VDIR) {
		printf("Incoming rootfs path not a directory\n");
		error = ENOTDIR;
		goto done;
	}

	/*
	 * Before we call VFS_ROOT, we have to let go of the iocount already
	 * acquired, but before doing that get a usecount.
	 */
	vnode_ref_ext(incoming_rootvnode_with_iocount, 0, VNODE_REF_FORCE);
	incoming_rootvnode_with_usecount = incoming_rootvnode_with_iocount;
	vnode_lock_spin(incoming_rootvnode_with_usecount);
	if ((mp = incoming_rootvnode_with_usecount->v_mount)) {
		mp->mnt_crossref++;
		vnode_unlock(incoming_rootvnode_with_usecount);
	} else {
		vnode_unlock(incoming_rootvnode_with_usecount);
		printf("Incoming rootfs root vnode does not have associated mount\n");
		error = ENOTDIR;
		goto done;
	}

	if (vfs_busy(mp, LK_NOWAIT)) {
		printf("Incoming rootfs root vnode mount is busy\n");
		error = ENOENT;
		goto out;
	}

	vnode_put(incoming_rootvnode_with_iocount);
	incoming_rootvnode_with_iocount = NULLVP;

	error = VFS_ROOT(mp, &tdp, ctx);

	if (error) {
		printf("Could not get rootvnode of incoming rootfs\n");
	} else if (tdp != incoming_rootvnode_with_usecount) {
		vnode_put(tdp);
		tdp = NULLVP;
		printf("Incoming rootfs root vnode mount is not a mountpoint\n");
		error = EINVAL;
		goto out_busy;
	} else {
		incoming_rootvnode_with_iocount = tdp;
		tdp = NULLVP;
	}

	if ((flags & VFSSR_VIRTUALDEV_PROHIBITED) != 0) {
		if (mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
			error = ENODEV;
		}
		if (error) {
			printf("Incoming rootfs is backed by a virtual device; cannot switch to it\n");
			goto out_busy;
		}
	}

out_busy:
	vfs_unbusy(mp);

out:
	vnode_lock(incoming_rootvnode_with_usecount);
	mp->mnt_crossref--;
	if (mp->mnt_crossref < 0) {
		panic("mount cross refs -ve");
	}
	vnode_unlock(incoming_rootvnode_with_usecount);

done:
	if (incoming_rootvnode_with_usecount) {
		vnode_rele(incoming_rootvnode_with_usecount);
		incoming_rootvnode_with_usecount = NULLVP;
	}

	if (error && incoming_rootvnode_with_iocount) {
		vnode_put(incoming_rootvnode_with_iocount);
		incoming_rootvnode_with_iocount = NULLVP;
	}

	*incoming_rootvnodep = incoming_rootvnode_with_iocount;
	return error;
}

/*
 * vfs_switch_root()
 *
 * Move the current root volume, and put a different volume at the root.
 *
 * incoming_vol_old_path: This is the path where the incoming root volume
 *	is mounted when this function begins.
 * outgoing_vol_new_path: This is the path where the outgoing root volume
 *	will be mounted when this function (successfully) ends.
 *	Note: Do not use a leading slash.
 *
 * Volumes mounted at several fixed points (including /dev) will be preserved
 * at the same absolute path. That means they will move within the folder
 * hierarchy during the pivot operation. For example, /dev before the pivot
 * will be at /dev after the pivot.
 *
 * If any filesystem has MNTK_BACKS_ROOT set, it will be cleared. If the
 * incoming root volume is actually a disk image backed by some other
 * filesystem, it is the caller's responsibility to re-set MNTK_BACKS_ROOT
 * as appropriate.
 */
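/*
 * For example (hypothetical call, not from this file): pivot to a volume
 * currently mounted at /System/Volumes/NewRoot, leaving the old root
 * mounted at /System/Volumes/OldRoot afterwards. Note the leading slash
 * on the incoming path only, per the rules above:
 *
 *	vfs_switch_root("/System/Volumes/NewRoot",
 *	    "System/Volumes/OldRoot", 0);
 */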
int
vfs_switch_root(const char *incoming_vol_old_path,
    const char *outgoing_vol_new_path,
    vfs_switch_root_flags_t flags)
{
	// grumble grumble
#define countof(x) (sizeof(x) / sizeof(x[0]))

	struct preserved_mount {
		vnode_t pm_rootvnode;
		mount_t pm_mount;
		vnode_t pm_new_covered_vp;
		vnode_t pm_old_covered_vp;
		const char *pm_path;
	};

	vfs_context_t ctx = vfs_context_kernel();
	vnode_t incoming_rootvnode = NULLVP;
	vnode_t outgoing_vol_new_covered_vp = NULLVP;
	vnode_t incoming_vol_old_covered_vp = NULLVP;
	mount_t outgoing = NULL;
	mount_t incoming = NULL;

	struct preserved_mount devfs = { NULLVP, NULL, NULLVP, NULLVP, "dev" };
	struct preserved_mount preboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Preboot" };
	struct preserved_mount recovery = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Recovery" };
	struct preserved_mount vm = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/VM" };
	struct preserved_mount update = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Update" };
	struct preserved_mount iscPreboot = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/iSCPreboot" };
	struct preserved_mount hardware = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Hardware" };
	struct preserved_mount xarts = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/xarts" };
	struct preserved_mount factorylogs = { NULLVP, NULL, NULLVP, NULLVP, "FactoryLogs" };
	struct preserved_mount idiags = { NULLVP, NULL, NULLVP, NULLVP, "System/Volumes/Diags" };

	struct preserved_mount *preserved[10];
	preserved[0] = &devfs;
	preserved[1] = &preboot;
	preserved[2] = &recovery;
	preserved[3] = &vm;
	preserved[4] = &update;
	preserved[5] = &iscPreboot;
	preserved[6] = &hardware;
	preserved[7] = &xarts;
	preserved[8] = &factorylogs;
	preserved[9] = &idiags;

	int error;

	printf("%s : shuffling mount points : %s <-> / <-> %s\n", __FUNCTION__, incoming_vol_old_path, outgoing_vol_new_path);

	if (outgoing_vol_new_path[0] == '/') {
		// I should have written this to be more helpful and just advance the pointer forward past the slash
		printf("Do not use a leading slash in outgoing_vol_new_path\n");
		return EINVAL;
	}

	// Set incoming_rootvnode.
	// Find the vnode representing the mountpoint of the new root
	// filesystem. That will be the new root directory.
	error = vnode_lookup(incoming_vol_old_path, 0, &incoming_rootvnode, ctx);
	if (error) {
		printf("Incoming rootfs root vnode not found\n");
		error = ENOENT;
		goto done;
	}

	/*
	 * This function drops the iocount and sets the vnode to NULL on error.
	 */
	error = verify_incoming_rootfs(&incoming_rootvnode, ctx, flags);
	if (error) {
		goto done;
	}

	/*
	 * Set outgoing_vol_new_covered_vp.
	 * Find the vnode representing the future mountpoint of the old
	 * root filesystem, inside the directory incoming_rootvnode.
	 * Right now it's at "/incoming_vol_old_path/outgoing_vol_new_path".
	 * Soon it will become "/outgoing_vol_new_path", which will be covered.
	 */
	error = vnode_lookupat(outgoing_vol_new_path, 0, &outgoing_vol_new_covered_vp, ctx, incoming_rootvnode);
	if (error) {
		printf("Outgoing rootfs path not found, abandoning / switch, error = %d\n", error);
		error = ENOENT;
		goto done;
	}
	if (vnode_vtype(outgoing_vol_new_covered_vp) != VDIR) {
		printf("Outgoing rootfs path is not a directory, abandoning / switch\n");
		error = ENOTDIR;
		goto done;
	}

	/*
	 * Find the preserved mounts - see if they are mounted. Get their root
	 * vnode if they are. If they aren't, leave rootvnode NULL which will
	 * be the signal to ignore this mount later on.
	 *
	 * Also get preserved mounts' new_covered_vp.
	 * Find the node representing the folder "dev" inside the directory
	 * incoming_rootvnode. Right now it's at "/incoming_vol_old_path/dev".
	 * Soon it will become /dev, which will be covered by the devfs mountpoint.
	 */
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_rootvnode, ctx, rootvnode);
		if (error) {
			printf("skipping preserved mountpoint because not found or error: %d: %s\n", error, pmi->pm_path);
			// not fatal. try the next one in the list.
			continue;
		}
		bool is_mountpoint = false;
		vnode_lock_spin(pmi->pm_rootvnode);
		if ((pmi->pm_rootvnode->v_flag & VROOT) != 0) {
			is_mountpoint = true;
		}
		vnode_unlock(pmi->pm_rootvnode);
		if (!is_mountpoint) {
			printf("skipping preserved mountpoint because not a mountpoint: %s\n", pmi->pm_path);
			vnode_put(pmi->pm_rootvnode);
			pmi->pm_rootvnode = NULLVP;
			// not fatal. try the next one in the list.
			continue;
		}

		error = vnode_lookupat(pmi->pm_path, 0, &pmi->pm_new_covered_vp, ctx, incoming_rootvnode);
		if (error) {
			printf("preserved new mount directory not found or error: %d: %s\n", error, pmi->pm_path);
			error = ENOENT;
			goto done;
		}
		if (vnode_vtype(pmi->pm_new_covered_vp) != VDIR) {
			printf("preserved new mount directory not a directory: %s\n", pmi->pm_path);
			error = ENOTDIR;
			goto done;
		}

		printf("will preserve mountpoint across pivot: /%s\n", pmi->pm_path);
	}

	/*
	 * --
	 * At this point, everything has been prepared and all error conditions
	 * have been checked. We check everything we can before this point;
	 * from now on we start making destructive changes, and we can't stop
	 * until we reach the end.
	 * ----
	 */

	/* this usecount is transferred to the mnt_vnodecovered */
	vnode_ref_ext(outgoing_vol_new_covered_vp, 0, VNODE_REF_FORCE);
	/* this usecount is transferred to set_rootvnode */
	vnode_ref_ext(incoming_rootvnode, 0, VNODE_REF_FORCE);


	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		/* this usecount is transferred to the mnt_vnodecovered */
		vnode_ref_ext(pmi->pm_new_covered_vp, 0, VNODE_REF_FORCE);

		/* The new_covered_vp is a mountpoint from now on. */
		vnode_lock_spin(pmi->pm_new_covered_vp);
		pmi->pm_new_covered_vp->v_flag |= VMOUNTEDHERE;
		vnode_unlock(pmi->pm_new_covered_vp);
	}

	/* The outgoing_vol_new_covered_vp is a mountpoint from now on. */
	vnode_lock_spin(outgoing_vol_new_covered_vp);
	outgoing_vol_new_covered_vp->v_flag |= VMOUNTEDHERE;
	vnode_unlock(outgoing_vol_new_covered_vp);


	/*
	 * Identify the mount_ts of the mounted filesystems that are being
	 * manipulated: outgoing rootfs, incoming rootfs, and the preserved
	 * mounts.
	 */
	outgoing = rootvnode->v_mount;
	incoming = incoming_rootvnode->v_mount;
	for (size_t i = 0; i < countof(preserved); i++) {
		struct preserved_mount *pmi = preserved[i];
		if (pmi->pm_rootvnode == NULLVP) {
			continue;
		}

		pmi->pm_mount = pmi->pm_rootvnode->v_mount;
	}

	lck_rw_lock_exclusive(&rootvnode_rw_lock);

	/* Setup incoming as the new rootfs */
	lck_rw_lock_exclusive(&incoming->mnt_rwlock);
	incoming_vol_old_covered_vp = incoming->mnt_vnodecovered;
	incoming->mnt_vnodecovered = NULLVP;
	strlcpy(incoming->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
	incoming->mnt_flag |= MNT_ROOTFS;
	lck_rw_done(&incoming->mnt_rwlock);

1755 	/*
1756 	 * The preserved mountpoints will now be moved to
1757 	 * incoming_rootvnode/pm_path, and then by the end of the function,
1758 	 * since incoming_rootvnode is going to /, the preserved mounts
1759 	 * will end up back at /pm_path
1760 	 */
1761 	for (size_t i = 0; i < countof(preserved); i++) {
1762 		struct preserved_mount *pmi = preserved[i];
1763 		if (pmi->pm_rootvnode == NULLVP) {
1764 			continue;
1765 		}
1766 
1767 		lck_rw_lock_exclusive(&pmi->pm_mount->mnt_rwlock);
1768 		pmi->pm_old_covered_vp = pmi->pm_mount->mnt_vnodecovered;
1769 		pmi->pm_mount->mnt_vnodecovered = pmi->pm_new_covered_vp;
1770 		vnode_lock_spin(pmi->pm_new_covered_vp);
1771 		pmi->pm_new_covered_vp->v_mountedhere = pmi->pm_mount;
1772 		SET(pmi->pm_new_covered_vp->v_flag, VMOUNTEDHERE);
1773 		vnode_unlock(pmi->pm_new_covered_vp);
1774 		lck_rw_done(&pmi->pm_mount->mnt_rwlock);
1775 	}
1776 
1777 	/*
1778 	 * The old root volume now covers outgoing_vol_new_covered_vp
1779 	 * on the new root volume. Remove the ROOTFS marker.
1780 	 * Now it is to be found at outgoing_vol_new_path
1781 	 */
1782 	lck_rw_lock_exclusive(&outgoing->mnt_rwlock);
1783 	outgoing->mnt_vnodecovered = outgoing_vol_new_covered_vp;
1784 	strlcpy(outgoing->mnt_vfsstat.f_mntonname, "/", MAXPATHLEN);
1785 	strlcat(outgoing->mnt_vfsstat.f_mntonname, outgoing_vol_new_path, MAXPATHLEN);
1786 	outgoing->mnt_flag &= ~MNT_ROOTFS;
1787 	vnode_lock_spin(outgoing_vol_new_covered_vp);
1788 	outgoing_vol_new_covered_vp->v_mountedhere = outgoing;
1789 	vnode_unlock(outgoing_vol_new_covered_vp);
1790 	lck_rw_done(&outgoing->mnt_rwlock);
1791 
1792 	if (!(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1793 	    (TAILQ_FIRST(&mountlist) == outgoing)) {
1794 		vfs_setmntsystem(outgoing);
1795 	}
1796 
1797 	/*
1798 	 * Finally, remove the mount_t linkage from the previously covered
1799 	 * vnodes on the old root volume. These were incoming_vol_old_path,
1800 	 * and each preserved mount's "/pm_path". The filesystems previously
1801 	 * mounted there have already been moved away.
1802 	 */
1803 	vnode_lock_spin(incoming_vol_old_covered_vp);
1804 	incoming_vol_old_covered_vp->v_flag &= ~VMOUNT;
1805 	incoming_vol_old_covered_vp->v_mountedhere = NULL;
1806 	vnode_unlock(incoming_vol_old_covered_vp);
1807 
1808 	for (size_t i = 0; i < countof(preserved); i++) {
1809 		struct preserved_mount *pmi = preserved[i];
1810 		if (pmi->pm_rootvnode == NULLVP) {
1811 			continue;
1812 		}
1813 
1814 		vnode_lock_spin(pmi->pm_old_covered_vp);
1815 		CLR(pmi->pm_old_covered_vp->v_flag, VMOUNTEDHERE);
1816 		pmi->pm_old_covered_vp->v_mountedhere = NULL;
1817 		vnode_unlock(pmi->pm_old_covered_vp);
1818 	}
1819 
1820 	/*
1821 	 * Clear the name cache since many cached names are now invalid.
1822 	 */
1823 	vfs_iterate(0 /* flags */, cache_purge_callback, NULL);
1824 
1825 	/*
1826 	 * Actually change the rootvnode! And finally drop the lock that
1827 	 * prevents concurrent vnode_lookups.
1828 	 */
1829 	set_rootvnode(incoming_rootvnode);
1830 	lck_rw_unlock_exclusive(&rootvnode_rw_lock);
1831 
1832 	if (!(incoming->mnt_kern_flag & MNTK_VIRTUALDEV) &&
1833 	    !(outgoing->mnt_kern_flag & MNTK_VIRTUALDEV)) {
1834 		/*
1835 		 * Switch the order of mount structures in the mountlist, new root
1836 		 * mount moves to the head of the list followed by /dev and the other
1837 		 * preserved mounts then all the preexisting mounts (old rootfs + any
1838 		 * others)
1839 		 */
1840 		mount_list_lock();
1841 		for (size_t i = 0; i < countof(preserved); i++) {
1842 			struct preserved_mount *pmi = preserved[i];
1843 			if (pmi->pm_rootvnode == NULLVP) {
1844 				continue;
1845 			}
1846 
1847 			TAILQ_REMOVE(&mountlist, pmi->pm_mount, mnt_list);
1848 			TAILQ_INSERT_HEAD(&mountlist, pmi->pm_mount, mnt_list);
1849 		}
1850 		TAILQ_REMOVE(&mountlist, incoming, mnt_list);
1851 		TAILQ_INSERT_HEAD(&mountlist, incoming, mnt_list);
1852 		mount_list_unlock();
1853 	}
1854 
1855 	/*
1856 	 * Fixups across all volumes
1857 	 */
1858 	vfs_iterate(0 /* flags */, mntonname_fixup_callback, NULL);
1859 	vfs_iterate(0 /* flags */, clear_mntk_backs_root_callback, NULL);
1860 
1861 	error = 0;
1862 
1863 done:
1864 	for (size_t i = 0; i < countof(preserved); i++) {
1865 		struct preserved_mount *pmi = preserved[i];
1866 
1867 		if (pmi->pm_rootvnode) {
1868 			vnode_put(pmi->pm_rootvnode);
1869 		}
1870 		if (pmi->pm_new_covered_vp) {
1871 			vnode_put(pmi->pm_new_covered_vp);
1872 		}
1873 		if (pmi->pm_old_covered_vp) {
1874 			vnode_rele(pmi->pm_old_covered_vp);
1875 		}
1876 	}
1877 
1878 	if (outgoing_vol_new_covered_vp) {
1879 		vnode_put(outgoing_vol_new_covered_vp);
1880 	}
1881 
1882 	if (incoming_vol_old_covered_vp) {
1883 		vnode_rele(incoming_vol_old_covered_vp);
1884 	}
1885 
1886 	if (incoming_rootvnode) {
1887 		vnode_put(incoming_rootvnode);
1888 	}
1889 
1890 	printf("%s : done shuffling mount points with error: %d\n", __FUNCTION__, error);
1891 	return error;
1892 }
1893 
1894 /*
1895  * Mount the Recovery volume of a container
1896  */
1897 int
1898 vfs_mount_recovery(void)
1899 {
1900 #if CONFIG_MOUNT_PREBOOTRECOVERY
1901 	int error = 0;
1902 
1903 	error = vnode_get(rootvnode);
1904 	if (error) {
1905 		/* root must be mounted first */
1906 		printf("vnode_get(rootvnode) failed with error %d\n", error);
1907 		return error;
1908 	}
1909 
1910 	char recoverypath[] = PLATFORM_RECOVERY_VOLUME_MOUNT_POINT; /* !const because of internal casting */
1911 
1912 	/* Mount the recovery volume */
1913 	printf("attempting kernel mount for recovery volume... \n");
1914 	error = kernel_mount(rootvnode->v_mount->mnt_vfsstat.f_fstypename, NULLVP, NULLVP,
1915 	    recoverypath, (rootvnode->v_mount), 0, 0, (KERNEL_MOUNT_RECOVERYVOL), vfs_context_kernel());
1916 
1917 	if (error) {
1918 		printf("Failed to mount recovery volume (%d)\n", error);
1919 	} else {
1920 		printf("mounted recovery volume\n");
1921 	}
1922 
1923 	vnode_put(rootvnode);
1924 	return error;
1925 #else
1926 	return 0;
1927 #endif
1928 }
1929 
1930 /*
1931  * Lookup a mount point by filesystem identifier.
1932  */
1933 
1934 struct mount *
1935 vfs_getvfs(fsid_t *fsid)
1936 {
1937 	return mount_list_lookupby_fsid(fsid, 0, 0);
1938 }
1939 
1940 static struct mount *
1941 vfs_getvfs_locked(fsid_t *fsid)
1942 {
1943 	return mount_list_lookupby_fsid(fsid, 1, 0);
1944 }
1945 
1946 struct mount *
1947 vfs_getvfs_with_vfsops(fsid_t *fsid, const struct vfsops * const ops)
1948 {
1949 	mount_t mp = mount_list_lookupby_fsid(fsid, 0, 0);
1950 
1951 	if (mp != NULL && mp->mnt_op != ops) {
1952 		mp = NULL;
1953 	}
1954 	return mp;
1955 }
1956 
1957 struct mount *
1958 vfs_getvfs_by_mntonname(char *path)
1959 {
1960 	mount_t retmp = (mount_t)0;
1961 	mount_t mp;
1962 
1963 	mount_list_lock();
1964 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
1965 		if (!strncmp(mp->mnt_vfsstat.f_mntonname, path,
1966 		    sizeof(mp->mnt_vfsstat.f_mntonname))) {
1967 			retmp = mp;
1968 			if (mount_iterref(retmp, 1)) {
1969 				retmp = NULL;
1970 			}
1971 			goto out;
1972 		}
1973 	}
1974 out:
1975 	mount_list_unlock();
1976 	return retmp;
1977 }
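/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * a successful vfs_getvfs_by_mntonname() returns the mount with an iteration
 * reference held (via mount_iterref()), which the caller is expected to drop:
 *
 *	mount_t mp = vfs_getvfs_by_mntonname("/private/var");
 *	if (mp != NULL) {
 *		// ... inspect mp->mnt_vfsstat ...
 *		mount_iterdrop(mp);	// release the iteration reference
 *	}
 */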
1978 
1979 /* generation number for creation of new fsids */
1980 u_short mntid_gen = 0;
1981 /*
1982  * Get a new unique fsid
1983  */
1984 void
1985 vfs_getnewfsid(struct mount *mp)
1986 {
1987 	fsid_t tfsid;
1988 	int mtype;
1989 
1990 	mount_list_lock();
1991 
1992 	/* generate a new fsid */
1993 	mtype = mp->mnt_vtable->vfc_typenum;
1994 	if (++mntid_gen == 0) {
1995 		mntid_gen++;
1996 	}
1997 	tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
1998 	tfsid.val[1] = mtype;
1999 
2000 	while (vfs_getvfs_locked(&tfsid)) {
2001 		if (++mntid_gen == 0) {
2002 			mntid_gen++;
2003 		}
2004 		tfsid.val[0] = makedev(nblkdev + mtype, mntid_gen);
2005 	}
2006 
2007 	mp->mnt_vfsstat.f_fsid.val[0] = tfsid.val[0];
2008 	mp->mnt_vfsstat.f_fsid.val[1] = tfsid.val[1];
2009 	mount_list_unlock();
2010 }
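/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the fsid minted above is unique among mounted filesystems, so it can later
 * be translated back to its mount_t:
 *
 *	fsid_t fsid = mp->mnt_vfsstat.f_fsid;	// as set by vfs_getnewfsid()
 *	mount_t found = vfs_getvfs(&fsid);	// mp, or NULL once unmounted
 */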
2011 
2012 /*
2013  * Routines having to do with the management of the vnode table.
2014  */
2015 extern int(**dead_vnodeop_p)(void *);
2016 long numvnodes, freevnodes, deadvnodes, async_work_vnodes;
2017 long busyvnodes = 0;
2018 long deadvnodes_noreuse = 0;
2019 int32_t freeablevnodes = 0;
2020 uint64_t allocedvnodes = 0;
2021 uint64_t deallocedvnodes = 0;
2022 
2023 
2024 int async_work_timed_out = 0;
2025 int async_work_handled = 0;
2026 int dead_vnode_wanted = 0;
2027 int dead_vnode_waited = 0;
2028 
2029 /*
2030  * Move a vnode from one mount queue to another.
2031  */
2032 static void
2033 insmntque(vnode_t vp, mount_t mp)
2034 {
2035 	mount_t lmp;
2036 	/*
2037 	 * Delete from old mount point vnode list, if on one.
2038 	 */
2039 	if ((lmp = vp->v_mount) != NULL && lmp != dead_mountp) {
2040 		if ((vp->v_lflag & VNAMED_MOUNT) == 0) {
2041 			panic("insmntque: vp not in mount vnode list");
2042 		}
2043 		vp->v_lflag &= ~VNAMED_MOUNT;
2044 
2045 		mount_lock_spin(lmp);
2046 
2047 		mount_drop(lmp, 1);
2048 
2049 		if (vp->v_mntvnodes.tqe_next == NULL) {
2050 			if (TAILQ_LAST(&lmp->mnt_vnodelist, vnodelst) == vp) {
2051 				TAILQ_REMOVE(&lmp->mnt_vnodelist, vp, v_mntvnodes);
2052 			} else if (TAILQ_LAST(&lmp->mnt_newvnodes, vnodelst) == vp) {
2053 				TAILQ_REMOVE(&lmp->mnt_newvnodes, vp, v_mntvnodes);
2054 			} else if (TAILQ_LAST(&lmp->mnt_workerqueue, vnodelst) == vp) {
2055 				TAILQ_REMOVE(&lmp->mnt_workerqueue, vp, v_mntvnodes);
2056 			}
2057 		} else {
2058 			vp->v_mntvnodes.tqe_next->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_prev;
2059 			*vp->v_mntvnodes.tqe_prev = vp->v_mntvnodes.tqe_next;
2060 		}
2061 		vp->v_mntvnodes.tqe_next = NULL;
2062 		vp->v_mntvnodes.tqe_prev = NULL;
2063 		mount_unlock(lmp);
2064 		vnode_drop(vp);
2065 		return;
2066 	}
2067 
2068 	/*
2069 	 * Insert into list of vnodes for the new mount point, if available.
2070 	 */
2071 	if ((vp->v_mount = mp) != NULL) {
2072 		mount_lock_spin(mp);
2073 		if ((vp->v_mntvnodes.tqe_next != 0) && (vp->v_mntvnodes.tqe_prev != 0)) {
2074 			panic("vp already in mount list");
2075 		}
2076 		if (mp->mnt_lflag & MNT_LITER) {
2077 			TAILQ_INSERT_HEAD(&mp->mnt_newvnodes, vp, v_mntvnodes);
2078 		} else {
2079 			TAILQ_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes);
2080 		}
2081 		if (vp->v_lflag & VNAMED_MOUNT) {
2082 			panic("insmntque: vp already in mount vnode list");
2083 		}
2084 		vnode_hold(vp);
2085 		vp->v_lflag |= VNAMED_MOUNT;
2086 		mount_ref(mp, 1);
2087 		mount_unlock(mp);
2088 	}
2089 }
2090 
2091 
2092 /*
2093  * Create a vnode for a block device.
2094  * Used for root filesystem, argdev, and swap areas.
2095  * Also used for memory file system special devices.
2096  */
2097 int
2098 bdevvp(dev_t dev, vnode_t *vpp)
2099 {
2100 	vnode_t nvp;
2101 	int     error;
2102 	struct vnode_fsparam vfsp;
2103 	struct vfs_context context;
2104 
2105 	if (dev == NODEV) {
2106 		*vpp = NULLVP;
2107 		return ENODEV;
2108 	}
2109 
2110 	context.vc_thread = current_thread();
2111 	context.vc_ucred = FSCRED;
2112 
2113 	vfsp.vnfs_mp = (struct mount *)0;
2114 	vfsp.vnfs_vtype = VBLK;
2115 	vfsp.vnfs_str = "bdevvp";
2116 	vfsp.vnfs_dvp = NULL;
2117 	vfsp.vnfs_fsnode = NULL;
2118 	vfsp.vnfs_cnp = NULL;
2119 	vfsp.vnfs_vops = spec_vnodeop_p;
2120 	vfsp.vnfs_rdev = dev;
2121 	vfsp.vnfs_filesize = 0;
2122 
2123 	vfsp.vnfs_flags = VNFS_NOCACHE | VNFS_CANTCACHE;
2124 
2125 	vfsp.vnfs_marksystem = 0;
2126 	vfsp.vnfs_markroot = 0;
2127 
2128 	if ((error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &nvp))) {
2129 		*vpp = NULLVP;
2130 		return error;
2131 	}
2132 	vnode_lock_spin(nvp);
2133 	nvp->v_flag |= VBDEVVP;
2134 	nvp->v_tag = VT_NON;    /* set this to VT_NON so during aliasing it can be replaced */
2135 	vnode_unlock(nvp);
2136 	if ((error = vnode_ref(nvp))) {
2137 		panic("bdevvp failed: vnode_ref");
2138 		return error;
2139 	}
2140 	if ((error = VNOP_FSYNC(nvp, MNT_WAIT, &context))) {
2141 		panic("bdevvp failed: fsync");
2142 		return error;
2143 	}
2144 	if ((error = buf_invalidateblks(nvp, BUF_WRITE_DATA, 0, 0))) {
2145 		panic("bdevvp failed: invalidateblks");
2146 		return error;
2147 	}
2148 
2149 #if CONFIG_MACF
2150 	/*
2151 	 * XXXMAC: We can't put a MAC check here, the system will
2152 	 * panic without this vnode.
2153 	 */
2154 #endif /* MAC */
2155 
2156 	if ((error = VNOP_OPEN(nvp, FREAD, &context))) {
2157 		panic("bdevvp failed: open");
2158 		return error;
2159 	}
2160 	*vpp = nvp;
2161 
2162 	return 0;
2163 }
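/*
 * Illustrative sketch (hypothetical caller, not part of the original source):
 * early root setup can wrap the root device with bdevvp(), as the comment
 * above describes; `rootdev` is the global root dev_t:
 *
 *	vnode_t rootdev_vp = NULLVP;
 *	if (bdevvp(rootdev, &rootdev_vp) == 0) {
 *		// rootdev_vp is an opened (FREAD), referenced VBLK vnode
 *	}
 */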
2164 
2165 /*
2166  * Check to see if the new vnode represents a special device
2167  * for which we already have a vnode (either because of
2168  * bdevvp() or because of a different vnode representing
2169  * the same block device). If such an alias exists, deallocate
2170  * the existing contents and return the aliased vnode. The
2171  * caller is responsible for filling it with its new contents.
2172  */
2173 static vnode_t
2174 checkalias(struct vnode *nvp, dev_t nvp_rdev)
2175 {
2176 	struct vnode *vp;
2177 	struct vnode **vpp;
2178 	struct specinfo *sin = NULL;
2179 	int vid = 0;
2180 
2181 	vpp = &speclisth[SPECHASH(nvp_rdev)];
2182 loop:
2183 	SPECHASH_LOCK();
2184 
2185 	for (vp = *vpp; vp; vp = vp->v_specnext) {
2186 		if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
2187 			vid = vp->v_id;
2188 			vnode_hold(vp);
2189 			break;
2190 		}
2191 	}
2192 	SPECHASH_UNLOCK();
2193 
2194 	if (vp) {
2195 found_alias:
2196 		if (vnode_getwithvid(vp, vid)) {
2197 			vnode_drop(vp);
2198 			goto loop;
2199 		}
2200 		vnode_drop(vp);
2201 		/*
2202 		 * Termination state is checked in vnode_getwithvid
2203 		 */
2204 		vnode_lock(vp);
2205 
2206 		/*
2207 		 * Alias, but not in use, so flush it out.
2208 		 */
2209 		if ((vp->v_iocount == 1) && (vp->v_usecount == 0)) {
2210 			vnode_hold(vp);
2211 			vnode_reclaim_internal(vp, 1, 1, 0);
2212 			vnode_put_locked(vp);
2213 			vnode_drop_and_unlock(vp);
2214 			goto loop;
2215 		}
2216 	}
2217 	if (vp == NULL || vp->v_tag != VT_NON) {
2218 		if (sin == NULL) {
2219 			sin = zalloc_flags(specinfo_zone, Z_WAITOK | Z_ZERO);
2220 		} else {
2221 			bzero(sin, sizeof(struct specinfo));
2222 		}
2223 
2224 		nvp->v_specinfo = sin;
2225 		nvp->v_rdev = nvp_rdev;
2226 		nvp->v_specflags = 0;
2227 		nvp->v_speclastr = -1;
2228 		nvp->v_specinfo->si_opencount = 0;
2229 		nvp->v_specinfo->si_initted = 0;
2230 		nvp->v_specinfo->si_throttleable = 0;
2231 		nvp->v_specinfo->si_devbsdunit = LOWPRI_MAX_NUM_DEV;
2232 
2233 		SPECHASH_LOCK();
2234 
2235 		/* We dropped the lock, someone could have added */
2236 		if (vp == NULLVP) {
2237 			for (vp = *vpp; vp; vp = vp->v_specnext) {
2238 				if (nvp_rdev == vp->v_rdev && nvp->v_type == vp->v_type) {
2239 					vid = vp->v_id;
2240 					vnode_hold(vp);
2241 					SPECHASH_UNLOCK();
2242 					goto found_alias;
2243 				}
2244 			}
2245 		}
2246 
2247 		nvp->v_hashchain = vpp;
2248 		nvp->v_specnext = *vpp;
2249 		*vpp = nvp;
2250 
2251 		if (vp != NULLVP) {
2252 			nvp->v_specflags |= SI_ALIASED;
2253 			vp->v_specflags |= SI_ALIASED;
2254 			SPECHASH_UNLOCK();
2255 			vnode_put_locked(vp);
2256 			vnode_unlock(vp);
2257 		} else {
2258 			SPECHASH_UNLOCK();
2259 		}
2260 
2261 		return NULLVP;
2262 	}
2263 
2264 	if (sin) {
2265 		zfree(specinfo_zone, sin);
2266 	}
2267 
2268 	if ((vp->v_flag & (VBDEVVP | VDEVFLUSH)) != 0) {
2269 		return vp;
2270 	}
2271 
2272 	panic("checkalias with VT_NON vp that shouldn't: %p", vp);
2273 
2274 	return vp;
2275 }
2276 
2277 
2278 /*
2279  * Get a reference on a particular vnode and lock it if requested.
2280  * If the vnode was on the inactive list, remove it from the list.
2281  * If the vnode was on the free list, remove it from the list and
2282  * move it to inactive list as needed.
2283  * The vnode lock bit is set if the vnode is being eliminated in
2284  * vgone. The process is awakened when the transition is completed,
2285  * and an error returned to indicate that the vnode is no longer
2286  * usable (possibly having been changed to a new file system type).
2287  */
2288 int
2289 vget_internal(vnode_t vp, int vid, int vflags)
2290 {
2291 	int error = 0;
2292 
2293 	vnode_lock_spin(vp);
2294 
2295 	if ((vflags & VNODE_WRITEABLE) && (vp->v_writecount == 0)) {
2296 		/*
2297 		 * vnode to be returned only if it has writers opened
2298 		 */
2299 		error = EINVAL;
2300 	} else {
2301 		error = vnode_getiocount(vp, vid, vflags);
2302 	}
2303 
2304 	vnode_unlock(vp);
2305 
2306 	return error;
2307 }
2308 
2309 /*
2310  * Returns:	0			Success
2311  *		ENOENT			No such file or directory [terminating]
2312  */
2313 int
2314 vnode_ref(vnode_t vp)
2315 {
2316 	return vnode_ref_ext(vp, 0, 0);
2317 }
2318 
2319 /*
2320  * Returns:	0			Success
2321  *		ENOENT			No such file or directory [terminating]
2322  */
2323 int
2324 vnode_ref_ext(vnode_t vp, int fmode, int flags)
2325 {
2326 	int     error = 0;
2327 
2328 	vnode_lock_spin(vp);
2329 
2330 	/*
2331 	 * once all the current call sites have been fixed to ensure they have
2332 	 * taken an iocount, we can toughen this assert up and insist that the
2333 	 * iocount is non-zero... a non-zero usecount doesn't ensure correctness
2334 	 */
2335 	if (vp->v_iocount <= 0 && vp->v_usecount <= 0) {
2336 		panic("vnode_ref_ext: vp %p has no valid reference %d, %d", vp, vp->v_iocount, vp->v_usecount);
2337 	}
2338 
2339 	/*
2340 	 * if you are the owner of drain/termination, can acquire usecount
2341 	 */
2342 	if ((flags & VNODE_REF_FORCE) == 0) {
2343 		if ((vp->v_lflag & (VL_DRAIN | VL_TERMINATE | VL_DEAD))) {
2344 			if (vp->v_owner != current_thread()) {
2345 				error = ENOENT;
2346 				goto out;
2347 			}
2348 		}
2349 	}
2350 
2351 	/* Enable atomic ops on v_usecount without the vnode lock */
2352 	os_atomic_inc(&vp->v_usecount, relaxed);
2353 
2354 	if (fmode & FWRITE) {
2355 		if (++vp->v_writecount <= 0) {
2356 			panic("vnode_ref_ext: v_writecount");
2357 		}
2358 	}
2359 	if (fmode & O_EVTONLY) {
2360 		if (++vp->v_kusecount <= 0) {
2361 			panic("vnode_ref_ext: v_kusecount");
2362 		}
2363 	}
2364 	if (vp->v_flag & VRAGE) {
2365 		struct  uthread *ut;
2366 
2367 		ut = current_uthread();
2368 
2369 		if (!(current_proc()->p_lflag & P_LRAGE_VNODES) &&
2370 		    !(ut->uu_flag & UT_RAGE_VNODES)) {
2371 			/*
2372 			 * a 'normal' process accessed this vnode
2373 			 * so make sure its no longer marked
2374 			 * for rapid aging...  also, make sure
2375 			 * it gets removed from the rage list...
2376 			 * when v_usecount drops back to 0, it
2377 			 * will be put back on the real free list
2378 			 */
2379 			vp->v_flag &= ~VRAGE;
2380 			vp->v_references = 0;
2381 			vnode_list_remove(vp);
2382 		}
2383 	}
2384 	if (vp->v_usecount == 1 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
2385 		if (vp->v_ubcinfo) {
2386 			vnode_lock_convert(vp);
2387 			memory_object_mark_used(vp->v_ubcinfo->ui_control);
2388 		}
2389 	}
2390 out:
2391 	vnode_unlock(vp);
2392 
2393 	return error;
2394 }
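/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the typical pattern is to hold an iocount while taking the long-term
 * usecount, then drop the iocount:
 *
 *	if (vnode_getwithref(vp) == 0) {	// take an iocount
 *		error = vnode_ref(vp);		// usecount; ENOENT if terminating
 *		vnode_put(vp);			// drop the iocount
 *	}
 */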
2395 
2396 
2397 boolean_t
2398 vnode_on_reliable_media(vnode_t vp)
2399 {
2400 	mount_t mp = vp->v_mount;
2401 
2402 	/*
2403 	 * A NULL mountpoint would imply it's not attached to any filesystem.
2404 	 * This can only happen with a vnode created by bdevvp(). We'll consider
2405 	 * those reliable, as the primary use of this function is to determine
2406 	 * which vnodes are to be handed off to the async cleaner thread for
2407 	 * reclaim.
2408 	 */
2409 	if (!mp || (!(mp->mnt_kern_flag & MNTK_VIRTUALDEV) && (mp->mnt_flag & MNT_LOCAL))) {
2410 		return TRUE;
2411 	}
2412 
2413 	return FALSE;
2414 }
2415 
2416 static void
2417 vnode_async_list_add_locked(vnode_t vp)
2418 {
2419 	if (VONLIST(vp) || (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
2420 		panic("vnode_async_list_add: %p is in wrong state", vp);
2421 	}
2422 
2423 	TAILQ_INSERT_HEAD(&vnode_async_work_list, vp, v_freelist);
2424 	vp->v_listflag |= VLIST_ASYNC_WORK;
2425 
2426 	async_work_vnodes++;
2427 	if (!(vp->v_listflag & VLIST_NO_REUSE)) {
2428 		reusablevnodes++;
2429 	}
2430 	if (vp->v_flag & VCANDEALLOC) {
2431 		os_atomic_dec(&busyvnodes, relaxed);
2432 	}
2433 }
2434 
2435 static void
2436 vnode_async_list_add(vnode_t vp)
2437 {
2438 	vnode_list_lock();
2439 
2440 	if (VONLIST(vp)) {
2441 		if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
2442 			vnode_list_remove_locked(vp);
2443 			vnode_async_list_add_locked(vp);
2444 		}
2445 	} else {
2446 		vnode_async_list_add_locked(vp);
2447 	}
2448 
2449 	vnode_list_unlock();
2450 
2451 	wakeup(&vnode_async_work_list);
2452 }
2453 
2454 
2455 /*
2456  * put the vnode on appropriate free list.
2457  * called with vnode LOCKED
2458  */
2459 static void
2460 vnode_list_add(vnode_t vp)
2461 {
2462 	boolean_t need_dead_wakeup = FALSE;
2463 	bool no_busy_decrement = false;
2464 
2465 #if DIAGNOSTIC
2466 	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2467 #endif
2468 
2469 again:
2470 
2471 	/*
2472 	 * if it is already on a list or non zero references return
2473 	 */
2474 	if (VONLIST(vp) || (vp->v_usecount != 0) || (vp->v_iocount != 0) || (vp->v_lflag & VL_TERMINATE)) {
2475 		return;
2476 	}
2477 
2478 	/*
2479 	 * In vclean, we might have deferred ditching locked buffers
2480 	 * because something was still referencing them (indicated by
2481 	 * usecount).  We can ditch them now.
2482 	 */
2483 	if (ISSET(vp->v_lflag, VL_DEAD)
2484 	    && (!LIST_EMPTY(&vp->v_cleanblkhd) || !LIST_EMPTY(&vp->v_dirtyblkhd))) {
2485 		++vp->v_iocount;        // Probably not necessary, but harmless
2486 #ifdef CONFIG_IOCOUNT_TRACE
2487 		record_vp(vp, 1);
2488 #endif
2489 		vnode_unlock(vp);
2490 		buf_invalidateblks(vp, BUF_INVALIDATE_LOCKED, 0, 0);
2491 		vnode_lock(vp);
2492 		vnode_dropiocount(vp);
2493 		goto again;
2494 	}
2495 
2496 	vnode_list_lock();
2497 
2498 	if (!(vp->v_lflag & VL_DEAD) && (vp->v_listflag & VLIST_NO_REUSE)) {
2499 		if (!(vp->v_listflag & VLIST_ASYNC_WORK)) {
2500 			vnode_async_list_add_locked(vp);
2501 		}
2502 		no_busy_decrement = true;
2503 	} else if ((vp->v_flag & VRAGE) && !(vp->v_lflag & VL_DEAD)) {
2504 		/*
2505 		 * add the new guy to the appropriate end of the RAGE list
2506 		 */
2507 		if ((vp->v_flag & VAGE)) {
2508 			TAILQ_INSERT_HEAD(&vnode_rage_list, vp, v_freelist);
2509 		} else {
2510 			TAILQ_INSERT_TAIL(&vnode_rage_list, vp, v_freelist);
2511 		}
2512 
2513 		vp->v_listflag |= VLIST_RAGE;
2514 		ragevnodes++;
2515 		reusablevnodes++;
2516 		wakeup_laundry_thread();
2517 
2518 		/*
2519 		 * reset the timestamp for the last inserted vp on the RAGE
2520 		 * queue to let new_vnode know that it's not ok to start stealing
2521 		 * from this list... as long as we're actively adding to this list
2522 		 * we'll push out the vnodes we want to donate to the real free list
2523 		 * once we stop pushing, we'll let some time elapse before we start
2524 		 * stealing them in the new_vnode routine
2525 		 */
2526 		microuptime(&rage_tv);
2527 	} else {
2528 		/*
2529 		 * if VL_DEAD, insert it at head of the dead list
2530 		 * else insert at tail of LRU list or at head if VAGE is set
2531 		 */
2532 		if ((vp->v_lflag & VL_DEAD)) {
2533 			if (vp->v_flag & VCANDEALLOC) {
2534 				TAILQ_INSERT_TAIL(&vnode_dead_list, vp, v_freelist);
2535 				if (vp->v_listflag & VLIST_NO_REUSE) {
2536 					deadvnodes_noreuse++;
2537 				}
2538 			} else {
2539 				TAILQ_INSERT_HEAD(&vnode_dead_list, vp, v_freelist);
2540 			}
2541 			vp->v_listflag |= VLIST_DEAD;
2542 			deadvnodes++;
2543 
2544 			if (dead_vnode_wanted) {
2545 				dead_vnode_wanted--;
2546 				need_dead_wakeup = TRUE;
2547 			}
2548 		} else if ((vp->v_flag & VAGE)) {
2549 			TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2550 			vp->v_flag &= ~VAGE;
2551 			freevnodes++;
2552 			reusablevnodes++;
2553 			wakeup_laundry_thread();
2554 		} else {
2555 			TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2556 			freevnodes++;
2557 			reusablevnodes++;
2558 			wakeup_laundry_thread();
2559 		}
2560 	}
2561 	if ((vp->v_flag & VCANDEALLOC) && !no_busy_decrement) {
2562 		os_atomic_dec(&busyvnodes, relaxed);
2563 	}
2564 	vnode_list_unlock();
2565 
2566 	if (need_dead_wakeup == TRUE) {
2567 		wakeup_one((caddr_t)&dead_vnode_wanted);
2568 	}
2569 }
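/*
 * Summary of the list selection above (editorial note, derived from the
 * code in vnode_list_add()):
 *
 *	VLIST_NO_REUSE, not dead   -> async work list
 *	VRAGE, not dead            -> rage list (head if VAGE, else tail)
 *	VL_DEAD                    -> dead list (tail if VCANDEALLOC, else head)
 *	VAGE                       -> head of free list (VAGE is cleared)
 *	otherwise                  -> tail of free list
 */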
2570 
2571 
2572 /*
2573  * remove the vnode from appropriate free list.
2574  * called with vnode LOCKED and
2575  * the list lock held
2576  */
2577 static void
2578 vnode_list_remove_locked(vnode_t vp)
2579 {
2580 	if (VONLIST(vp)) {
2581 		/*
2582 		 * the v_listflag field is
2583 		 * protected by the vnode_list_lock
2584 		 */
2585 		if (vp->v_listflag & VLIST_RAGE) {
2586 			VREMRAGE("vnode_list_remove", vp);
2587 		} else if (vp->v_listflag & VLIST_DEAD) {
2588 			VREMDEAD("vnode_list_remove", vp);
2589 			wakeup_laundry_thread();
2590 		} else if (vp->v_listflag & VLIST_ASYNC_WORK) {
2591 			VREMASYNC_WORK("vnode_list_remove", vp);
2592 		} else {
2593 			VREMFREE("vnode_list_remove", vp);
2594 		}
2595 		if (vp->v_flag & VCANDEALLOC) {
2596 			os_atomic_inc(&busyvnodes, relaxed);
2597 		}
2598 	}
2599 }
2600 
2601 
2602 /*
2603  * remove the vnode from appropriate free list.
2604  * called with vnode LOCKED
2605  */
2606 static void
2607 vnode_list_remove(vnode_t vp)
2608 {
2609 #if DIAGNOSTIC
2610 	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2611 #endif
2612 	/*
2613 	 * we want to avoid taking the list lock
2614 	 * in the case where we're not on the free
2615 	 * list... this will be true for most
2616 	 * directories and any currently in use files
2617 	 *
2618 	 * we're guaranteed that we can't go from
2619 	 * the not-on-list state to the on-list
2620 	 * state since we hold the vnode lock...
2621 	 * all calls to vnode_list_add are done
2622 	 * under the vnode lock... so we can
2623 	 * check for that condition (the prevalent one)
2624 	 * without taking the list lock
2625 	 */
2626 	if (VONLIST(vp)) {
2627 		vnode_list_lock();
2628 		/*
2629 		 * however, we're not guaranteed that
2630 		 * we won't go from the on-list state
2631 		 * to the not-on-list state until we
2632 		 * hold the vnode_list_lock... this
2633 		 * is due to "new_vnode" removing vnodes
2634 	 * from the free list under the list_lock
2635 		 * w/o the vnode lock... so we need to
2636 		 * check again whether we're currently
2637 		 * on the free list
2638 		 */
2639 		vnode_list_remove_locked(vp);
2640 
2641 		vnode_list_unlock();
2642 	}
2643 }
2644 
2645 
2646 void
2647 vnode_rele(vnode_t vp)
2648 {
2649 	vnode_rele_internal(vp, 0, 0, 0);
2650 }
2651 
2652 
2653 void
2654 vnode_rele_ext(vnode_t vp, int fmode, int dont_reenter)
2655 {
2656 	vnode_rele_internal(vp, fmode, dont_reenter, 0);
2657 }
2658 
2659 
2660 void
2661 vnode_rele_internal(vnode_t vp, int fmode, int dont_reenter, int locked)
2662 {
2663 	int32_t old_usecount;
2664 
2665 	if (!locked) {
2666 		vnode_hold(vp);
2667 		vnode_lock_spin(vp);
2668 	}
2669 #if DIAGNOSTIC
2670 	else {
2671 		lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
2672 	}
2673 #endif
2674 	/* Enable atomic ops on v_usecount without the vnode lock */
2675 	old_usecount = os_atomic_dec_orig(&vp->v_usecount, relaxed);
2676 	if (old_usecount < 1) {
2677 		/*
2678 		 * Because we allow atomic ops on usecount (in lookup only, under
2679 		 * specific conditions of already having a usecount) it is
2680 		 * possible that when the vnode is examined, its usecount is
2681 		 * different than what will be printed in this panic message.
2682 		 */
2683 		panic("vnode_rele_ext: vp %p usecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.",
2684 		    vp, old_usecount - 1, vp->v_tag, vp->v_type, vp->v_flag);
2685 	}
2686 
2687 	if (fmode & FWRITE) {
2688 		if (--vp->v_writecount < 0) {
2689 			panic("vnode_rele_ext: vp %p writecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_writecount, vp->v_tag, vp->v_type, vp->v_flag);
2690 		}
2691 	}
2692 	if (fmode & O_EVTONLY) {
2693 		if (--vp->v_kusecount < 0) {
2694 			panic("vnode_rele_ext: vp %p kusecount -ve : %d.  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_tag, vp->v_type, vp->v_flag);
2695 		}
2696 	}
2697 	if (vp->v_kusecount > vp->v_usecount) {
2698 		panic("vnode_rele_ext: vp %p kusecount(%d) out of balance with usecount(%d).  v_tag = %d, v_type = %d, v_flag = %x.", vp, vp->v_kusecount, vp->v_usecount, vp->v_tag, vp->v_type, vp->v_flag);
2699 	}
2700 
2701 	if ((vp->v_iocount > 0) || (vp->v_usecount > 0)) {
2702 		/*
2703 		 * vnode is still busy... if we're the last
2704 		 * usecount, mark for a future call to VNOP_INACTIVE
2705 		 * when the iocount finally drops to 0
2706 		 */
2707 		if (vp->v_usecount == 0) {
2708 			vp->v_lflag |= VL_NEEDINACTIVE;
2709 			vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
2710 		}
2711 		goto done;
2712 	}
2713 	vp->v_flag  &= ~(VNOCACHE_DATA | VRAOFF | VOPENEVT);
2714 
2715 	if (ISSET(vp->v_lflag, VL_TERMINATE | VL_DEAD) || dont_reenter) {
2716 		/*
2717 		 * vnode is being cleaned, or
2718 		 * we've requested that we don't reenter
2719 		 * the filesystem on this release...in
2720 		 * the latter case, we'll mark the vnode aged
2721 		 */
2722 		if (dont_reenter) {
2723 			if (!(vp->v_lflag & (VL_TERMINATE | VL_DEAD | VL_MARKTERM))) {
2724 				vp->v_lflag |= VL_NEEDINACTIVE;
2725 
2726 				if (vnode_on_reliable_media(vp) == FALSE || vp->v_flag & VISDIRTY) {
2727 					vnode_async_list_add(vp);
2728 					goto done;
2729 				}
2730 			}
2731 			vp->v_flag |= VAGE;
2732 		}
2733 		vnode_list_add(vp);
2734 
2735 		goto done;
2736 	}
2737 	/*
2738 	 * at this point both the iocount and usecount
2739 	 * are zero
2740 	 * pick up an iocount so that we can call
2741 	 * VNOP_INACTIVE with the vnode lock unheld
2742 	 */
2743 	vp->v_iocount++;
2744 #ifdef CONFIG_IOCOUNT_TRACE
2745 	record_vp(vp, 1);
2746 #endif
2747 	vp->v_lflag &= ~VL_NEEDINACTIVE;
2748 
2749 	if (UBCINFOEXISTS(vp)) {
2750 		ubc_cs_free_and_vnode_unlock(vp);
2751 	} else {
2752 		vnode_unlock(vp);
2753 	}
2754 
2755 	VNOP_INACTIVE(vp, vfs_context_current());
2756 
2757 	vnode_lock_spin(vp);
2758 
2759 	/*
2760 	 * because we dropped the vnode lock to call VNOP_INACTIVE
2761 	 * the state of the vnode may have changed... we may have
2762 	 * picked up an iocount, usecount or the MARKTERM may have
2763 	 * been set... we need to reevaluate the reference counts
2764 	 * to determine if we can call vnode_reclaim_internal at
2765 	 * this point... if the reference counts are up, we'll pick
2766 	 * up the MARKTERM state when they get subsequently dropped
2767 	 */
2768 	if ((vp->v_iocount == 1) && (vp->v_usecount == 0) &&
2769 	    ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM)) {
2770 		struct  uthread *ut;
2771 
2772 		ut = current_uthread();
2773 
2774 		if (ut->uu_defer_reclaims) {
2775 			vp->v_defer_reclaimlist = ut->uu_vreclaims;
2776 			ut->uu_vreclaims = vp;
2777 			goto done;
2778 		}
2779 		vnode_lock_convert(vp);
2780 		vnode_reclaim_internal(vp, 1, 1, 0);
2781 	}
2782 	vnode_dropiocount(vp);
2783 	vnode_list_add(vp);
2784 done:
2785 	if (vp->v_usecount == 0 && vp->v_type == VREG && !(vp->v_flag & VSYSTEM)) {
2786 		if (vp->v_ubcinfo) {
2787 			vnode_lock_convert(vp);
2788 			memory_object_mark_unused(vp->v_ubcinfo->ui_control, (vp->v_flag & VRAGE) == VRAGE);
2789 		}
2790 	}
2791 	if (!locked) {
2792 		vnode_drop_and_unlock(vp);
2793 	}
2794 	return;
2795 }
2796 
2797 /*
2798  * Remove any vnodes in the vnode table belonging to mount point mp.
2799  *
2800  * If MNT_NOFORCE is specified, there should not be any active ones,
2801  * return error if any are found (nb: this is a user error, not a
2802  * system error). If MNT_FORCE is specified, detach any active vnodes
2803  * that are found.
2804  */
2805 
2806 int
2807 vflush(struct mount *mp, struct vnode *skipvp, int flags)
2808 {
2809 	struct vnode *vp;
2810 	int busy = 0;
2811 	int reclaimed = 0;
2812 	int retval;
2813 	unsigned int vid;
2814 	bool first_try = true;
2815 
2816 	/*
2817 	 * See comments in vnode_iterate() for the rationale for this lock
2818 	 */
2819 	mount_iterate_lock(mp);
2820 
2821 	mount_lock(mp);
2822 	vnode_iterate_setup(mp);
2823 	/*
2824 	 * On regular unmounts (not forced), do a
2825 	 * quick check for vnodes in use. This
2826 	 * preserves the caching of vnodes. The automounter
2827 	 * tries unmounting every so often to see whether
2828 	 * it is still busy or not.
2829 	 */
2830 	if (((flags & FORCECLOSE) == 0) && ((mp->mnt_kern_flag & MNTK_UNMOUNT_PREFLIGHT) != 0)) {
2831 		if (vnode_umount_preflight(mp, skipvp, flags)) {
2832 			vnode_iterate_clear(mp);
2833 			mount_unlock(mp);
2834 			mount_iterate_unlock(mp);
2835 			return EBUSY;
2836 		}
2837 	}
2838 loop:
2839 	/* If it returns 0 then there is nothing to do */
2840 	retval = vnode_iterate_prepare(mp);
2841 
2842 	if (retval == 0) {
2843 		vnode_iterate_clear(mp);
2844 		mount_unlock(mp);
2845 		mount_iterate_unlock(mp);
2846 		return retval;
2847 	}
2848 
2849 	/* iterate over all the vnodes */
2850 	while (!TAILQ_EMPTY(&mp->mnt_workerqueue)) {
2851 		vp = TAILQ_FIRST(&mp->mnt_workerqueue);
2852 		TAILQ_REMOVE(&mp->mnt_workerqueue, vp, v_mntvnodes);
2853 		TAILQ_INSERT_TAIL(&mp->mnt_vnodelist, vp, v_mntvnodes);
2854 
2855 		if ((vp->v_mount != mp) || (vp == skipvp)) {
2856 			continue;
2857 		}
2858 		vid = vp->v_id;
2859 		mount_unlock(mp);
2860 
2861 		vnode_lock_spin(vp);
2862 
2863 		// If vnode is already terminating, wait for it...
2864 		while (vp->v_id == vid && ISSET(vp->v_lflag, VL_TERMINATE)) {
2865 			vp->v_lflag |= VL_TERMWANT;
2866 			msleep(&vp->v_lflag, &vp->v_lock, PVFS, "vflush", NULL);
2867 		}
2868 
2869 		if ((vp->v_id != vid) || ISSET(vp->v_lflag, VL_DEAD)) {
2870 			vnode_unlock(vp);
2871 			mount_lock(mp);
2872 			continue;
2873 		}
2874 
2875 		/*
2876 		 * If requested, skip over vnodes marked VSYSTEM.
2877 		 * Skip over all vnodes marked VNOFLUSH.
2878 		 */
2879 		if ((flags & SKIPSYSTEM) && ((vp->v_flag & VSYSTEM) ||
2880 		    (vp->v_flag & VNOFLUSH))) {
2881 			vnode_unlock(vp);
2882 			mount_lock(mp);
2883 			continue;
2884 		}
2885 		/*
2886 		 * If requested, skip over vnodes marked VSWAP.
2887 		 */
2888 		if ((flags & SKIPSWAP) && (vp->v_flag & VSWAP)) {
2889 			vnode_unlock(vp);
2890 			mount_lock(mp);
2891 			continue;
2892 		}
2893 		/*
2894 		 * If requested, skip over vnodes marked VROOT.
2895 		 */
2896 		if ((flags & SKIPROOT) && (vp->v_flag & VROOT)) {
2897 			vnode_unlock(vp);
2898 			mount_lock(mp);
2899 			continue;
2900 		}
2901 		/*
2902 		 * If WRITECLOSE is set, only flush out regular file
2903 		 * vnodes open for writing.
2904 		 */
2905 		if ((flags & WRITECLOSE) &&
2906 		    (vp->v_writecount == 0 || vp->v_type != VREG)) {
2907 			vnode_unlock(vp);
2908 			mount_lock(mp);
2909 			continue;
2910 		}
2911 		/*
2912 		 * If the real usecount is 0, all we need to do is clear
2913 		 * out the vnode data structures and we are done.
2914 		 */
2915 		if (((vp->v_usecount == 0) ||
2916 		    ((vp->v_usecount - vp->v_kusecount) == 0))) {
2917 			vnode_lock_convert(vp);
2918 			vnode_hold(vp);
2919 			vp->v_iocount++;        /* so that drain waits for other iocounts */
2920 #ifdef CONFIG_IOCOUNT_TRACE
2921 			record_vp(vp, 1);
2922 #endif
2923 			vnode_reclaim_internal(vp, 1, 1, 0);
2924 			vnode_dropiocount(vp);
2925 			vnode_list_add(vp);
2926 			vnode_drop_and_unlock(vp);
2927 
2928 			reclaimed++;
2929 			mount_lock(mp);
2930 			continue;
2931 		}
2932 		/*
2933 		 * If FORCECLOSE is set, forcibly close the vnode.
2934 		 * For block or character devices, revert to an
2935 		 * anonymous device. For all other files, just kill them.
2936 		 */
2937 		if (flags & FORCECLOSE) {
2938 			vnode_lock_convert(vp);
2939 
2940 			if (vp->v_type != VBLK && vp->v_type != VCHR) {
2941 				vp->v_iocount++;        /* so that drain waits for other iocounts */
2942 				vnode_hold(vp);
2943 #ifdef CONFIG_IOCOUNT_TRACE
2944 				record_vp(vp, 1);
2945 #endif
2946 				vnode_abort_advlocks(vp);
2947 				vnode_reclaim_internal(vp, 1, 1, 0);
2948 				vnode_dropiocount(vp);
2949 				vnode_list_add(vp);
2950 				vnode_drop_and_unlock(vp);
2951 			} else {
2952 				vnode_hold(vp);
2953 				vp->v_lflag |= VL_OPSCHANGE;
2954 				vclean(vp, 0);
2955 				vp->v_lflag &= ~VL_DEAD;
2956 				vp->v_op = spec_vnodeop_p;
2957 				vp->v_flag |= VDEVFLUSH;
2958 				vnode_drop_and_unlock(vp);
2959 				wakeup(&vp->v_lflag); /* chkvnlock is waiting for VL_DEAD to get unset */
2960 			}
2961 			mount_lock(mp);
2962 			continue;
2963 		}
2964 
2965 		/* log vnodes blocking unforced unmounts */
2966 		if (print_busy_vnodes && first_try && ((flags & FORCECLOSE) == 0)) {
2967 			vprint("vflush - busy vnode", vp);
2968 		}
2969 
2970 		vnode_unlock(vp);
2971 		mount_lock(mp);
2972 		busy++;
2973 	}
2974 
2975 	/* At this point the worker queue is completed */
2976 	if (busy && ((flags & FORCECLOSE) == 0) && reclaimed) {
2977 		busy = 0;
2978 		reclaimed = 0;
2979 		(void)vnode_iterate_reloadq(mp);
2980 		first_try = false;
2981 		/* returned with mount lock held */
2982 		goto loop;
2983 	}
2984 
2985 	/* if new vnodes were created in between retry the reclaim */
2986 	if (vnode_iterate_reloadq(mp) != 0) {
2987 		if (!(busy && ((flags & FORCECLOSE) == 0))) {
2988 			first_try = false;
2989 			goto loop;
2990 		}
2991 	}
2992 	vnode_iterate_clear(mp);
2993 	mount_unlock(mp);
2994 	mount_iterate_unlock(mp);
2995 
2996 	if (busy && ((flags & FORCECLOSE) == 0)) {
2997 		return EBUSY;
2998 	}
2999 	return 0;
3000 }
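/*
 * Illustrative sketch (hypothetical caller, not part of the original
 * source): an unmount path typically tries a polite flush first and only
 * falls back to a forced one:
 *
 *	if (vflush(mp, NULLVP, 0) == EBUSY && force) {
 *		(void)vflush(mp, NULLVP, FORCECLOSE);	// detach active vnodes
 *	}
 */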
3001 
3002 long num_recycledvnodes = 0;
3003 /*
3004  * Disassociate the underlying file system from a vnode.
3005  * The vnode lock is held on entry.
3006  */
3007 static void
3008 vclean(vnode_t vp, int flags)
3009 {
3010 	vfs_context_t ctx = vfs_context_current();
3011 	int active;
3012 	int need_inactive;
3013 	int already_terminating;
3014 	int clflags = 0;
3015 #if NAMEDSTREAMS
3016 	int is_namedstream;
3017 #endif
3018 
3019 	/*
3020 	 * Check to see if the vnode is in use.
3021 	 * If so we have to reference it before we clean it out
3022 	 * so that its count cannot fall to zero and generate a
3023 	 * race against ourselves to recycle it.
3024 	 */
3025 	active = vp->v_usecount;
3026 
3027 	/*
3028 	 * just in case we missed sending a needed
3029 	 * VNOP_INACTIVE, we'll do it now
3030 	 */
3031 	need_inactive = (vp->v_lflag & VL_NEEDINACTIVE);
3032 
3033 	vp->v_lflag &= ~VL_NEEDINACTIVE;
3034 
3035 	/*
3036 	 * Prevent the vnode from being recycled or
3037 	 * brought into use while we clean it out.
3038 	 */
3039 	already_terminating = (vp->v_lflag & VL_TERMINATE);
3040 
3041 	vp->v_lflag |= VL_TERMINATE;
3042 
3043 #if NAMEDSTREAMS
3044 	is_namedstream = vnode_isnamedstream(vp);
3045 #endif
3046 
3047 	vnode_unlock(vp);
3048 
3049 	OSAddAtomicLong(1, &num_recycledvnodes);
3050 
3051 	if (flags & DOCLOSE) {
3052 		clflags |= IO_NDELAY;
3053 	}
3054 	if (flags & REVOKEALL) {
3055 		clflags |= IO_REVOKE;
3056 	}
3057 
3058 #if CONFIG_MACF
3059 	if (vp->v_mount) {
3060 		/*
3061 		 * It is possible for bdevvp vnodes to not have a mount
3062 		 * pointer. It's fine to let it get reclaimed without
3063 		 * notifying.
3064 		 */
3065 		mac_vnode_notify_reclaim(vp);
3066 	}
3067 #endif
3068 
3069 	if (active && (flags & DOCLOSE)) {
3070 		VNOP_CLOSE(vp, clflags, ctx);
3071 	}
3072 
3073 	/*
3074 	 * Clean out any buffers associated with the vnode.
3075 	 */
3076 	if (flags & DOCLOSE) {
3077 		if (vp->v_tag == VT_NFS) {
3078 			nfs_vinvalbuf(vp, V_SAVE, ctx, 0);
3079 		} else {
3080 			VNOP_FSYNC(vp, MNT_WAIT, ctx);
3081 
3082 			/*
3083 			 * If the vnode is still in use (by the journal for
3084 			 * example) we don't want to invalidate locked buffers
3085 			 * here.  In that case, either the journal will tidy them
3086 			 * up, or we will deal with it when the usecount is
3087 			 * finally released in vnode_rele_internal.
3088 			 */
3089 			buf_invalidateblks(vp, BUF_WRITE_DATA | (active ? 0 : BUF_INVALIDATE_LOCKED), 0, 0);
3090 		}
3091 		if (UBCINFOEXISTS(vp)) {
3092 			/*
3093 			 * Clean the pages in VM.
3094 			 */
3095 			(void)ubc_msync(vp, (off_t)0, ubc_getsize(vp), NULL, UBC_PUSHALL | UBC_INVALIDATE | UBC_SYNC);
3096 		}
3097 	}
3098 	if (active || need_inactive) {
3099 		VNOP_INACTIVE(vp, ctx);
3100 	}
3101 
3102 #if NAMEDSTREAMS
3103 	if ((is_namedstream != 0) && (vp->v_parent != NULLVP)) {
3104 		vnode_t pvp = vp->v_parent;
3105 
3106 		/* Delete the shadow stream file before we reclaim its vnode */
3107 		if (vnode_isshadow(vp)) {
3108 			vnode_relenamedstream(pvp, vp);
3109 		}
3110 
3111 		/*
3112 		 * No more streams associated with the parent.  We
3113 		 * have a ref on it, so its identity is stable.
3114 		 * If the parent is on an opaque volume, then we need to know
3115 		 * whether it has associated named streams.
3116 		 */
3117 		if (vfs_authopaque(pvp->v_mount)) {
3118 			vnode_lock_spin(pvp);
3119 			pvp->v_lflag &= ~VL_HASSTREAMS;
3120 			vnode_unlock(pvp);
3121 		}
3122 	}
3123 #endif
3124 
3125 	vm_object_destroy_reason_t reason = VM_OBJECT_DESTROY_RECLAIM;
3126 	bool forced_unmount = vnode_mount(vp) != NULL && (vnode_mount(vp)->mnt_lflag & MNT_LFORCE) != 0;
3127 	bool ungraft_heuristic = flags & REVOKEALL;
3128 	bool unmount = vnode_mount(vp) != NULL && (vnode_mount(vp)->mnt_lflag & MNT_LUNMOUNT) != 0;
3129 	if (forced_unmount) {
3130 		reason = VM_OBJECT_DESTROY_FORCED_UNMOUNT;
3131 	} else if (ungraft_heuristic) {
3132 		reason = VM_OBJECT_DESTROY_UNGRAFT;
3133 	} else if (unmount) {
3134 		reason = VM_OBJECT_DESTROY_UNMOUNT;
3135 	}
3136 
3137 	/*
3138 	 * Destroy ubc named reference
3139 	 * cluster_release is done on this path
3140 	 * along with dropping the reference on the ucred
3141 	 * (and in the case of forced unmount of an mmap-ed file,
3142 	 * the ubc reference on the vnode is dropped here too).
3143 	 */
3144 	ubc_destroy_named(vp, reason);
3145 
3146 #if CONFIG_TRIGGERS
3147 	/*
3148 	 * cleanup trigger info from vnode (if any)
3149 	 */
3150 	if (vp->v_resolve) {
3151 		vnode_resolver_detach(vp);
3152 	}
3153 #endif
3154 
3155 #if CONFIG_IO_COMPRESSION_STATS
3156 	if ((vp->io_compression_stats)) {
3157 		vnode_iocs_record_and_free(vp);
3158 	}
3159 #endif /* CONFIG_IO_COMPRESSION_STATS */
3160 
3161 	/*
3162 	 * Reclaim the vnode.
3163 	 */
3164 	if (VNOP_RECLAIM(vp, ctx)) {
3165 		panic("vclean: cannot reclaim");
3166 	}
3167 
3168 	// make sure the name & parent ptrs get cleaned out!
3169 	vnode_update_identity(vp, NULLVP, NULL, 0, 0, VNODE_UPDATE_PARENT | VNODE_UPDATE_NAME | VNODE_UPDATE_PURGE | VNODE_UPDATE_PURGEFIRMLINK);
3170 
3171 	vnode_lock(vp);
3172 
3173 	/*
3174 	 * Remove the vnode from any mount list it might be on.  It is not
3175 	 * safe to do this any earlier because unmount needs to wait for
3176 	 * any vnodes to terminate and it cannot do that if it cannot find
3177 	 * them.
3178 	 */
3179 	insmntque(vp, (struct mount *)0);
3180 
3181 	vp->v_lflag |= VL_DEAD;
3182 	vp->v_mount = dead_mountp;
3183 	vp->v_op = dead_vnodeop_p;
3184 	vp->v_tag = VT_NON;
3185 	vp->v_data = NULL;
3186 
3187 	vp->v_flag &= ~VISDIRTY;
3188 
3189 	if (already_terminating == 0) {
3190 		vp->v_lflag &= ~VL_TERMINATE;
3191 		/*
3192 		 * Done with purge, notify sleepers of the grim news.
3193 		 */
3194 		if (vp->v_lflag & VL_TERMWANT) {
3195 			vp->v_lflag &= ~VL_TERMWANT;
3196 			wakeup(&vp->v_lflag);
3197 		}
3198 	}
3199 }
3200 
3201 /*
3202  * Eliminate all activity associated with the requested vnode
3203  * and with all vnodes aliased to the requested vnode.
3204  */
3205 int
3206 #if DIAGNOSTIC
3207 vn_revoke(vnode_t vp, int flags, __unused vfs_context_t a_context)
3208 #else
3209 vn_revoke(vnode_t vp, __unused int flags, __unused vfs_context_t a_context)
3210 #endif
3211 {
3212 	struct vnode *vq;
3213 	int vid;
3214 
3215 #if DIAGNOSTIC
3216 	if ((flags & REVOKEALL) == 0) {
3217 		panic("vnop_revoke");
3218 	}
3219 #endif
3220 
3221 	if (vnode_isaliased(vp)) {
3222 		/*
3223 		 * If a vgone (or vclean) is already in progress,
3224 		 * return an immediate error
3225 		 */
3226 		if (vp->v_lflag & VL_TERMINATE) {
3227 			return ENOENT;
3228 		}
3229 
3230 		/*
3231 		 * Ensure that vp will not be vgone'd while we
3232 		 * are eliminating its aliases.
3233 		 */
3234 		SPECHASH_LOCK();
3235 		while ((vp->v_specflags & SI_ALIASED)) {
3236 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3237 				if (vq->v_rdev != vp->v_rdev ||
3238 				    vq->v_type != vp->v_type || vp == vq) {
3239 					continue;
3240 				}
3241 				vid = vq->v_id;
3242 				vnode_hold(vq);
3243 				SPECHASH_UNLOCK();
3244 				if (vnode_getwithvid(vq, vid)) {
3245 					vq = vnode_drop(vq);
3246 					SPECHASH_LOCK();
3247 					break;
3248 				}
3249 				vnode_lock(vq);
3250 				if (!(vq->v_lflag & VL_TERMINATE)) {
3251 					vnode_reclaim_internal(vq, 1, 1, 0);
3252 				}
3253 				vnode_put_locked(vq);
3254 				vq = vnode_drop_and_unlock(vq);
3255 				SPECHASH_LOCK();
3256 				break;
3257 			}
3258 		}
3259 		SPECHASH_UNLOCK();
3260 	}
3261 	vnode_lock(vp);
3262 	if (vp->v_lflag & VL_TERMINATE) {
3263 		vnode_unlock(vp);
3264 		return ENOENT;
3265 	}
3266 	vnode_reclaim_internal(vp, 1, 0, REVOKEALL);
3267 	vnode_unlock(vp);
3268 
3269 	return 0;
3270 }
3271 
3272 /*
3273  * Recycle an unused vnode to the front of the free list.
3274  * Release the passed interlock if the vnode will be recycled.
3275  */
3276 int
3277 vnode_recycle(struct vnode *vp)
3278 {
3279 	vnode_lock_spin(vp);
3280 
3281 	if (vp->v_iocount || vp->v_usecount) {
3282 		vp->v_lflag |= VL_MARKTERM;
3283 		vnode_unlock(vp);
3284 		return 0;
3285 	}
3286 	vnode_lock_convert(vp);
3287 	vnode_hold(vp);
3288 	vnode_reclaim_internal(vp, 1, 0, 0);
3289 
3290 	vnode_drop_and_unlock(vp);
3291 
3292 	return 1;
3293 }
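/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * callers use the return value to learn whether the reclaim happened inline:
 *
 *	if (vnode_recycle(vp) == 0) {
 *		// vp was busy; VL_MARKTERM is set and the vnode will be
 *		// reclaimed when its last iocount/usecount is dropped
 *	} else {
 *		// vp was idle and has been reclaimed immediately
 *	}
 */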
3294 
3295 static int
3296 vnode_reload(vnode_t vp)
3297 {
3298 	vnode_lock_spin(vp);
3299 
3300 	if ((vp->v_iocount > 1) || vp->v_usecount) {
3301 		vnode_unlock(vp);
3302 		return 0;
3303 	}
3304 	if (vp->v_iocount <= 0) {
3305 		panic("vnode_reload with no iocount %d", vp->v_iocount);
3306 	}
3307 
3308 	/* mark for release when iocount is dropped */
3309 	vp->v_lflag |= VL_MARKTERM;
3310 	vnode_unlock(vp);
3311 
3312 	return 1;
3313 }
3314 
3315 
3316 static void
3317 vgone(vnode_t vp, int flags)
3318 {
3319 	struct vnode *vq;
3320 	struct vnode *vx;
3321 
3322 	/*
3323 	 * Clean out the filesystem specific data.
3324 	 * vclean also takes care of removing the
3325 	 * vnode from any mount list it might be on
3326 	 */
3327 	vclean(vp, flags | DOCLOSE);
3328 
3329 	/*
3330 	 * If special device, remove it from special device alias list
3331 	 * if it is on one.
3332 	 */
3333 	if ((vp->v_type == VBLK || vp->v_type == VCHR) && vp->v_specinfo != 0) {
3334 		SPECHASH_LOCK();
3335 		if (*vp->v_hashchain == vp) {
3336 			*vp->v_hashchain = vp->v_specnext;
3337 		} else {
3338 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3339 				if (vq->v_specnext != vp) {
3340 					continue;
3341 				}
3342 				vq->v_specnext = vp->v_specnext;
3343 				break;
3344 			}
3345 			if (vq == NULL) {
3346 				panic("missing bdev");
3347 			}
3348 		}
3349 		if (vp->v_specflags & SI_ALIASED) {
3350 			vx = NULL;
3351 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
3352 				if (vq->v_rdev != vp->v_rdev ||
3353 				    vq->v_type != vp->v_type) {
3354 					continue;
3355 				}
3356 				if (vx) {
3357 					break;
3358 				}
3359 				vx = vq;
3360 			}
3361 			if (vx == NULL) {
3362 				panic("missing alias");
3363 			}
3364 			if (vq == NULL) {
3365 				vx->v_specflags &= ~SI_ALIASED;
3366 			}
3367 			vp->v_specflags &= ~SI_ALIASED;
3368 		}
3369 		SPECHASH_UNLOCK();
3370 		{
3371 			struct specinfo *tmp = vp->v_specinfo;
3372 			vp->v_specinfo = NULL;
3373 			zfree(specinfo_zone, tmp);
3374 		}
3375 	}
3376 }
3377 
3378 /*
3379  * internal helper function only!
3380  * vend an _iocounted_ vnode via output argument, or return an error if unable.
3381  */
3382 static int
3383 get_vp_from_dev(dev_t dev, enum vtype type, vnode_t *outvp)
3384 {
3385 	vnode_t vp;
3386 	int vid;
3387 
3388 loop:
3389 	SPECHASH_LOCK();
3390 	for (vp = speclisth[SPECHASH(dev)]; vp; vp = vp->v_specnext) {
3391 		if (dev != vp->v_rdev || type != vp->v_type) {
3392 			continue;
3393 		}
3394 		vid = vp->v_id;
3395 		vnode_hold(vp);
3396 		SPECHASH_UNLOCK();
3397 
3398 		/* acquire iocount */
3399 		if (vnode_getwithvid(vp, vid)) {
3400 			vnode_drop(vp);
3401 			goto loop;
3402 		}
3403 		vnode_drop(vp);
3404 
3405 		/* Vend iocounted vnode */
3406 		*outvp = vp;
3407 		return 0;
3408 	}
3409 
3410 	/* vnode not found, error out */
3411 	SPECHASH_UNLOCK();
3412 	return ENOENT;
3413 }
3414 
3415 
3416 
3417 /*
3418  * Lookup a vnode by device number.
3419  */
3420 int
3421 check_mountedon(dev_t dev, enum vtype type, int *errorp)
3422 {
3423 	vnode_t vp = NULLVP;
3424 	int rc = 0;
3425 
3426 	rc = get_vp_from_dev(dev, type, &vp);
3427 	if (rc) {
3428 		/* if no vnode found, it cannot be mounted on */
3429 		return 0;
3430 	}
3431 
3432 	/* otherwise, examine it */
3433 	vnode_lock_spin(vp);
3434 	/* note: exclude the iocount we JUST got (e.g. >1, not >0) */
3435 	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
3436 		vnode_unlock(vp);
3437 		if ((*errorp = vfs_mountedon(vp)) != 0) {
3438 			rc = 1;
3439 		}
3440 	} else {
3441 		vnode_unlock(vp);
3442 	}
3443 	/* release iocount! */
3444 	vnode_put(vp);
3445 
3446 	return rc;
3447 }
3448 
3449 extern dev_t chrtoblk(dev_t d);
3450 
3451 /*
3452  * Examine the supplied vnode's dev_t and find its counterpart
3453  * (e.g. VCHR => VBLK) to compare against.
3454  */
3455 static int
3456 vnode_cmp_paired_dev(vnode_t vp, vnode_t bdev_vp, enum vtype in_type,
3457     enum vtype out_type)
3458 {
3459 	if (!vp || !bdev_vp) {
3460 		return EINVAL;
3461 	}
3462 	/* Verify iocounts */
3463 	if (vnode_iocount(vp) <= 0 ||
3464 	    vnode_iocount(bdev_vp) <= 0) {
3465 		return EINVAL;
3466 	}
3467 
3468 	/* check for basic matches */
3469 	if (vnode_vtype(vp) != in_type) {
3470 		return EINVAL;
3471 	}
3472 	if (vnode_vtype(bdev_vp) != out_type) {
3473 		return EINVAL;
3474 	}
3475 
3476 	dev_t dev = vnode_specrdev(vp);
3477 	dev_t blk_devt = vnode_specrdev(bdev_vp);
3478 
3479 	if (in_type == VCHR) {
3480 		if (out_type != VBLK) {
3481 			return EINVAL;
3482 		}
3483 		dev_t bdev = chrtoblk(dev);
3484 		if (bdev == NODEV) {
3485 			return EINVAL;
3486 		} else if (bdev == blk_devt) {
3487 			return 0;
3488 		}
3489 		//fall through
3490 	}
3491 	/*
3492 	 * else case:
3493 	 *
3494 	 * in_type == VBLK? => VCHR?
3495 	 * not implemented...
3496 	 * exercise to the reader: this can be built by
3497 	 * taking the device's major, and iterating the `chrtoblktab`
3498 	 * array to look for a value that matches.
3499 	 */
3500 	return EINVAL;
3501 }
3502 /*
3503  * Vnode compare: does the supplied vnode's CHR device, match the dev_t
3504  * of the accompanying `blk_vp` ?
3505  * NOTE: vnodes MUST be iocounted BEFORE calling this!
3506  */
3507 
3508 int
3509 vnode_cmp_chrtoblk(vnode_t vp, vnode_t blk_vp)
3510 {
3511 	return vnode_cmp_paired_dev(vp, blk_vp, VCHR, VBLK);
3512 }
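/*
 * Illustrative sketch (hypothetical device names, not part of the original
 * source): with iocounts held on both vnodes, e.g. for /dev/rdisk1 (VCHR)
 * and /dev/disk1 (VBLK):
 *
 *	if (vnode_cmp_chrtoblk(chr_vp, blk_vp) == 0) {
 *		// chrtoblk(chr_vp's dev_t) matches blk_vp's dev_t
 *	}
 */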
3513 
3514 
3515 
3516 /*
3517  * Calculate the total number of references to a special device.
3518  */
3519 int
3520 vcount(vnode_t vp)
3521 {
3522 	vnode_t vq, vnext;
3523 	int count;
3524 	int vid;
3525 
3526 	if (!vnode_isspec(vp)) {
3527 		return vp->v_usecount - vp->v_kusecount;
3528 	}
3529 
3530 loop:
3531 	if (!vnode_isaliased(vp)) {
3532 		return vp->v_specinfo->si_opencount;
3533 	}
3534 	count = 0;
3535 
3536 	SPECHASH_LOCK();
3537 	/*
3538 	 * Grab first vnode and its vid.
3539 	 */
3540 	vq = *vp->v_hashchain;
3541 	if (vq) {
3542 		vid = vq->v_id;
3543 		vnode_hold(vq);
3544 	} else {
3545 		vid = 0;
3546 	}
3547 	SPECHASH_UNLOCK();
3548 
3549 	while (vq) {
3550 		/*
3551 		 * Attempt to get the vnode outside the SPECHASH lock.
3552 		 * Don't take iocount on 'vp' as iocount is already held by the caller.
3553 		 */
3554 		if ((vq != vp) && vnode_getwithvid(vq, vid)) {
3555 			vnode_drop(vq);
3556 			goto loop;
3557 		}
3558 		vnode_drop(vq);
3559 		vnode_lock(vq);
3560 
3561 		if (vq->v_rdev == vp->v_rdev && vq->v_type == vp->v_type) {
3562 			if ((vq->v_usecount == 0) && (vq->v_iocount == 1) && vq != vp) {
3563 				/*
3564 				 * Alias, but not in use, so flush it out.
3565 				 */
3566 				vnode_hold(vq);
3567 				vnode_reclaim_internal(vq, 1, 1, 0);
3568 				vnode_put_locked(vq);
3569 				vnode_drop_and_unlock(vq);
3570 				goto loop;
3571 			}
3572 			count += vq->v_specinfo->si_opencount;
3573 		}
3574 		vnode_unlock(vq);
3575 
3576 		SPECHASH_LOCK();
3577 		/*
3578 		 * must do this with the reference still held on 'vq'
3579 		 * so that it can't be destroyed while we're poking
3580 		 * through v_specnext
3581 		 */
3582 		vnext = vq->v_specnext;
3583 		if (vnext) {
3584 			vid = vnext->v_id;
3585 			vnode_hold(vnext);
3586 		} else {
3587 			vid = 0;
3588 		}
3589 		SPECHASH_UNLOCK();
3590 
3591 		if (vq != vp) {
3592 			vnode_put(vq);
3593 		}
3594 
3595 		vq = vnext;
3596 	}
3597 
3598 	return count;
3599 }
3600 
3601 int     prtactive = 0;          /* 1 => print out reclaim of active vnodes */
3602 
3603 /*
3604  * Print out a description of a vnode.
3605  */
3606 static const char *typename[] =
3607 { "VNON", "VREG", "VDIR", "VBLK", "VCHR", "VLNK", "VSOCK", "VFIFO", "VBAD" };
3608 
3609 void
3610 vprint(const char *label, struct vnode *vp)
3611 {
3612 	char sbuf[64];
3613 
3614 	if (label != NULL) {
3615 		printf("%s: ", label);
3616 	}
3617 	printf("name %s type %s, usecount %d, writecount %d\n",
3618 	    vp->v_name, typename[vp->v_type],
3619 	    vp->v_usecount, vp->v_writecount);
3620 	sbuf[0] = '\0';
3621 	if (vp->v_flag & VROOT) {
3622 		strlcat(sbuf, "|VROOT", sizeof(sbuf));
3623 	}
3624 	if (vp->v_flag & VTEXT) {
3625 		strlcat(sbuf, "|VTEXT", sizeof(sbuf));
3626 	}
3627 	if (vp->v_flag & VSYSTEM) {
3628 		strlcat(sbuf, "|VSYSTEM", sizeof(sbuf));
3629 	}
3630 	if (vp->v_flag & VNOFLUSH) {
3631 		strlcat(sbuf, "|VNOFLUSH", sizeof(sbuf));
3632 	}
3633 	if (vp->v_flag & VBWAIT) {
3634 		strlcat(sbuf, "|VBWAIT", sizeof(sbuf));
3635 	}
3636 	if (vnode_isaliased(vp)) {
3637 		strlcat(sbuf, "|VALIASED", sizeof(sbuf));
3638 	}
3639 	if (sbuf[0] != '\0') {
3640 		printf("vnode flags (%s)\n", &sbuf[1]);
3641 	}
3642 }
3643 
3644 static int
3645 vn_getpath_flags_to_buildpath_flags(int flags)
3646 {
3647 	int bpflags = (flags & VN_GETPATH_FSENTER) ? 0 : BUILDPATH_NO_FS_ENTER;
3648 
3649 	if (flags && (flags != VN_GETPATH_FSENTER)) {
3650 		if (flags & VN_GETPATH_NO_FIRMLINK) {
3651 			bpflags |= BUILDPATH_NO_FIRMLINK;
3652 		}
3653 		if (flags & VN_GETPATH_VOLUME_RELATIVE) {
3654 			bpflags |= (BUILDPATH_VOLUME_RELATIVE |
3655 			    BUILDPATH_NO_FIRMLINK);
3656 		}
3657 		if (flags & VN_GETPATH_NO_PROCROOT) {
3658 			bpflags |= BUILDPATH_NO_PROCROOT;
3659 		}
3660 		if (flags & VN_GETPATH_CHECK_MOVED) {
3661 			bpflags |= BUILDPATH_CHECK_MOVED;
3662 		}
3663 	}
3664 
3665 	return bpflags;
3666 }
3667 
3668 int
3669 vn_getpath_ext_with_mntlen(struct vnode *vp, struct vnode *dvp, char *pathbuf,
3670     size_t *len, size_t *mntlen, int flags)
3671 {
3672 	int bpflags = vn_getpath_flags_to_buildpath_flags(flags);
3673 	int local_len;
3674 	int error;
3675 
3676 	if (*len > INT_MAX) {
3677 		return EINVAL;
3678 	}
3679 
3680 	local_len = *len;
3681 
3682 	error = build_path_with_parent(vp, dvp, pathbuf, local_len, &local_len,
3683 	    mntlen, bpflags, vfs_context_current());
3684 
3685 	if (local_len >= 0 && local_len <= (int)*len) {
3686 		*len = (size_t)local_len;
3687 	}
3688 
3689 	return error;
3690 }
3691 
3692 int
3693 vn_getpath_ext(struct vnode *vp, struct vnode *dvp, char *pathbuf, size_t *len,
3694     int flags)
3695 {
3696 	return vn_getpath_ext_with_mntlen(vp, dvp, pathbuf, len, NULL, flags);
3697 }
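/*
 * Example (illustrative sketch): fetching a vnode's path into a stack
 * buffer.  'vp' is assumed to be a vnode on which the caller already
 * holds an iocount; on success *len is updated to the number of bytes
 * used, including the terminating NUL.
 *
 *	char   path[MAXPATHLEN];
 *	size_t len = sizeof(path);
 *	int    error;
 *
 *	error = vn_getpath_ext(vp, NULLVP, path, &len, VN_GETPATH_NO_FIRMLINK);
 *	if (error == 0) {
 *		printf("path (%lu bytes): %s\n", (unsigned long)len, path);
 *	}
 */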
3698 
3699 /*
3700  * Wrapper around vn_getpath_ext() that takes care of the int * <-> size_t *
3701  * conversion for the legacy KPIs.
3702  */
3703 static int
3704 vn_getpath_ext_int(struct vnode *vp, struct vnode *dvp, char *pathbuf,
3705     int *len, int flags)
3706 {
3707 	size_t slen = *len;
3708 	int error;
3709 
3710 	if (*len < 0) {
3711 		return EINVAL;
3712 	}
3713 
3714 	error = vn_getpath_ext(vp, dvp, pathbuf, &slen, flags);
3715 
3716 	if (slen <= INT_MAX) {
3717 		*len = (int)slen;
3718 	}
3719 
3720 	return error;
3721 }
3722 
3723 int
3724 vn_getpath(struct vnode *vp, char *pathbuf, int *len)
3725 {
3726 	return vn_getpath_ext_int(vp, NULL, pathbuf, len, 0);
3727 }
3728 
3729 int
3730 vn_getpath_fsenter(struct vnode *vp, char *pathbuf, int *len)
3731 {
3732 	return vn_getpath_ext_int(vp, NULL, pathbuf, len, VN_GETPATH_FSENTER);
3733 }
3734 
3735 /*
3736  * vn_getpath_fsenter_with_parent will reenter the file system to find the path of the
3737  * vnode.  It requires that there are IO counts on both the vnode and the directory vnode.
3738  *
3739  * vn_getpath_fsenter is called by MAC hooks to authorize operations for everything but
3740  * unlink, rmdir and rename. For those operations the MAC hook calls vn_getpath. This presents
3741  * problems where if the path cannot be found from the name cache, those operations can
3742  * erroneously fail with EPERM even though the call should succeed. When removing or moving
3743  * file system objects with operations such as unlink or rename, those operations need to
3744  * take IO counts on the target and containing directory. Calling vn_getpath_fsenter from a
3745  * MAC hook during these operations while a forced unmount is under way can lead to
3746  * deadlock. This happens when the operation starts and IO counts are taken on the containing
3747  * directories and targets. Before the MAC hook is called, a forced unmount from another
3748  * thread takes place and blocks on the ongoing operation's directory vnode in vdrain.
3749  * The MAC hook then gets called and calls vn_getpath_fsenter.  vn_getpath_fsenter
3750  * is called with the understanding that there is an IO count on the target. If in
3751  * build_path the directory vnode is no longer in the cache, the parent object id is
3752  * obtained via vnode_getattr from the target and used to call VFS_VGET to get the parent
3753  * vnode. The file system's VFS_VGET then looks up by inode in its hash and tries to get
3754  * an IO count. But VFS_VGET "sees" that the directory vnode is in vdrain and can block,
3755  * depending on which of the vnode_get family of interfaces it calls and how.
3756  *
3757  * N.B.  A reasonable interface to use is vnode_getwithvid. This interface was modified to
3758  * call vnode_getiocount with VNODE_DRAINO, so it will happily get an IO count and not
3759  * cause issues, but there is no guarantee that all or any file systems are doing that.
3760  *
3761  * vn_getpath_fsenter_with_parent can enter the file system safely since there is a known
3762  * IO count on the directory vnode by calling build_path_with_parent.
3763  */
3764 
3765 int
3766 vn_getpath_fsenter_with_parent(struct vnode *dvp, struct vnode *vp, char *pathbuf, int *len)
3767 {
3768 	return build_path_with_parent(vp, dvp, pathbuf, *len, len, NULL, 0, vfs_context_current());
3769 }
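/*
 * Example (illustrative sketch): a MAC hook for an unlink-style operation
 * that already holds iocounts on both 'dvp' (the parent) and 'vp' (the
 * target) can build the path without risking the vdrain deadlock
 * described above:
 *
 *	char pathbuf[MAXPATHLEN];
 *	int  len = sizeof(pathbuf);
 *
 *	if (vn_getpath_fsenter_with_parent(dvp, vp, pathbuf, &len) == 0) {
 *		// authorize the operation using pathbuf
 *	}
 */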
3770 
3771 int
3772 vn_getpath_no_firmlink(struct vnode *vp, char *pathbuf, int *len)
3773 {
3774 	return vn_getpath_ext_int(vp, NULLVP, pathbuf, len,
3775 	           VN_GETPATH_NO_FIRMLINK);
3776 }
3777 
3778 int
3779 vn_getcdhash(struct vnode *vp, off_t offset, unsigned char *cdhash)
3780 {
3781 	return ubc_cs_getcdhash(vp, offset, cdhash);
3782 }
3783 
3784 
3785 static char *extension_table = NULL;
3786 static int   nexts;
3787 static int   max_ext_width;
3788 
3789 static int
3790 extension_cmp(const void *a, const void *b)
3791 {
3792 	return (int)(strlen((const char *)a) - strlen((const char *)b));
3793 }
3794 
3795 
3796 //
3797 // This is the api LaunchServices uses to inform the kernel
3798 // of the list of package extensions to ignore.
3799 //
3800 // Internally we keep the list sorted by the length of
3801 // the extension (from shortest to longest).  We sort the
3802 // list of extensions so that we can speed up our searches
3803 // when comparing file names -- we only compare extensions
3804 // that could possibly fit into the file name, not all of
3805 // them (i.e. a short 8 character name can't have an 8
3806 // character extension).
3807 //
3808 extern lck_mtx_t pkg_extensions_lck;
3809 
3810 __private_extern__ int
3811 set_package_extensions_table(user_addr_t data, int nentries, int maxwidth)
3812 {
3813 	char *new_exts, *old_exts;
3814 	int old_nentries = 0, old_maxwidth = 0;
3815 	int error;
3816 
3817 	if (nentries <= 0 || nentries > 1024 || maxwidth <= 0 || maxwidth > 255) {
3818 		return EINVAL;
3819 	}
3820 
3821 
3822 	// allocate one byte extra so we can guarantee null termination
3823 	new_exts = kalloc_data((nentries * maxwidth) + 1, Z_WAITOK);
3824 	if (new_exts == NULL) {
3825 		return ENOMEM;
3826 	}
3827 
3828 	error = copyin(data, new_exts, nentries * maxwidth);
3829 	if (error) {
3830 		kfree_data(new_exts, (nentries * maxwidth) + 1);
3831 		return error;
3832 	}
3833 
3834 	new_exts[(nentries * maxwidth)] = '\0'; // guarantee null termination of the block
3835 
3836 	qsort(new_exts, nentries, maxwidth, extension_cmp);
3837 
3838 	lck_mtx_lock(&pkg_extensions_lck);
3839 
3840 	old_exts        = extension_table;
3841 	old_nentries    = nexts;
3842 	old_maxwidth    = max_ext_width;
3843 	extension_table = new_exts;
3844 	nexts           = nentries;
3845 	max_ext_width   = maxwidth;
3846 
3847 	lck_mtx_unlock(&pkg_extensions_lck);
3848 
3849 	kfree_data(old_exts, (old_nentries * old_maxwidth) + 1);
3850 
3851 	return 0;
3852 }
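/*
 * The table installed above is a flat block of 'nentries' fixed-width,
 * NUL-padded rows of 'maxwidth' bytes each; entry i is addressed as
 * (this is exactly what is_package_name() below does):
 *
 *	const char *entry_i = &extension_table[i * max_ext_width];
 *
 * e.g. with maxwidth = 8 and the entries "app" and "bundle", the block
 * is laid out as "app\0\0\0\0\0" followed by "bundle\0\0".
 */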
3853 
3854 
3855 int
3856 is_package_name(const char *name, int len)
3857 {
3858 	int i;
3859 	size_t extlen;
3860 	const char *ptr, *name_ext;
3861 
3862 	// if the name is 3 bytes or less it can't be of the
3863 	// form A.B and if it begins with a "." then it is also
3864 	// not a package.
3865 	if (len <= 3 || name[0] == '.') {
3866 		return 0;
3867 	}
3868 
3869 	name_ext = NULL;
3870 	for (ptr = name; *ptr != '\0'; ptr++) {
3871 		if (*ptr == '.') {
3872 			name_ext = ptr;
3873 		}
3874 	}
3875 
3876 	// if there is no "." extension, it can't match
3877 	if (name_ext == NULL) {
3878 		return 0;
3879 	}
3880 
3881 	// advance over the "."
3882 	name_ext++;
3883 
3884 	lck_mtx_lock(&pkg_extensions_lck);
3885 
3886 	// now iterate over all the extensions to see if any match
3887 	ptr = &extension_table[0];
3888 	for (i = 0; i < nexts; i++, ptr += max_ext_width) {
3889 		extlen = strlen(ptr);
3890 		if (strncasecmp(name_ext, ptr, extlen) == 0 && name_ext[extlen] == '\0') {
3891 			// aha, a match!
3892 			lck_mtx_unlock(&pkg_extensions_lck);
3893 			return 1;
3894 		}
3895 	}
3896 
3897 	lck_mtx_unlock(&pkg_extensions_lck);
3898 
3899 	// if we get here, no extension matched
3900 	return 0;
3901 }
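/*
 * Example results (illustrative, assuming "app" is in the extension
 * table handed to set_package_extensions_table() above):
 *
 *	is_package_name("Foo.app", 7)   -> 1   (extension "app" matches)
 *	is_package_name(".hidden", 7)   -> 0   (leading '.' is rejected)
 *	is_package_name("Makefile", 8)  -> 0   (no '.' extension at all)
 */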
3902 
3903 int
3904 vn_path_package_check(__unused vnode_t vp, char *path, int pathlen, int *component)
3905 {
3906 	char *ptr, *end;
3907 	int comp = 0;
3908 
3909 	if (pathlen < 0) {
3910 		return EINVAL;
3911 	}
3912 
3913 	*component = -1;
3914 	if (*path != '/') {
3915 		return EINVAL;
3916 	}
3917 
3918 	end = path + 1;
3919 	while (end < path + pathlen && *end != '\0') {
3920 		while (end < path + pathlen && *end == '/' && *end != '\0') {
3921 			end++;
3922 		}
3923 
3924 		ptr = end;
3925 
3926 		while (end < path + pathlen && *end != '/' && *end != '\0') {
3927 			end++;
3928 		}
3929 
3930 		if (end > path + pathlen) {
3931 			// hmm, string wasn't null terminated
3932 			return EINVAL;
3933 		}
3934 
3935 		*end = '\0';
3936 		if (is_package_name(ptr, (int)(end - ptr))) {
3937 			*component = comp;
3938 			break;
3939 		}
3940 
3941 		end++;
3942 		comp++;
3943 	}
3944 
3945 	return 0;
3946 }
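/*
 * Worked example (illustrative, again assuming "app" is a registered
 * package extension): for path "/Applications/Foo.app/Contents" the loop
 * visits "Applications" (comp 0) and "Foo.app" (comp 1), then stops, so
 * *component is set to 1.  Note that the scan NUL-terminates each
 * component in place, so 'path' must be a writable scratch copy.
 */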
3947 
3948 /*
3949  * Determine if a name is inappropriate for a searchfs query.
3950  * This list consists of /System currently.
3951  */
3952 
3953 int
3954 vn_searchfs_inappropriate_name(const char *name, int len)
3955 {
3956 	const char *bad_names[] = { "System" };
3957 	int   bad_len[]   = { 6 };
3958 	int  i;
3959 
3960 	if (len < 0) {
3961 		return EINVAL;
3962 	}
3963 
3964 	for (i = 0; i < (int) (sizeof(bad_names) / sizeof(bad_names[0])); i++) {
3965 		if (len == bad_len[i] && strncmp(name, bad_names[i], strlen(bad_names[i]) + 1) == 0) {
3966 			return 1;
3967 		}
3968 	}
3969 
3970 	// if we get here, no name matched
3971 	return 0;
3972 }
3973 
3974 /*
3975  * Top level filesystem related information gathering.
3976  */
3977 extern unsigned int vfs_nummntops;
3978 
3979 /*
3980  * VFS_NUMMNTOPS shouldn't be at name[1] since it
3981  * is a VFS generic variable. Since we no longer support
3982  * VT_UFS, we reserve its value to support this sysctl node.
3983  *
3984  * It should have been:
3985  *    name[0]:  VFS_GENERIC
3986  *    name[1]:  VFS_NUMMNTOPS
3987  */
3988 SYSCTL_INT(_vfs, VFS_NUMMNTOPS, nummntops,
3989     CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
3990     &vfs_nummntops, 0, "");
3991 
3992 int
3993 vfs_sysctl(int *name __unused, u_int namelen __unused,
3994     user_addr_t oldp __unused, size_t *oldlenp __unused,
3995     user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused);
3996 
3997 int
3998 vfs_sysctl(int *name __unused, u_int namelen __unused,
3999     user_addr_t oldp __unused, size_t *oldlenp __unused,
4000     user_addr_t newp __unused, size_t newlen __unused, proc_t p __unused)
4001 {
4002 	return EINVAL;
4003 }
4004 
4005 
4006 //
4007 // The following code disallows specific sysctl's that came through
4008 // the direct sysctl interface (vfs_sysctl_node) instead of the newer
4009 // sysctl_vfs_ctlbyfsid() interface.  We cannot allow these selectors
4010 // through vfs_sysctl_node() because it passes the user's oldp pointer
4011 // directly to the file system which (for these selectors) casts it
4012 // back to a struct sysctl_req and then proceeds to use SYSCTL_IN(),
4013 // which jumps through an arbitrary function pointer.  When called
4014 // through the sysctl_vfs_ctlbyfsid() interface this does not happen
4015 // and so it's safe.
4016 //
4017 // Unfortunately we have to pull in definitions from AFP and SMB and
4018 // perform explicit name checks on the file system to determine if
4019 // these selectors are being used.
4020 //
4021 
4022 #define AFPFS_VFS_CTL_GETID            0x00020001
4023 #define AFPFS_VFS_CTL_NETCHANGE        0x00020002
4024 #define AFPFS_VFS_CTL_VOLCHANGE        0x00020003
4025 
4026 #define SMBFS_SYSCTL_REMOUNT           1
4027 #define SMBFS_SYSCTL_REMOUNT_INFO      2
4028 #define SMBFS_SYSCTL_GET_SERVER_SHARE  3
4029 
4030 
4031 static int
4032 is_bad_sysctl_name(struct vfstable *vfsp, int selector_name)
4033 {
4034 	switch (selector_name) {
4035 	case VFS_CTL_QUERY:
4036 	case VFS_CTL_TIMEO:
4037 	case VFS_CTL_NOLOCKS:
4038 	case VFS_CTL_NSTATUS:
4039 	case VFS_CTL_SADDR:
4040 	case VFS_CTL_DISC:
4041 	case VFS_CTL_SERVERINFO:
4042 		return 1;
4043 
4044 	default:
4045 		break;
4046 	}
4047 
4048 	// the more complicated check for some of SMB's special values
4049 	if (strcmp(vfsp->vfc_name, "smbfs") == 0) {
4050 		switch (selector_name) {
4051 		case SMBFS_SYSCTL_REMOUNT:
4052 		case SMBFS_SYSCTL_REMOUNT_INFO:
4053 		case SMBFS_SYSCTL_GET_SERVER_SHARE:
4054 			return 1;
4055 		}
4056 	} else if (strcmp(vfsp->vfc_name, "afpfs") == 0) {
4057 		switch (selector_name) {
4058 		case AFPFS_VFS_CTL_GETID:
4059 		case AFPFS_VFS_CTL_NETCHANGE:
4060 		case AFPFS_VFS_CTL_VOLCHANGE:
4061 			return 1;
4062 		}
4063 	}
4064 
4065 	//
4066 	// If we get here we passed all the checks so the selector is ok
4067 	//
4068 	return 0;
4069 }
4070 
4071 
4072 int vfs_sysctl_node SYSCTL_HANDLER_ARGS
4073 {
4074 	int *name, namelen;
4075 	struct vfstable *vfsp;
4076 	int error;
4077 	int fstypenum;
4078 
4079 	fstypenum = oidp->oid_number;
4080 	name = arg1;
4081 	namelen = arg2;
4082 
4083 	/* all sysctl names at this level should have at least one name slot for the FS */
4084 	if (namelen < 1) {
4085 		return EISDIR; /* overloaded */
4086 	}
4087 	mount_list_lock();
4088 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
4089 		if (vfsp->vfc_typenum == fstypenum) {
4090 			vfsp->vfc_refcount++;
4091 			break;
4092 		}
4093 	}
4094 	mount_list_unlock();
4095 
4096 	if (vfsp == NULL) {
4097 		return ENOTSUP;
4098 	}
4099 
4100 	if (is_bad_sysctl_name(vfsp, name[0])) {
4101 		printf("vfs: bad selector 0x%.8x for old-style sysctl().  use the sysctl-by-fsid interface instead\n", name[0]);
4102 		error = EPERM;
4103 	} else {
4104 		error = (vfsp->vfc_vfsops->vfs_sysctl)(name, namelen,
4105 		    req->oldptr, &req->oldlen, req->newptr, req->newlen,
4106 		    vfs_context_current());
4107 	}
4108 
4109 	mount_list_lock();
4110 	vfsp->vfc_refcount--;
4111 	mount_list_unlock();
4112 
4113 	return error;
4114 }
4115 
4116 /*
4117  * Check to see if a filesystem is mounted on a block device.
4118  */
4119 int
4120 vfs_mountedon(struct vnode *vp)
4121 {
4122 	struct vnode *vq;
4123 	int error = 0;
4124 
4125 restart:
4126 	SPECHASH_LOCK();
4127 	if (vp->v_specflags & SI_MOUNTING && (vp->v_specinfo->si_mountingowner != current_thread())) {
4128 		msleep((caddr_t)&vp->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
4129 		goto restart;
4130 	}
4131 	if (vp->v_specflags & SI_MOUNTEDON) {
4132 		error = EBUSY;
4133 		goto out;
4134 	}
4135 	if (vp->v_specflags & SI_ALIASED) {
4136 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
4137 			if (vq->v_rdev != vp->v_rdev ||
4138 			    vq->v_type != vp->v_type || vq == vp) {
4139 				continue;
4140 			}
4141 			if (vq->v_specflags & SI_MOUNTING) {
4142 				msleep((caddr_t)&vq->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
4143 				goto restart;
4144 			}
4145 			if (vq->v_specflags & SI_MOUNTEDON) {
4146 				error = EBUSY;
4147 				break;
4148 			}
4149 		}
4150 	}
4151 out:
4152 	SPECHASH_UNLOCK();
4153 	return error;
4154 }
4155 
4156 void
4157 vfs_setmountedon(vnode_t vp)
4158 {
4159 	vnode_lock(vp);
4160 	SPECHASH_LOCK();
4161 	vp->v_specflags |= SI_MOUNTEDON;
4162 	vp->v_specflags &= ~SI_MOUNTING;
4163 	vp->v_specinfo->si_mountingowner = NULL;
4164 	SPECHASH_UNLOCK();
4165 	vnode_unlock(vp);
4166 	wakeup(&vp->v_specflags);
4167 }
4168 
4169 void
4170 vfs_clearmounting(vnode_t vp)
4171 {
4172 	vnode_lock(vp);
4173 	SPECHASH_LOCK();
4174 	vp->v_specflags &= ~SI_MOUNTING;
4175 	vp->v_specinfo->si_mountingowner = NULL;
4176 	SPECHASH_UNLOCK();
4177 	vnode_unlock(vp);
4178 	wakeup(&vp->v_specflags);
4179 }
4180 
4181 /*
4182  * Mark a block device as having a mount in progress; fails with EBUSY if the device (or an alias) is already mounted on.
4183  */
4184 int
4185 vfs_setmounting(vnode_t vp)
4186 {
4187 	struct vnode *vq;
4188 	int error = 0;
4189 
4190 	vnode_lock(vp);
4191 	while (vp->v_specflags & SI_MOUNTING) {
4192 		msleep((caddr_t)&vp->v_specflags, &vp->v_lock, PVFS, "vnode_waitformounting", NULL);
4193 	}
4194 	if (vp->v_specflags & SI_MOUNTEDON) {
4195 		vnode_unlock(vp);
4196 		return EBUSY;
4197 	}
4198 	SPECHASH_LOCK();
4199 	vp->v_specflags |= SI_MOUNTING;
4200 	vp->v_specinfo->si_mountingowner = current_thread();
4201 	vnode_unlock(vp);
4202 restart:
4203 	if (vp->v_specflags & SI_ALIASED) {
4204 		for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
4205 			if (vq->v_rdev != vp->v_rdev ||
4206 			    vq->v_type != vp->v_type || vq == vp) {
4207 				continue;
4208 			}
4209 			if (vq->v_specflags & SI_MOUNTING) {
4210 				msleep((caddr_t)&vq->v_specflags, SPECHASH_LOCK_ADDR(), PVFS | PDROP, "vnode_waitformounting", NULL);
4211 				SPECHASH_LOCK();
4212 				goto restart;
4213 			}
4214 			if (vq->v_specflags & SI_MOUNTEDON) {
4215 				error = EBUSY;
4216 				break;
4217 			}
4218 		}
4219 	}
4220 	SPECHASH_UNLOCK();
4221 	if (error) {
4222 		vnode_lock(vp);
4223 		SPECHASH_LOCK();
4224 		vp->v_specflags &= ~SI_MOUNTING;
4225 		SPECHASH_UNLOCK();
4226 		vnode_unlock(vp);
4227 		wakeup(&vp->v_specflags);
4228 	}
4229 	return error;
4230 }
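/*
 * Sketch of the intended SI_MOUNTING protocol (illustrative;
 * mount_the_device() is a hypothetical stand-in for the real mount work):
 *
 *	if ((error = vfs_setmounting(devvp)) != 0) {
 *		return error;               // already mounted on (EBUSY)
 *	}
 *	error = mount_the_device(devvp);
 *	if (error == 0) {
 *		vfs_setmountedon(devvp);    // SI_MOUNTING -> SI_MOUNTEDON
 *	} else {
 *		vfs_clearmounting(devvp);   // give up and wake any waiters
 *	}
 */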
4231 
4232 struct unmount_info {
4233 	int     u_errs; // Total failed unmounts
4234 	int     u_busy; // EBUSY failed unmounts
4235 	int     u_count; // Total volumes iterated
4236 	int     u_only_non_system;
4237 };
4238 
4239 static int
4240 unmount_callback(mount_t mp, void *arg)
4241 {
4242 	int error;
4243 	char *mntname;
4244 	struct unmount_info *uip = arg;
4245 
4246 	uip->u_count++;
4247 
4248 	mntname = zalloc_flags(ZV_NAMEI, Z_WAITOK | Z_NOFAIL);
4249 	strlcpy(mntname, mp->mnt_vfsstat.f_mntonname, MAXPATHLEN);
4250 
4251 	if (uip->u_only_non_system
4252 	    && ((mp->mnt_flag & MNT_ROOTFS) || (mp->mnt_kern_flag & MNTK_SYSTEM))) { //MNTK_BACKS_ROOT
4253 		printf("unmount(%d) %s skipped\n", uip->u_only_non_system, mntname);
4254 		mount_iterdrop(mp);     // VFS_ITERATE_CB_DROPREF
4255 	} else {
4256 		printf("unmount(%d) %s\n", uip->u_only_non_system, mntname);
4257 
4258 		mount_ref(mp, 0);
4259 		mount_iterdrop(mp);     // VFS_ITERATE_CB_DROPREF
4260 		error = dounmount(mp, MNT_FORCE, 1, vfs_context_current());
4261 		if (error) {
4262 			uip->u_errs++;
4263 			printf("Unmount of %s failed (%d)\n", mntname ? mntname:"?", error);
4264 			if (error == EBUSY) {
4265 				uip->u_busy++;
4266 			}
4267 		}
4268 	}
4269 	zfree(ZV_NAMEI, mntname);
4270 
4271 	return VFS_RETURNED;
4272 }
4273 
4274 /*
4275  * Unmount all filesystems. The list is traversed in reverse order
4276  * of mounting to avoid dependencies.
4277  * Busy mounts are retried.
4278  */
4279 __private_extern__ void
4280 vfs_unmountall(int only_non_system)
4281 {
4282 	int mounts, sec = 1;
4283 	struct unmount_info ui;
4284 
4285 	/*
4286 	 * Ensure last-completion-time is valid before anyone can see that
4287 	 * VFS shutdown has started.
4288 	 */
4289 	vfs_shutdown_last_completion_time = mach_absolute_time();
4290 	OSMemoryBarrier();
4291 	vfs_unmountall_started = 1;
4292 	printf("vfs_unmountall(%ssystem) start\n", only_non_system ? "non" : "");
4293 
4294 retry:
4295 	ui.u_errs = ui.u_busy = ui.u_count = 0;
4296 	ui.u_only_non_system = only_non_system;
4297 	// avoid vfs_iterate deadlock in dounmount(), use VFS_ITERATE_CB_DROPREF
4298 	vfs_iterate(VFS_ITERATE_CB_DROPREF | VFS_ITERATE_TAIL_FIRST, unmount_callback, &ui);
4299 	mounts = mount_getvfscnt();
4300 	if (mounts == 0) {
4301 		goto out;
4302 	}
4303 	if (ui.u_busy > 0) {            // Busy mounts - wait & retry
4304 		tsleep(&nummounts, PVFS, "busy mount", sec * hz);
4305 		sec *= 2;
4306 		if (sec <= 32) {
4307 			goto retry;
4308 		}
4309 		printf("Unmounting timed out\n");
4310 	} else if (ui.u_count < mounts) {
4311 		// If the vfs_iterate missed mounts in progress - wait a bit
4312 		tsleep(&nummounts, PVFS, "missed mount", 2 * hz);
4313 	}
4314 
4315 out:
4316 	printf("vfs_unmountall(%ssystem) end\n", only_non_system ? "non" : "");
4317 
4318 	/*
4319 	 * reboot_kernel() calls us twice; once to deal with non-system
4320 	 * mounts, and again to sweep up anything left after terminating
4321 	 * DEXTs.  We're only finished once we've completed the second pass.
4322 	 */
4323 	if (!only_non_system) {
4324 		vfs_unmountall_finished = 1;
4325 	}
4326 }
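/*
 * Note on the retry loop above: 'sec' doubles through 1, 2, 4, 8, 16, 32,
 * so a persistently busy mount is retried six times over roughly 63
 * seconds of accumulated sleep before "Unmounting timed out" is printed.
 */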
4327 
4328 /*
4329  * vfs_shutdown_in_progress --
4330  *
4331  * Returns whether or not the VFS is shutting down the file systems.
4332  */
4333 boolean_t
4334 vfs_shutdown_in_progress(void)
4335 {
4336 	return vfs_unmountall_started && !vfs_unmountall_finished;
4337 }
4338 
4339 /*
4340  * vfs_shutdown_finished --
4341  *
4342  * Returns whether or not the VFS shutdown has completed.
4343  */
4344 boolean_t
4345 vfs_shutdown_finished(void)
4346 {
4347 	return !!vfs_unmountall_finished;
4348 }
4349 
4350 /*
4351  * vfs_update_last_completion_time --
4352  *
4353  * Updates the "last I/O completion time" timestamp used by the watchdog
4354  * to monitor VFS shutdown progress.  Called by various I/O stack layers
4355  * as operations complete and progress moves forward.
4356  */
4357 void
4358 vfs_update_last_completion_time(void)
4359 {
4360 	if (vfs_unmountall_started) {
4361 		vfs_shutdown_last_completion_time = mach_absolute_time();
4362 	}
4363 }
4364 
4365 /*
4366  * vfs_last_completion_time --
4367  *
4368  * Returns the "last I/O completion time" timestamp.  Return
4369  * value is a mach_absolute_time() value, and is not meaningful
4370  * unless vfs_shutdown_in_progress() also returns true.
4371  */
4372 uint64_t
4373 vfs_last_completion_time(void)
4374 {
4375 	return vfs_unmountall_started ? vfs_shutdown_last_completion_time : 0;
4376 }
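/*
 * Example (hypothetical watchdog check, assuming a 60-second stall
 * threshold): a monitoring thread could detect a stalled VFS shutdown
 * along these lines:
 *
 *	if (vfs_shutdown_in_progress()) {
 *		uint64_t delta_abs = mach_absolute_time() -
 *		    vfs_last_completion_time();
 *		uint64_t delta_ns;
 *
 *		absolutetime_to_nanoseconds(delta_abs, &delta_ns);
 *		if (delta_ns > 60 * NSEC_PER_SEC) {
 *			panic("VFS shutdown made no progress for 60s");
 *		}
 *	}
 */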
4377 
4378 /*
4379  * This routine is called from vnode_pager_deallocate out of the VM
4380  * The path to vnode_pager_deallocate can only be initiated by ubc_destroy_named
4381  * on a vnode that has a UBCINFO
4382  */
4383 __private_extern__ void
4384 vnode_pager_vrele(vnode_t vp)
4385 {
4386 	struct ubc_info *uip;
4387 
4388 	vnode_lock_spin(vp);
4389 
4390 	vp->v_lflag &= ~VNAMED_UBC;
4391 	if (vp->v_usecount != 0) {
4392 		/*
4393 		 * At the eleventh hour, just before the ubcinfo is
4394 		 * destroyed, ensure the ubc-specific v_usecount
4395 		 * reference has gone.  We use v_usecount != 0 as a hint;
4396 		 * ubc_unmap() does nothing if there's no mapping.
4397 		 *
4398 		 * This case is caused by coming here via forced unmount,
4399 		 * versus the usual vm_object_deallocate() path.
4400 		 * In the forced unmount case, ubc_destroy_named()
4401 		 * releases the pager before memory_object_last_unmap()
4402 		 * can be called.
4403 		 */
4404 		vnode_unlock(vp);
4405 		ubc_unmap(vp);
4406 		vnode_lock_spin(vp);
4407 	}
4408 
4409 	uip = vp->v_ubcinfo;
4410 	vp->v_ubcinfo = UBC_INFO_NULL;
4411 
4412 	vnode_unlock(vp);
4413 
4414 	ubc_info_deallocate(uip);
4415 }
4416 
4417 
4418 #include <sys/disk.h>
4419 
4420 u_int32_t rootunit = (u_int32_t)-1;
4421 
4422 #if CONFIG_IOSCHED
4423 extern int lowpri_throttle_enabled;
4424 extern int iosched_enabled;
4425 #endif
4426 
4427 errno_t
4428 vfs_init_io_attributes(vnode_t devvp, mount_t mp)
4429 {
4430 	int     error;
4431 	off_t   readblockcnt = 0;
4432 	off_t   writeblockcnt = 0;
4433 	off_t   readmaxcnt = 0;
4434 	off_t   writemaxcnt = 0;
4435 	off_t   readsegcnt = 0;
4436 	off_t   writesegcnt = 0;
4437 	off_t   readsegsize = 0;
4438 	off_t   writesegsize = 0;
4439 	off_t   alignment = 0;
4440 	u_int32_t minsaturationbytecount = 0;
4441 	u_int32_t ioqueue_depth = 0;
4442 	u_int32_t blksize;
4443 	u_int64_t temp;
4444 	u_int32_t features;
4445 	u_int64_t location = 0;
4446 	vfs_context_t ctx = vfs_context_current();
4447 	dk_corestorage_info_t cs_info;
4448 	boolean_t cs_present = FALSE;
4449 	int isssd = 0;
4450 	int isvirtual = 0;
4451 
4452 
4453 	VNOP_IOCTL(devvp, DKIOCGETTHROTTLEMASK, (caddr_t)&mp->mnt_throttle_mask, 0, NULL);
4454 	/*
4455 	 * as a reasonable approximation, only use the lowest bit of the mask
4456 	 * to generate a disk unit number
4457 	 */
4458 	mp->mnt_devbsdunit = mp->mnt_throttle_mask ?
4459 	    num_trailing_0(mp->mnt_throttle_mask) : (LOWPRI_MAX_NUM_DEV - 1);
4460 
4461 	if (devvp == rootvp) {
4462 		rootunit = mp->mnt_devbsdunit;
4463 	}
4464 
4465 	if (mp->mnt_devbsdunit == rootunit) {
4466 		/*
4467 		 * this mount point exists on the same device as the root
4468 		 * partition, so it comes under the hard throttle control...
4469 		 * this is true even for the root mount point itself
4470 		 */
4471 		mp->mnt_kern_flag |= MNTK_ROOTDEV;
4472 	}
4473 	/*
4474 	 * force the spec device to re-cache
4475 	 * the underlying block size in case
4476 	 * the filesystem overrode the initial value
4477 	 */
4478 	set_fsblocksize(devvp);
4479 
4480 
4481 	if ((error = VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE,
4482 	    (caddr_t)&blksize, 0, ctx))) {
4483 		return error;
4484 	}
4485 
4486 	mp->mnt_devblocksize = blksize;
4487 
4488 	/*
4489 	 * set the maximum possible I/O size
4490 	 * this may get clipped to a smaller value
4491 	 * based on which constraints are being advertised
4492 	 * and if those advertised constraints result in a smaller
4493 	 * limit for a given I/O
4494 	 */
4495 	mp->mnt_maxreadcnt = MAX_UPL_SIZE_BYTES;
4496 	mp->mnt_maxwritecnt = MAX_UPL_SIZE_BYTES;
4497 
4498 	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, ctx) == 0) {
4499 		if (isvirtual) {
4500 			mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
4501 			mp->mnt_flag |= MNT_REMOVABLE;
4502 		}
4503 	}
4504 	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
4505 		if (isssd) {
4506 			mp->mnt_kern_flag |= MNTK_SSD;
4507 		}
4508 	}
4509 	if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
4510 	    (caddr_t)&features, 0, ctx))) {
4511 		return error;
4512 	}
4513 
4514 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTREAD,
4515 	    (caddr_t)&readblockcnt, 0, ctx))) {
4516 		return error;
4517 	}
4518 
4519 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBLOCKCOUNTWRITE,
4520 	    (caddr_t)&writeblockcnt, 0, ctx))) {
4521 		return error;
4522 	}
4523 
4524 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTREAD,
4525 	    (caddr_t)&readmaxcnt, 0, ctx))) {
4526 		return error;
4527 	}
4528 
4529 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXBYTECOUNTWRITE,
4530 	    (caddr_t)&writemaxcnt, 0, ctx))) {
4531 		return error;
4532 	}
4533 
4534 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTREAD,
4535 	    (caddr_t)&readsegcnt, 0, ctx))) {
4536 		return error;
4537 	}
4538 
4539 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTCOUNTWRITE,
4540 	    (caddr_t)&writesegcnt, 0, ctx))) {
4541 		return error;
4542 	}
4543 
4544 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTREAD,
4545 	    (caddr_t)&readsegsize, 0, ctx))) {
4546 		return error;
4547 	}
4548 
4549 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMAXSEGMENTBYTECOUNTWRITE,
4550 	    (caddr_t)&writesegsize, 0, ctx))) {
4551 		return error;
4552 	}
4553 
4554 	if ((error = VNOP_IOCTL(devvp, DKIOCGETMINSEGMENTALIGNMENTBYTECOUNT,
4555 	    (caddr_t)&alignment, 0, ctx))) {
4556 		return error;
4557 	}
4558 
4559 	if ((error = VNOP_IOCTL(devvp, DKIOCGETCOMMANDPOOLSIZE,
4560 	    (caddr_t)&ioqueue_depth, 0, ctx))) {
4561 		return error;
4562 	}
4563 
4564 	if (readmaxcnt) {
4565 		mp->mnt_maxreadcnt = (readmaxcnt > UINT32_MAX) ? UINT32_MAX :(uint32_t) readmaxcnt;
4566 	}
4567 
4568 	if (readblockcnt) {
4569 		temp = readblockcnt * blksize;
4570 		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
4571 
4572 		if (temp < mp->mnt_maxreadcnt) {
4573 			mp->mnt_maxreadcnt = (u_int32_t)temp;
4574 		}
4575 	}
4576 
4577 	if (writemaxcnt) {
4578 		mp->mnt_maxwritecnt = (writemaxcnt > UINT32_MAX) ? UINT32_MAX : (uint32_t)writemaxcnt;
4579 	}
4580 
4581 	if (writeblockcnt) {
4582 		temp = writeblockcnt * blksize;
4583 		temp = (temp > UINT32_MAX) ? UINT32_MAX : temp;
4584 
4585 		if (temp < mp->mnt_maxwritecnt) {
4586 			mp->mnt_maxwritecnt = (u_int32_t)temp;
4587 		}
4588 	}
4589 
4590 	if (readsegcnt) {
4591 		temp = (readsegcnt > UINT16_MAX) ? UINT16_MAX : readsegcnt;
4592 	} else {
4593 		temp = mp->mnt_maxreadcnt / PAGE_SIZE;
4594 
4595 		if (temp > UINT16_MAX) {
4596 			temp = UINT16_MAX;
4597 		}
4598 	}
4599 	mp->mnt_segreadcnt = (u_int16_t)temp;
4600 
4601 	if (writesegcnt) {
4602 		temp = (writesegcnt > UINT16_MAX) ? UINT16_MAX : writesegcnt;
4603 	} else {
4604 		temp = mp->mnt_maxwritecnt / PAGE_SIZE;
4605 
4606 		if (temp > UINT16_MAX) {
4607 			temp = UINT16_MAX;
4608 		}
4609 	}
4610 	mp->mnt_segwritecnt = (u_int16_t)temp;
4611 
4612 	if (readsegsize) {
4613 		temp = (readsegsize > UINT32_MAX) ? UINT32_MAX : readsegsize;
4614 	} else {
4615 		temp = mp->mnt_maxreadcnt;
4616 	}
4617 	mp->mnt_maxsegreadsize = (u_int32_t)temp;
4618 
4619 	if (writesegsize) {
4620 		temp = (writesegsize > UINT32_MAX) ? UINT32_MAX : writesegsize;
4621 	} else {
4622 		temp = mp->mnt_maxwritecnt;
4623 	}
4624 	mp->mnt_maxsegwritesize = (u_int32_t)temp;
4625 
4626 	if (alignment) {
4627 		temp = (alignment > PAGE_SIZE) ? PAGE_MASK : alignment - 1;
4628 	} else {
4629 		temp = 0;
4630 	}
4631 	mp->mnt_alignmentmask = (uint32_t)temp;
4632 
4633 
4634 	if (ioqueue_depth > MNT_DEFAULT_IOQUEUE_DEPTH) {
4635 		temp = ioqueue_depth;
4636 	} else {
4637 		temp = MNT_DEFAULT_IOQUEUE_DEPTH;
4638 	}
4639 
4640 	mp->mnt_ioqueue_depth = (uint32_t)temp;
4641 	mp->mnt_ioscale = MNT_IOSCALE(mp->mnt_ioqueue_depth);
4642 
4643 	if (mp->mnt_ioscale > 1) {
4644 		printf("ioqueue_depth = %d,   ioscale = %d\n", (int)mp->mnt_ioqueue_depth, (int)mp->mnt_ioscale);
4645 	}
4646 
4647 	if (features & DK_FEATURE_FORCE_UNIT_ACCESS) {
4648 		mp->mnt_ioflags |= MNT_IOFLAGS_FUA_SUPPORTED;
4649 	}
4650 
4651 	if (VNOP_IOCTL(devvp, DKIOCGETIOMINSATURATIONBYTECOUNT, (caddr_t)&minsaturationbytecount, 0, ctx) == 0) {
4652 		mp->mnt_minsaturationbytecount = minsaturationbytecount;
4653 	} else {
4654 		mp->mnt_minsaturationbytecount = 0;
4655 	}
4656 
4657 	if (VNOP_IOCTL(devvp, DKIOCCORESTORAGE, (caddr_t)&cs_info, 0, ctx) == 0) {
4658 		cs_present = TRUE;
4659 	}
4660 
4661 	if (features & DK_FEATURE_UNMAP) {
4662 		mp->mnt_ioflags |= MNT_IOFLAGS_UNMAP_SUPPORTED;
4663 
4664 		if (cs_present == TRUE) {
4665 			mp->mnt_ioflags |= MNT_IOFLAGS_CSUNMAP_SUPPORTED;
4666 		}
4667 	}
4668 	if (cs_present == TRUE) {
4669 		/*
4670 		 * for now we'll use the following test as a proxy for
4671 		 * the underlying drive being FUSION in nature
4672 		 */
4673 		if ((cs_info.flags & DK_CORESTORAGE_PIN_YOUR_METADATA)) {
4674 			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
4675 		}
4676 	} else {
4677 		/* Check for APFS Fusion */
4678 		dk_apfs_flavour_t flavour;
4679 		if ((VNOP_IOCTL(devvp, DKIOCGETAPFSFLAVOUR, (caddr_t)&flavour, 0, ctx) == 0) &&
4680 		    (flavour == DK_APFS_FUSION)) {
4681 			mp->mnt_ioflags |= MNT_IOFLAGS_FUSION_DRIVE;
4682 		}
4683 	}
4684 
4685 	if (VNOP_IOCTL(devvp, DKIOCGETLOCATION, (caddr_t)&location, 0, ctx) == 0) {
4686 		if (location & DK_LOCATION_EXTERNAL) {
4687 			mp->mnt_ioflags |= MNT_IOFLAGS_PERIPHERAL_DRIVE;
4688 			mp->mnt_flag |= MNT_REMOVABLE;
4689 		}
4690 	}
4691 
4692 #if CONFIG_IOSCHED
4693 	if (iosched_enabled && (features & DK_FEATURE_PRIORITY)) {
4694 		mp->mnt_ioflags |= MNT_IOFLAGS_IOSCHED_SUPPORTED;
4695 		throttle_info_disable_throttle(mp->mnt_devbsdunit, (mp->mnt_ioflags & MNT_IOFLAGS_FUSION_DRIVE) != 0);
4696 	}
4697 #endif /* CONFIG_IOSCHED */
4698 	return error;
4699 }
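/*
 * Worked example of the clipping above (illustrative numbers): if the
 * device reports blksize = 4096 and DKIOCGETMAXBLOCKCOUNTREAD = 512,
 * then readblockcnt * blksize = 2 MiB, and mnt_maxreadcnt is clipped
 * from MAX_UPL_SIZE_BYTES down to 2 MiB.  With no segment counts
 * advertised, mnt_segreadcnt falls back to mnt_maxreadcnt / PAGE_SIZE.
 */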
4700 
4701 static struct klist fs_klist;
4702 static LCK_GRP_DECLARE(fs_klist_lck_grp, "fs_klist");
4703 static LCK_MTX_DECLARE(fs_klist_lock, &fs_klist_lck_grp);
4704 
4705 void
4706 vfs_event_init(void)
4707 {
4708 	klist_init(&fs_klist);
4709 }
4710 
4711 void
4712 vfs_event_signal(fsid_t *fsid, u_int32_t event, intptr_t data)
4713 {
4714 	if (event == VQ_DEAD || event == VQ_NOTRESP) {
4715 		struct mount *mp = vfs_getvfs(fsid);
4716 		if (mp) {
4717 			mount_lock_spin(mp);
4718 			if (data) {
4719 				mp->mnt_lflag &= ~MNT_LNOTRESP;     // Now responding
4720 			} else {
4721 				mp->mnt_lflag |= MNT_LNOTRESP;      // Not responding
4722 			}
4723 			mount_unlock(mp);
4724 		}
4725 	}
4726 
4727 	lck_mtx_lock(&fs_klist_lock);
4728 	KNOTE(&fs_klist, event);
4729 	lck_mtx_unlock(&fs_klist_lock);
4730 }
4731 
4732 /*
4733  * return the number of mounted filesystems.
4734  */
4735 static int
4736 sysctl_vfs_getvfscnt(void)
4737 {
4738 	return mount_getvfscnt();
4739 }
4740 
4741 
4742 static int
4743 mount_getvfscnt(void)
4744 {
4745 	int ret;
4746 
4747 	mount_list_lock();
4748 	ret = nummounts;
4749 	mount_list_unlock();
4750 	return ret;
4751 }
4752 
4753 
4754 
4755 static int
4756 mount_fillfsids(fsid_t *fsidlst, int count)
4757 {
4758 	struct mount *mp;
4759 	int actual = 0;
4760 
4761 	actual = 0;
4762 	mount_list_lock();
4763 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4764 		if (actual < count) {
4765 			fsidlst[actual] = mp->mnt_vfsstat.f_fsid;
4766 			actual++;
4767 		}
4768 	}
4769 	mount_list_unlock();
4770 	return actual;
4771 }
4772 
4773 /*
4774  * Fill in the array of fsid_t's up to a max of 'count'; the actual
4775  * number filled in will be set in '*actual'.  If there are more fsid_t's
4776  * than room in fsidlst then ENOMEM will be returned and '*actual' will
4777  * have the actual count.
4778  * Callers depend on '*actual' being filled out even in the error case.
4779  */
4780 static int
4781 sysctl_vfs_getvfslist(fsid_t *fsidlst, unsigned long count, unsigned long *actual)
4782 {
4783 	struct mount *mp;
4784 
4785 	*actual = 0;
4786 	mount_list_lock();
4787 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
4788 		(*actual)++;
4789 		if (*actual <= count) {
4790 			fsidlst[(*actual) - 1] = mp->mnt_vfsstat.f_fsid;
4791 		}
4792 	}
4793 	mount_list_unlock();
4794 	return *actual <= count ? 0 : ENOMEM;
4795 }
4796 
4797 static int
4798 sysctl_vfs_vfslist(__unused struct sysctl_oid *oidp, __unused void *arg1,
4799     __unused int arg2, struct sysctl_req *req)
4800 {
4801 	unsigned long actual;
4802 	int error;
4803 	size_t space;
4804 	fsid_t *fsidlst;
4805 
4806 	/* This is a readonly node. */
4807 	if (req->newptr != USER_ADDR_NULL) {
4808 		return EPERM;
4809 	}
4810 
4811 	/* they are querying us so just return the space required. */
4812 	if (req->oldptr == USER_ADDR_NULL) {
4813 		req->oldidx = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
4814 		return 0;
4815 	}
4816 again:
4817 	/*
4818 	 * Retrieve an accurate count of the amount of space required to copy
4819 	 * out all the fsids in the system.
4820 	 */
4821 	space = req->oldlen;
4822 	req->oldlen = sysctl_vfs_getvfscnt() * sizeof(fsid_t);
4823 
4824 	/* they didn't give us enough space. */
4825 	if (space < req->oldlen) {
4826 		return ENOMEM;
4827 	}
4828 
4829 	fsidlst = kalloc_data(req->oldlen, Z_WAITOK | Z_ZERO);
4830 	if (fsidlst == NULL) {
4831 		return ENOMEM;
4832 	}
4833 
4834 	error = sysctl_vfs_getvfslist(fsidlst, req->oldlen / sizeof(fsid_t),
4835 	    &actual);
4836 	/*
4837 	 * If we get back ENOMEM, then another mount has been added while we
4838 	 * slept in the allocation above.  If this is the case then try again.
4839 	 */
4840 	if (error == ENOMEM) {
4841 		kfree_data(fsidlst, req->oldlen);
4842 		req->oldlen = space;
4843 		goto again;
4844 	}
4845 	if (error == 0) {
4846 		error = SYSCTL_OUT(req, fsidlst, actual * sizeof(fsid_t));
4847 	}
4848 	kfree_data(fsidlst, req->oldlen);
4849 	return error;
4850 }
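/*
 * Example (userland sketch, assuming the vfs.generic.vfsidlist node
 * declared below): the standard two-pass sizing dance for this handler.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <sys/mount.h>
 *	#include <stdlib.h>
 *
 *	size_t len = 0;
 *	// pass 1: oldp == NULL, the kernel reports the space required
 *	if (sysctlbyname("vfs.generic.vfsidlist", NULL, &len, NULL, 0) == 0) {
 *		fsid_t *list = malloc(len);
 *		// pass 2: can fail with ENOMEM if a new mount raced in
 *		if (list != NULL &&
 *		    sysctlbyname("vfs.generic.vfsidlist", list, &len, NULL, 0) == 0) {
 *			size_t count = len / sizeof(fsid_t);
 *			// iterate over 'count' fsid_t entries here
 *		}
 *		free(list);
 *	}
 */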
4851 
4852 /*
4853  * Do a sysctl by fsid.
4854  */
4855 static int
4856 sysctl_vfs_ctlbyfsid(__unused struct sysctl_oid *oidp, void *arg1, int arg2,
4857     struct sysctl_req *req)
4858 {
4859 	union union_vfsidctl vc;
4860 	struct mount *mp = NULL;
4861 	struct vfsstatfs *sp;
4862 	int *name, namelen;
4863 	int flags = 0;
4864 	int error = 0, gotref = 0;
4865 	vfs_context_t ctx = vfs_context_current();
4866 	proc_t p = req->p;      /* XXX req->p != current_proc()? */
4867 	boolean_t is_64_bit;
4868 	union {
4869 		struct statfs64 sfs64;
4870 		struct user64_statfs osfs64;
4871 		struct user32_statfs osfs32;
4872 	} *sfsbuf;
4873 
4874 	if (req->newptr == USER_ADDR_NULL) {
4875 		error = EINVAL;
4876 		goto out;
4877 	}
4878 
4879 	name = arg1;
4880 	namelen = arg2;
4881 	is_64_bit = proc_is64bit(p);
4882 
4883 	error = SYSCTL_IN(req, &vc, is_64_bit? sizeof(vc.vc64):sizeof(vc.vc32));
4884 	if (error) {
4885 		goto out;
4886 	}
4887 	if (vc.vc32.vc_vers != VFS_CTL_VERS1) { /* works for 32 and 64 */
4888 		error = EINVAL;
4889 		goto out;
4890 	}
4891 	mp = mount_list_lookupby_fsid(&vc.vc32.vc_fsid, 0, 1); /* works for 32 and 64 */
4892 	if (mp == NULL) {
4893 		error = ENOENT;
4894 		goto out;
4895 	}
4896 	gotref = 1;
4897 	/* reset so that the fs specific code can fetch it. */
4898 	req->newidx = 0;
4899 	/*
4900 	 * Note if this is a VFS_CTL then we pass the actual sysctl req
4901 	 * in for "oldp" so that the lower layer can DTRT and use the
4902 	 * SYSCTL_IN/OUT routines.
4903 	 */
4904 	if (mp->mnt_op->vfs_sysctl != NULL) {
4905 		if (is_64_bit) {
4906 			if (vfs_64bitready(mp)) {
4907 				error = mp->mnt_op->vfs_sysctl(name, namelen,
4908 				    CAST_USER_ADDR_T(req),
4909 				    NULL, USER_ADDR_NULL, 0,
4910 				    ctx);
4911 			} else {
4912 				error = ENOTSUP;
4913 			}
4914 		} else {
4915 			error = mp->mnt_op->vfs_sysctl(name, namelen,
4916 			    CAST_USER_ADDR_T(req),
4917 			    NULL, USER_ADDR_NULL, 0,
4918 			    ctx);
4919 		}
4920 		if (error != ENOTSUP) {
4921 			goto out;
4922 		}
4923 	}
4924 	switch (name[0]) {
4925 	case VFS_CTL_UMOUNT:
4926 #if CONFIG_MACF
4927 		error = mac_mount_check_umount(ctx, mp);
4928 		if (error != 0) {
4929 			goto out;
4930 		}
4931 #endif
4932 		req->newidx = 0;
4933 		if (is_64_bit) {
4934 			req->newptr = vc.vc64.vc_ptr;
4935 			req->newlen = (size_t)vc.vc64.vc_len;
4936 		} else {
4937 			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
4938 			req->newlen = vc.vc32.vc_len;
4939 		}
4940 		error = SYSCTL_IN(req, &flags, sizeof(flags));
4941 		if (error) {
4942 			break;
4943 		}
4944 
4945 		mount_ref(mp, 0);
4946 		mount_iterdrop(mp);
4947 		gotref = 0;
4948 		/* safedounmount consumes a ref */
4949 		error = safedounmount(mp, flags, ctx);
4950 		break;
4951 	case VFS_CTL_OSTATFS:
4952 	case VFS_CTL_STATFS64:
4953 #if CONFIG_MACF
4954 		error = mac_mount_check_stat(ctx, mp);
4955 		if (error != 0) {
4956 			break;
4957 		}
4958 #endif
4959 		req->newidx = 0;
4960 		if (is_64_bit) {
4961 			req->newptr = vc.vc64.vc_ptr;
4962 			req->newlen = (size_t)vc.vc64.vc_len;
4963 		} else {
4964 			req->newptr = CAST_USER_ADDR_T(vc.vc32.vc_ptr);
4965 			req->newlen = vc.vc32.vc_len;
4966 		}
4967 		error = SYSCTL_IN(req, &flags, sizeof(flags));
4968 		if (error) {
4969 			break;
4970 		}
4971 		sp = &mp->mnt_vfsstat;
4972 		if (((flags & MNT_NOWAIT) == 0 || (flags & (MNT_WAIT | MNT_DWAIT))) &&
4973 		    (error = vfs_update_vfsstat(mp, ctx, VFS_USER_EVENT))) {
4974 			goto out;
4975 		}
4976 
4977 		sfsbuf = kalloc_type(typeof(*sfsbuf), Z_WAITOK);
4978 
4979 		if (name[0] == VFS_CTL_STATFS64) {
4980 			struct statfs64 *sfs = &sfsbuf->sfs64;
4981 
4982 			vfs_get_statfs64(mp, sfs);
4983 			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
4984 		} else if (is_64_bit) {
4985 			struct user64_statfs *sfs = &sfsbuf->osfs64;
4986 
4987 			bzero(sfs, sizeof(*sfs));
4988 			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
4989 			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
4990 			sfs->f_bsize = (user64_long_t)sp->f_bsize;
4991 			sfs->f_iosize = (user64_long_t)sp->f_iosize;
4992 			sfs->f_blocks = (user64_long_t)sp->f_blocks;
4993 			sfs->f_bfree = (user64_long_t)sp->f_bfree;
4994 			sfs->f_bavail = (user64_long_t)sp->f_bavail;
4995 			sfs->f_files = (user64_long_t)sp->f_files;
4996 			sfs->f_ffree = (user64_long_t)sp->f_ffree;
4997 			sfs->f_fsid = sp->f_fsid;
4998 			sfs->f_owner = sp->f_owner;
4999 			vfs_getfstypename(mp, sfs->f_fstypename, MFSNAMELEN);
5000 			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
5001 			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);
5002 
5003 			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
5004 		} else {
5005 			struct user32_statfs *sfs = &sfsbuf->osfs32;
5006 			long temp;
5007 
5008 			bzero(sfs, sizeof(*sfs));
5009 			sfs->f_flags = mp->mnt_flag & MNT_VISFLAGMASK;
5010 			sfs->f_type = (short)mp->mnt_vtable->vfc_typenum;
5011 
5012 			/*
5013 			 * It's possible for there to be more than 2^31 blocks in the filesystem, so we
5014 			 * have to fudge the numbers here in that case.   We inflate the blocksize in order
5015 			 * to reflect the filesystem size as best we can.
5016 			 */
5017 			if (sp->f_blocks > INT_MAX) {
5018 				int             shift;
5019 
5020 				/*
5021 				 * Work out how far we have to shift the block count down to make it fit.
5022 				 * Note that it's possible to have to shift so far that the resulting
5023 				 * blocksize would be unreportably large.  At that point, we will clip
5024 				 * any values that don't fit.
5025 				 *
5026 				 * For safety's sake, we also ensure that f_iosize is never reported as
5027 				 * being smaller than f_bsize.
5028 				 */
5029 				for (shift = 0; shift < 32; shift++) {
5030 					if ((sp->f_blocks >> shift) <= INT_MAX) {
5031 						break;
5032 					}
5033 					if ((((long long)sp->f_bsize) << (shift + 1)) > INT_MAX) {
5034 						break;
5035 					}
5036 				}
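				/*
				 * Worked example (illustrative): with f_blocks = 2^32
				 * and f_bsize = 4096, the loop stops at shift = 2
				 * (2^32 >> 2 = 2^30 fits in INT_MAX), so we report
				 * f_bsize = 16384 and f_blocks = 2^30, preserving the
				 * total filesystem size.
				 */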
5037 #define __SHIFT_OR_CLIP(x, s)   ((((x) >> (s)) > INT_MAX) ? INT_MAX : ((x) >> (s)))
5038 				sfs->f_blocks = (user32_long_t)__SHIFT_OR_CLIP(sp->f_blocks, shift);
5039 				sfs->f_bfree = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bfree, shift);
5040 				sfs->f_bavail = (user32_long_t)__SHIFT_OR_CLIP(sp->f_bavail, shift);
5041 #undef __SHIFT_OR_CLIP
5042 				sfs->f_bsize = (user32_long_t)(sp->f_bsize << shift);
5043 				temp = lmax(sp->f_iosize, sp->f_bsize);
5044 				if (temp > INT32_MAX) {
5045 					error = EINVAL;
5046 					kfree_type(typeof(*sfsbuf), sfsbuf);
5047 					goto out;
5048 				}
5049 				sfs->f_iosize = (user32_long_t)temp;
5050 			} else {
5051 				sfs->f_bsize = (user32_long_t)sp->f_bsize;
5052 				sfs->f_iosize = (user32_long_t)sp->f_iosize;
5053 				sfs->f_blocks = (user32_long_t)sp->f_blocks;
5054 				sfs->f_bfree = (user32_long_t)sp->f_bfree;
5055 				sfs->f_bavail = (user32_long_t)sp->f_bavail;
5056 			}
5057 			sfs->f_files = (user32_long_t)sp->f_files;
5058 			sfs->f_ffree = (user32_long_t)sp->f_ffree;
5059 			sfs->f_fsid = sp->f_fsid;
5060 			sfs->f_owner = sp->f_owner;
5061 
5062 			vfs_getfstypename(mp, sfs->f_fstypename, MFSNAMELEN);
5063 			strlcpy(sfs->f_mntonname, sp->f_mntonname, MNAMELEN);
5064 			strlcpy(sfs->f_mntfromname, sp->f_mntfromname, MNAMELEN);
5065 
5066 			error = SYSCTL_OUT(req, sfs, sizeof(*sfs));
5067 		}
5068 		kfree_type(typeof(*sfsbuf), sfsbuf);
5069 		break;
5070 	default:
5071 		error = ENOTSUP;
5072 		goto out;
5073 	}
5074 out:
5075 	if (gotref != 0) {
5076 		mount_iterdrop(mp);
5077 	}
5078 	return error;
5079 }
5080 
5081 static int      filt_fsattach(struct knote *kn, struct kevent_qos_s *kev);
5082 static void     filt_fsdetach(struct knote *kn);
5083 static int      filt_fsevent(struct knote *kn, long hint);
5084 static int      filt_fstouch(struct knote *kn, struct kevent_qos_s *kev);
5085 static int      filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev);
5086 SECURITY_READ_ONLY_EARLY(struct filterops) fs_filtops = {
5087 	.f_attach = filt_fsattach,
5088 	.f_detach = filt_fsdetach,
5089 	.f_event = filt_fsevent,
5090 	.f_touch = filt_fstouch,
5091 	.f_process = filt_fsprocess,
5092 };
5093 
5094 static int
5095 filt_fsattach(struct knote *kn, __unused struct kevent_qos_s *kev)
5096 {
5097 	kn->kn_flags |= EV_CLEAR; /* automatic */
5098 	kn->kn_sdata = 0;         /* incoming data is ignored */
5099 
5100 	lck_mtx_lock(&fs_klist_lock);
5101 	KNOTE_ATTACH(&fs_klist, kn);
5102 	lck_mtx_unlock(&fs_klist_lock);
5103 
5104 	/*
5105 	 * filter only sees future events,
5106 	 * so it can't be fired already.
5107 	 */
5108 	return 0;
5109 }
5110 
5111 static void
5112 filt_fsdetach(struct knote *kn)
5113 {
5114 	lck_mtx_lock(&fs_klist_lock);
5115 	KNOTE_DETACH(&fs_klist, kn);
5116 	lck_mtx_unlock(&fs_klist_lock);
5117 }
5118 
5119 static int
5120 filt_fsevent(struct knote *kn, long hint)
5121 {
5122 	/*
5123 	 * Backwards compatibility:
5124 	 * Other filters would do nothing if kn->kn_sfflags == 0
5125 	 */
5126 
5127 	if ((kn->kn_sfflags == 0) || (kn->kn_sfflags & hint)) {
5128 		kn->kn_fflags |= hint;
5129 	}
5130 
5131 	return kn->kn_fflags != 0;
5132 }
5133 
5134 static int
5135 filt_fstouch(struct knote *kn, struct kevent_qos_s *kev)
5136 {
5137 	int res;
5138 
5139 	lck_mtx_lock(&fs_klist_lock);
5140 
5141 	kn->kn_sfflags = kev->fflags;
5142 
5143 	/*
5144 	 * The filter function above sets bits even if nobody is looking for them.
5145 	 * Just preserve those bits even if the new mask is more selective
5146 	 * than before.
5147 	 *
5148 	 * For compatibility with previous implementations, we leave kn_fflags
5149 	 * as they were before.
5150 	 */
5151 	//if (kn->kn_sfflags)
5152 	//	kn->kn_fflags &= kn->kn_sfflags;
5153 	res = (kn->kn_fflags != 0);
5154 
5155 	lck_mtx_unlock(&fs_klist_lock);
5156 
5157 	return res;
5158 }
5159 
5160 static int
5161 filt_fsprocess(struct knote *kn, struct kevent_qos_s *kev)
5162 {
5163 	int res = 0;
5164 
5165 	lck_mtx_lock(&fs_klist_lock);
5166 	if (kn->kn_fflags) {
5167 		knote_fill_kevent(kn, kev, 0);
5168 		res = 1;
5169 	}
5170 	lck_mtx_unlock(&fs_klist_lock);
5171 	return res;
5172 }
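/*
 * Example (userland sketch): these filter ops back the EVFILT_FS kqueue
 * filter.  A process can watch for mount-level events roughly like so
 * (EV_CLEAR is implied; fflags == 0 means "all events", per
 * filt_fsevent() above):
 *
 *	#include <sys/event.h>
 *	#include <sys/mount.h>
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 0, EVFILT_FS, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 *	// a later kevent(kq, NULL, 0, &kev, 1, NULL) returns with
 *	// kev.fflags holding VQ_* bits (e.g. VQ_MOUNT, VQ_UNMOUNT)
 */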
5173 
5174 static int
5175 sysctl_vfs_noremotehang(__unused struct sysctl_oid *oidp,
5176     __unused void *arg1, __unused int arg2, struct sysctl_req *req)
5177 {
5178 	int out, error;
5179 	pid_t pid;
5180 	proc_t p;
5181 
5182 	/* We need a pid. */
5183 	if (req->newptr == USER_ADDR_NULL) {
5184 		return EINVAL;
5185 	}
5186 
5187 	error = SYSCTL_IN(req, &pid, sizeof(pid));
5188 	if (error) {
5189 		return error;
5190 	}
5191 
5192 	p = proc_find(pid < 0 ? -pid : pid);
5193 	if (p == NULL) {
5194 		return ESRCH;
5195 	}
5196 
5197 	/*
5198 	 * Fetching the value is ok, but we only fetch if the old
5199 	 * pointer is given.
5200 	 */
5201 	if (req->oldptr != USER_ADDR_NULL) {
5202 		out = !((p->p_flag & P_NOREMOTEHANG) == 0);
5203 		proc_rele(p);
5204 		error = SYSCTL_OUT(req, &out, sizeof(out));
5205 		return error;
5206 	}
5207 
5208 	/* cansignal offers us enough security. */
5209 	if (p != req->p && proc_suser(req->p) != 0) {
5210 		proc_rele(p);
5211 		return EPERM;
5212 	}
5213 
5214 	if (pid < 0) {
5215 		OSBitAndAtomic(~((uint32_t)P_NOREMOTEHANG), &p->p_flag);
5216 	} else {
5217 		OSBitOrAtomic(P_NOREMOTEHANG, &p->p_flag);
5218 	}
5219 	proc_rele(p);
5220 
5221 	return 0;
5222 }
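/*
 * Example (userland sketch): the sign of the pid selects set vs. clear,
 * as implemented above; a positive pid sets P_NOREMOTEHANG on that
 * process, a negative pid clears it:
 *
 *	#include <sys/sysctl.h>
 *	#include <unistd.h>
 *
 *	pid_t pid = getpid();       // set the flag on ourselves
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));
 *	pid = -pid;                 // and clear it again
 *	sysctlbyname("vfs.generic.noremotehang", NULL, NULL, &pid, sizeof(pid));
 */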
5223 
5224 static int
5225 sysctl_vfs_generic_conf SYSCTL_HANDLER_ARGS
5226 {
5227 	int *name, namelen;
5228 	struct vfstable *vfsp;
5229 	struct vfsconf vfsc = {};
5230 
5231 	(void)oidp;
5232 	name = arg1;
5233 	namelen = arg2;
5234 
5235 	if (namelen < 1) {
5236 		return EISDIR;
5237 	} else if (namelen > 1) {
5238 		return ENOTDIR;
5239 	}
5240 
5241 	mount_list_lock();
5242 	for (vfsp = vfsconf; vfsp; vfsp = vfsp->vfc_next) {
5243 		if (vfsp->vfc_typenum == name[0]) {
5244 			break;
5245 		}
5246 	}
5247 
5248 	if (vfsp == NULL) {
5249 		mount_list_unlock();
5250 		return ENOTSUP;
5251 	}
5252 
5253 	vfsc.vfc_reserved1 = 0;
5254 	bcopy(vfsp->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
5255 	vfsc.vfc_typenum = vfsp->vfc_typenum;
5256 	vfsc.vfc_refcount = vfsp->vfc_refcount;
5257 	vfsc.vfc_flags = vfsp->vfc_flags;
5258 	vfsc.vfc_reserved2 = 0;
5259 	vfsc.vfc_reserved3 = 0;
5260 
5261 	mount_list_unlock();
5262 	return SYSCTL_OUT(req, &vfsc, sizeof(struct vfsconf));
5263 }
5264 
5265 /* the vfs.generic. branch. */
5266 SYSCTL_EXTENSIBLE_NODE(_vfs, VFS_GENERIC, generic,
5267     CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs generic hinge");
5268 /* retrieve a list of mounted filesystem fsid_t */
5269 SYSCTL_PROC(_vfs_generic, OID_AUTO, vfsidlist,
5270     CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
5271     NULL, 0, sysctl_vfs_vfslist, "S,fsid", "List of mounted filesystem ids");
5272 /* perform operations on filesystem via fsid_t */
5273 SYSCTL_NODE(_vfs_generic, OID_AUTO, ctlbyfsid, CTLFLAG_RW | CTLFLAG_LOCKED,
5274     sysctl_vfs_ctlbyfsid, "ctlbyfsid");
5275 SYSCTL_PROC(_vfs_generic, OID_AUTO, noremotehang, CTLFLAG_RW | CTLFLAG_ANYBODY,
5276     NULL, 0, sysctl_vfs_noremotehang, "I", "noremotehang");
5277 SYSCTL_INT(_vfs_generic, VFS_MAXTYPENUM, maxtypenum,
5278     CTLFLAG_RD | CTLFLAG_KERN | CTLFLAG_LOCKED,
5279     &maxvfstypenum, 0, "");
5280 SYSCTL_INT(_vfs_generic, OID_AUTO, sync_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &sync_timeout_seconds, 0, "");
5281 SYSCTL_NODE(_vfs_generic, VFS_CONF, conf,
5282     CTLFLAG_RD | CTLFLAG_LOCKED,
5283     sysctl_vfs_generic_conf, "");
5284 #if DEVELOPMENT || DEBUG
5285 SYSCTL_INT(_vfs_generic, OID_AUTO, print_busy_vnodes,
5286     CTLTYPE_INT | CTLFLAG_RW,
5287     &print_busy_vnodes, 0,
5288     "VFS log busy vnodes blocking unmount");
5289 #endif
5290 
5291 /* Indicate that the root file system unmounted cleanly */
5292 static int vfs_root_unmounted_cleanly = 0;
5293 SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &vfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
5294 
5295 void
5296 vfs_set_root_unmounted_cleanly(void)
5297 {
5298 	vfs_root_unmounted_cleanly = 1;
5299 }
5300 
5301 /*
5302  * Print vnode state.
5303  */
5304 void
5305 vn_print_state(struct vnode *vp, const char *fmt, ...)
5306 {
5307 	va_list ap;
5308 	char perm_str[] = "(VM_KERNEL_ADDRPERM pointer)";
5309 	char fs_name[MFSNAMELEN];
5310 
5311 	va_start(ap, fmt);
5312 	vprintf(fmt, ap);
5313 	va_end(ap);
5314 	printf("vp 0x%0llx %s: ", (uint64_t)VM_KERNEL_ADDRPERM(vp), perm_str);
5315 	printf("tag %d, type %d\n", vp->v_tag, vp->v_type);
5316 	/* Counts .. */
5317 	printf("    iocount %d, usecount %d, kusecount %d references %d\n",
5318 	    vp->v_iocount, vp->v_usecount, vp->v_kusecount, vp->v_references);
5319 	printf("    writecount %d, numoutput %d\n", vp->v_writecount,
5320 	    vp->v_numoutput);
5321 	/* Flags */
5322 	printf("    flag 0x%x, lflag 0x%x, listflag 0x%x\n", vp->v_flag,
5323 	    vp->v_lflag, vp->v_listflag);
5324 
5325 	if (vp->v_mount == NULL || vp->v_mount == dead_mountp) {
5326 		strlcpy(fs_name, "deadfs", MFSNAMELEN);
5327 	} else {
5328 		vfs_name(vp->v_mount, fs_name);
5329 	}
5330 
5331 	printf("    v_data 0x%0llx %s\n",
5332 	    (vp->v_data ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_data) : 0),
5333 	    perm_str);
5334 	printf("    v_mount 0x%0llx %s vfs_name %s\n",
5335 	    (vp->v_mount ? (uint64_t)VM_KERNEL_ADDRPERM(vp->v_mount) : 0),
5336 	    perm_str, fs_name);
5337 }
5338 
5339 long num_reusedvnodes = 0;
5340 
5341 
5342 static vnode_t
5343 process_vp(vnode_t vp, int want_vp, bool can_defer, int *deferred)
5344 {
5345 	unsigned int  vpid;
5346 
5347 	*deferred = 0;
5348 
5349 	vpid = vp->v_id;
5350 
5351 	vnode_list_remove_locked(vp);
5352 
5353 	vnode_hold(vp);
5354 	vnode_list_unlock();
5355 
5356 	vnode_lock_spin(vp);
5357 
5358 	/*
5359 	 * We may have waited for the vnode_lock after removing the vp from the freelist,
5360 	 * and the vid is bumped only at the very end of reclaim. So it is possible
5361 	 * that we are looking at a vnode that is being terminated. If so, skip it.
5362 	 */
5363 	if ((vpid != vp->v_id) || (vp->v_usecount != 0) || (vp->v_iocount != 0) ||
5364 	    VONLIST(vp) || (vp->v_lflag & VL_TERMINATE)) {
5365 		/*
5366 		 * we lost the race between dropping the list lock
5367 		 * and picking up the vnode_lock... someone else
5368 		 * used this vnode and it is now in a new state
5369 		 */
5370 		vnode_drop_and_unlock(vp);
5371 
5372 		return NULLVP;
5373 	}
5374 	if ((vp->v_lflag & (VL_NEEDINACTIVE | VL_MARKTERM)) == VL_NEEDINACTIVE) {
5375 		/*
5376 		 * we did a vnode_rele_ext that asked for
5377 		 * us not to reenter the filesystem during
5378 		 * the release even though VL_NEEDINACTIVE was
5379 		 * set... we'll do it here by doing a
5380 		 * vnode_get/vnode_put
5381 		 *
5382 		 * pick up an iocount so that we can call
5383 		 * vnode_put and drive the VNOP_INACTIVE...
5384 		 * vnode_put will either leave us off
5385 		 * the freelist if a new ref comes in,
5386 		 * or put us back on the end of the freelist
5387 		 * or recycle us if we were marked for termination...
5388 		 * so we'll just go grab a new candidate
5389 		 */
5390 		vp->v_iocount++;
5391 #ifdef CONFIG_IOCOUNT_TRACE
5392 		record_vp(vp, 1);
5393 #endif
5394 		vnode_put_locked(vp);
5395 		vnode_drop_and_unlock(vp);
5396 
5397 		return NULLVP;
5398 	}
5399 	/*
5400 	 * Checks for anyone racing us for recycle
5401 	 */
5402 	if (vp->v_type != VBAD) {
5403 		if ((want_vp || can_defer) && (vnode_on_reliable_media(vp) == FALSE || (vp->v_flag & VISDIRTY))) {
5404 			vnode_async_list_add(vp);
5405 			vnode_drop_and_unlock(vp);
5406 
5407 			*deferred = 1;
5408 
5409 			return NULLVP;
5410 		}
5411 		if (vp->v_lflag & VL_DEAD) {
5412 			panic("new_vnode(%p): the vnode is VL_DEAD but not VBAD", vp);
5413 		}
5414 
5415 		vnode_lock_convert(vp);
5416 		(void)vnode_reclaim_internal(vp, 1, want_vp, 0);
5417 
5418 		if (want_vp) {
5419 			if ((VONLIST(vp))) {
5420 				panic("new_vnode(%p): vp on list", vp);
5421 			}
5422 			if (vp->v_usecount || vp->v_iocount || vp->v_kusecount ||
5423 			    (vp->v_lflag & (VNAMED_UBC | VNAMED_MOUNT | VNAMED_FSHASH))) {
5424 				panic("new_vnode(%p): free vnode still referenced", vp);
5425 			}
5426 			if ((vp->v_mntvnodes.tqe_prev != 0) && (vp->v_mntvnodes.tqe_next != 0)) {
5427 				panic("new_vnode(%p): vnode seems to be on mount list", vp);
5428 			}
5429 			if (!LIST_EMPTY(&vp->v_nclinks) || !TAILQ_EMPTY(&vp->v_ncchildren)) {
5430 				panic("new_vnode(%p): vnode still hooked into the name cache", vp);
5431 			}
5432 		} else {
5433 			vnode_drop_and_unlock(vp);
5434 			vp = NULLVP;
5435 		}
5436 	}
5437 	return vp;
5438 }
5439 
5440 __attribute__((noreturn))
5441 static void
5442 async_work_continue(void)
5443 {
5444 	struct async_work_lst *q;
5445 	int     deferred;
5446 	vnode_t vp;
5447 
5448 	q = &vnode_async_work_list;
5449 
5450 	for (;;) {
5451 		vnode_list_lock();
5452 
5453 		if (TAILQ_EMPTY(q)) {
5454 			assert_wait(q, (THREAD_UNINT));
5455 
5456 			vnode_list_unlock();
5457 
5458 			thread_block((thread_continue_t)async_work_continue);
5459 
5460 			continue;
5461 		}
5462 		async_work_handled++;
5463 
5464 		vp = TAILQ_FIRST(q);
5465 
5466 		vp = process_vp(vp, 0, false, &deferred);
5467 
5468 		if (vp != NULLVP) {
5469 			panic("found VBAD vp (%p) on async queue", vp);
5470 		}
5471 	}
5472 }
5473 
5474 #if CONFIG_JETSAM
5475 bool do_async_jetsam = false;
5476 #endif
5477 
5478 __attribute__((noreturn))
5479 static void
5480 vn_laundry_continue(void)
5481 {
5482 	struct freelst *free_q;
5483 	struct ragelst *rage_q;
5484 	vnode_t vp;
5485 	int deferred;
5486 	bool rage_q_empty;
5487 	bool free_q_empty;
5488 
5489 
5490 	free_q = &vnode_free_list;
5491 	rage_q = &vnode_rage_list;
5492 
5493 	for (;;) {
5494 		vnode_list_lock();
5495 
5496 #if CONFIG_JETSAM
5497 		if (do_async_jetsam) {
5498 			do_async_jetsam = false;
5499 			if (deadvnodes <= deadvnodes_low) {
5500 				vnode_list_unlock();
5501 
5502 				log(LOG_EMERG, "Initiating vnode jetsam : %d desired, %ld numvnodes, "
5503 				    "%ld free, %ld dead, %ld async, %d rage\n",
5504 				    desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);
5505 
5506 				memorystatus_kill_on_vnode_limit();
5507 
5508 				continue;
5509 			}
5510 		}
5511 #endif
5512 
5513 		if (!TAILQ_EMPTY(&vnode_async_work_list)) {
5514 			vp = TAILQ_FIRST(&vnode_async_work_list);
5515 			async_work_handled++;
5516 
5517 			vp = process_vp(vp, 0, false, &deferred);
5518 
5519 			if (vp != NULLVP) {
5520 				panic("found VBAD vp (%p) on async queue", vp);
5521 			}
5522 			continue;
5523 		}
5524 
5525 		free_q_empty = TAILQ_EMPTY(free_q);
5526 		rage_q_empty = TAILQ_EMPTY(rage_q);
5527 
5528 		if (!rage_q_empty && !free_q_empty) {
5529 			struct timeval current_tv;
5530 
5531 			microuptime(&current_tv);
5532 			if (ragevnodes < rage_limit &&
5533 			    ((current_tv.tv_sec - rage_tv.tv_sec) < RAGE_TIME_LIMIT)) {
5534 				rage_q_empty = true;
5535 			}
5536 		}
5537 
5538 		if (numvnodes < numvnodes_min || (rage_q_empty && free_q_empty) ||
5539 		    (reusablevnodes <= reusablevnodes_max && deadvnodes >= deadvnodes_high)) {
5540 			assert_wait(free_q, (THREAD_UNINT));
5541 
5542 			vnode_list_unlock();
5543 
5544 			thread_block((thread_continue_t)vn_laundry_continue);
5545 
5546 			continue;
5547 		}
5548 
5549 		if (!rage_q_empty) {
5550 			vp = TAILQ_FIRST(rage_q);
5551 		} else {
5552 			vp = TAILQ_FIRST(free_q);
5553 		}
5554 
5555 		vp = process_vp(vp, 0, false, &deferred);
5556 
5557 		if (vp != NULLVP) {
5558 			/* If process_vp returns a vnode, it is locked and has a holdcount */
5559 			vnode_drop_and_unlock(vp);
5560 			vp = NULLVP;
5561 		}
5562 	}
5563 }
5564 
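/*
 * Wake the laundry thread only when it can make useful progress: either
 * dead vnodes are queued for deallocation (deadvnodes_noreuse), or we
 * are above numvnodes_min with a depleted dead pool and either too many
 * reusable vnodes or more vnodes than desired.  This is roughly the
 * inverse of the conditions that put vn_laundry_continue() to sleep.
 */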
5565 static inline void
5566 wakeup_laundry_thread()
5567 {
5568 	if (deadvnodes_noreuse || (numvnodes >= numvnodes_min && deadvnodes < deadvnodes_low &&
5569 	    (reusablevnodes > reusablevnodes_max || numvnodes >= desiredvnodes))) {
5570 		wakeup(&vnode_free_list);
5571 	}
5572 }
5573 
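/*
 * Worked example for the 25%-growth rule used below (illustrative
 * numbers only): if the last event was sent with numvnodes == 4000, the
 * next one is not sent until numvnodes > 4000 + (4000 * 25) / 100,
 * i.e. above 5000, or until numvnodes reaches numvnodes_max.
 */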
5574 /*
5575  * This must be called under vnode_list_lock() to prevent race when accessing
5576  * various vnode stats.
5577  */
5578 static void
5579 send_freeable_vnodes_telemetry(void)
5580 {
5581 	bool send_event = false;
5582 
5583 	/*
5584 	 * Log an event when the 'numvnodes' is above the freeable vnodes threshold
5585 	 * or when it falls back within the threshold.
5586 	 * When the 'numvnodes' is above the threshold, log an event when it has
5587 	 * been incrementally growing by 25%.
5588 	 */
5589 	if ((numvnodes > desiredvnodes) && (freevnodes + deadvnodes) == 0) {
5590 		long last_numvnodes = freeable_vnodes_telemetry.numvnodes;
5591 
5592 		if (numvnodes > (last_numvnodes + ((last_numvnodes * 25) / 100)) ||
5593 		    numvnodes >= numvnodes_max) {
5594 			send_event = true;
5595 		}
5596 		freeablevnodes_threshold_crossed = true;
5597 	} else if (freeablevnodes_threshold_crossed &&
5598 	    (freevnodes + deadvnodes) > busyvnodes) {
5599 		freeablevnodes_threshold_crossed = false;
5600 		send_event = true;
5601 	}
5602 
5603 	if (__improbable(send_event)) {
5604 		ca_event_t event = CA_EVENT_ALLOCATE_FLAGS(freeable_vnodes, Z_NOWAIT);
5605 
5606 		if (event) {
5607 			/*
5608 			 * Update the stats except the 'numvnodes_max' and 'desiredvnodes'
5609 			 * as they are immutable after init.
5610 			 */
5611 			freeable_vnodes_telemetry.numvnodes_min = numvnodes_min;
5612 			freeable_vnodes_telemetry.numvnodes = numvnodes;
5613 			freeable_vnodes_telemetry.freevnodes = freevnodes;
5614 			freeable_vnodes_telemetry.deadvnodes = deadvnodes;
5615 			freeable_vnodes_telemetry.freeablevnodes = freeablevnodes;
5616 			freeable_vnodes_telemetry.busyvnodes = busyvnodes;
5617 			freeable_vnodes_telemetry.threshold_crossed =
5618 			    freeablevnodes_threshold_crossed;
5619 
5620 			memcpy(event->data, &freeable_vnodes_telemetry,
5621 			    sizeof(CA_EVENT_TYPE(freeable_vnodes)));
5622 
5623 			if (!freeablevnodes_threshold_crossed) {
5624 				freeable_vnodes_telemetry.numvnodes = 0;
5625 			}
5626 			CA_EVENT_SEND(event);
5627 		}
5628 	}
5629 }
5630 
5631 static int
5632 new_vnode(vnode_t *vpp, bool can_free)
5633 {
5634 	long force_alloc_min;
5635 	vnode_t vp;
5636 #if CONFIG_JETSAM
5637 	uint32_t retries = 0, max_retries = 2;                  /* retry in case of tablefull */
5638 #else
5639 	uint32_t retries = 0, max_retries = 100;                /* retry in case of tablefull */
5640 #endif
5641 	int force_alloc = 0, walk_count = 0;
5642 	boolean_t need_reliable_vp = FALSE;
5643 	int deferred;
5644 	struct timeval initial_tv;
5645 	struct timeval current_tv;
5646 	proc_t  curproc = current_proc();
5647 	bool force_alloc_freeable = false;
5648 
5649 	if (vn_dealloc_level == DEALLOC_VNODE_NONE) {
5650 		can_free = false;
5651 	}
5652 
5653 	initial_tv.tv_sec = 0;
5654 retry:
5655 	vp = NULLVP;
5656 
5657 	vnode_list_lock();
5658 	newvnode++;
5659 
5660 	if (need_reliable_vp == TRUE) {
5661 		async_work_timed_out++;
5662 	}
5663 
5664 	/*
5665 	 * The vnode list lock was dropped after force_alloc_freeable was set,
5666 	 * reevaluate.
5667 	 */
5668 	force_alloc_min = MAX(desiredvnodes, numvnodes_min);
5669 	if (force_alloc_freeable &&
5670 	    (numvnodes < force_alloc_min || numvnodes >= numvnodes_max)) {
5671 		force_alloc_freeable = false;
5672 	}
5673 
5674 #if CONFIG_JETSAM
5675 	if ((numvnodes_max > desiredvnodes) && numvnodes > (numvnodes_max - 100)
5676 #if (DEVELOPMENT || DEBUG)
5677 	    && !bootarg_no_vnode_jetsam
5678 #endif
5679 	    ) {
5680 		do_async_jetsam = true;
5681 		wakeup(&vnode_free_list);
5682 	}
5683 #endif /* CONFIG_JETSAM */
5684 
5685 	if (((numvnodes - deadvnodes + deadvnodes_noreuse) < desiredvnodes) ||
5686 	    force_alloc || force_alloc_freeable) {
5687 		struct timespec ts;
5688 		uint32_t vflag = 0;
5689 
5690 		/*
5691 		 * Can always reuse a dead one except if it is in the process of
5692 		 * being freed or the FS cannot handle freeable vnodes.
5693 		 */
5694 		if (!TAILQ_EMPTY(&vnode_dead_list)) {
5695 			/* Select an appropriate deadvnode */
5696 			if (numvnodes <= numvnodes_min || !can_free) {
5697 				/* vnodes up to numvnodes_min are not freeable */
5698 				vp = TAILQ_FIRST(&vnode_dead_list);
5699 				if (numvnodes > numvnodes_min &&
5700 				    (vp->v_flag & VCANDEALLOC)) {
5701 					/*
5702 					 * Freeable vnodes are added to the
5703 					 * back of the queue, so if the first
5704 					 * from the front is freeable, then
5705 					 * there are no non-freeable vnodes on the dead list.
5706 					 */
5707 					vp = NULLVP;
5708 				}
5709 			} else {
5710 				/*
5711 				 * Filesystems which opt in to freeable vnodes
5712 				 * can get either one.
5713 				 */
5714 				TAILQ_FOREACH_REVERSE(vp, &vnode_dead_list,
5715 				    deadlst, v_freelist) {
5716 					if (!(vp->v_listflag & VLIST_NO_REUSE)) {
5717 						break;
5718 					}
5719 				}
5720 			}
5721 
5722 			if (vp) {
5723 				force_alloc_freeable = false;
5724 				goto steal_this_vp;
5725 			}
5726 		}
5727 
5728 		/*
5729 		 * no dead vnodes available... if we're under
5730 		 * the limit, we'll create a new vnode
5731 		 */
5732 		numvnodes++;
5733 		if (force_alloc) {
5734 			numvnodes_min++;
5735 		} else if (can_free && (numvnodes > numvnodes_min)) {
5736 			allocedvnodes++;
5737 			freeablevnodes++;
5738 			vflag = VCANDEALLOC;
5739 
5740 			send_freeable_vnodes_telemetry();
5741 		}
5742 		vnode_list_unlock();
5743 
5744 		if (nc_smr_enabled) {
5745 			vp = zalloc_smr(vnode_zone, Z_WAITOK_ZERO_NOFAIL);
5746 		} else {
5747 			vp = zalloc_flags(vnode_zone, Z_WAITOK_ZERO_NOFAIL);
5748 		}
5749 
5750 		VLISTNONE(vp);          /* avoid double queue removal */
5751 		lck_mtx_init(&vp->v_lock, &vnode_lck_grp, &vnode_lck_attr);
5752 
5753 		TAILQ_INIT(&vp->v_ncchildren);
5754 
5755 		klist_init(&vp->v_knotes);
5756 		nanouptime(&ts);
5757 		vp->v_id = (uint32_t)ts.tv_nsec;
5758 		vp->v_flag = VSTANDARD | vflag;
5759 		if (force_alloc_freeable) {
5760 			/* This vnode should be recycled and freed immediately */
5761 			vp->v_lflag = VL_MARKTERM;
5762 			vp->v_listflag = VLIST_NO_REUSE;
5763 		}
5764 
5765 		if (vflag & VCANDEALLOC) {
5766 			os_atomic_inc(&busyvnodes, relaxed);
5767 		}
5768 
5769 #if CONFIG_MACF
5770 		if (mac_vnode_label_init_needed(vp)) {
5771 			mac_vnode_label_init(vp);
5772 		}
5773 #endif /* MAC */
5774 
5775 #if CONFIG_IOCOUNT_TRACE
5776 		if (__improbable(bootarg_vnode_iocount_trace)) {
5777 			vp->v_iocount_trace = (vnode_iocount_trace_t)zalloc_permanent(
5778 				IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace),
5779 				ZALIGN(struct vnode_iocount_trace));
5780 		}
5781 #endif /* CONFIG_IOCOUNT_TRACE */
5782 
5783 #if CONFIG_FILE_LEASES
5784 		LIST_INIT(&vp->v_leases);
5785 #endif
5786 
5787 		vp->v_iocount = 1;
5788 
5789 		goto done;
5790 	}
5791 
5792 	microuptime(&current_tv);
5793 
5794 #define MAX_WALK_COUNT 1000
5795 
5796 	if (!TAILQ_EMPTY(&vnode_rage_list) &&
5797 	    (ragevnodes >= rage_limit ||
5798 	    (current_tv.tv_sec - rage_tv.tv_sec) >= RAGE_TIME_LIMIT)) {
5799 		TAILQ_FOREACH(vp, &vnode_rage_list, v_freelist) {
5800 			if (!(vp->v_listflag & VLIST_RAGE)) {
5801 				panic("new_vnode: vp (%p) on RAGE list not marked VLIST_RAGE", vp);
5802 			}
5803 
5804 			// if we're a dependency-capable process, skip vnodes that can
5805 			// cause recycling deadlocks. (i.e. this process is diskimages
5806 			// helper and the vnode is in a disk image).  Querying the
5807 			// mnt_kern_flag for the mount's virtual device status
5808 			// is safer than checking the mnt_dependent_process, which
5809 			// may not be updated if there are multiple devnode layers
5810 			// in between the disk image and the final consumer.
5811 
5812 			if (((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5813 			    (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) &&
5814 			    !(vp->v_listflag & VLIST_NO_REUSE) &&
5815 			    (can_free || !(vp->v_flag & VCANDEALLOC))) {
5816 				/*
5817 				 * if need_reliable_vp == TRUE, then we've already sent one or more
5818 				 * non-reliable vnodes to the async thread for processing and timed
5819 				 * out waiting for a dead vnode to show up.  Use the MAX_WALK_COUNT
5820 				 * mechanism to first scan for a reliable vnode before forcing
5821 				 * a new vnode to be created
5822 				 */
5823 				if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5824 					break;
5825 				}
5826 			}
5827 
5828 			// don't iterate more than MAX_WALK_COUNT vnodes to
5829 			// avoid keeping the vnode list lock held for too long.
5830 
5831 			if (walk_count++ > MAX_WALK_COUNT) {
5832 				vp = NULL;
5833 				break;
5834 			}
5835 		}
5836 	}
5837 
5838 	if (vp == NULL && !TAILQ_EMPTY(&vnode_free_list)) {
5839 		/*
5840 		 * Pick the first vp for possible reuse
5841 		 */
5842 		walk_count = 0;
5843 		TAILQ_FOREACH(vp, &vnode_free_list, v_freelist) {
5844 			// if we're a dependency-capable process, skip vnodes that can
5845 			// cause recycling deadlocks. (i.e. this process is diskimages
5846 			// helper and the vnode is in a disk image).  Querying the
5847 			// mnt_kern_flag for the mount's virtual device status
5848 			// is safer than checking the mnt_dependent_process, which
5849 			// may not be updated if there are multiple devnode layers
5850 			// in between the disk image and the final consumer.
5851 
5852 			if (((curproc->p_flag & P_DEPENDENCY_CAPABLE) == 0 || vp->v_mount == NULL ||
5853 			    (vp->v_mount->mnt_kern_flag & MNTK_VIRTUALDEV) == 0) &&
5854 			    !(vp->v_listflag & VLIST_NO_REUSE) &&
5855 			    (can_free || !(vp->v_flag & VCANDEALLOC))) {
5856 				/*
5857 				 * if need_reliable_vp == TRUE, then we've already sent one or more
5858 				 * non-reliable vnodes to the async thread for processing and timed
5859 				 * out waiting for a dead vnode to show up.  Use the MAX_WALK_COUNT
5860 				 * mechanism to first scan for a reliable vnode before forcing
5861 				 * a new vnode to be created
5862 				 */
5863 				if (need_reliable_vp == FALSE || vnode_on_reliable_media(vp) == TRUE) {
5864 					break;
5865 				}
5866 			}
5867 
5868 			// don't iterate more than MAX_WALK_COUNT vnodes to
5869 			// avoid keeping the vnode list lock held for too long.
5870 
5871 			if (walk_count++ > MAX_WALK_COUNT) {
5872 				vp = NULL;
5873 				break;
5874 			}
5875 		}
5876 	}
5877 
5878 	//
5879 	// if we don't have a vnode and the walk_count is >= MAX_WALK_COUNT
5880 	// then we're trying to create a vnode on behalf of a
5881 	// process like diskimages-helper that has file systems
5882 	// mounted on top of itself (and thus we can't reclaim
5883 	// vnodes in the file systems on top of us).  if we can't
5884 	// find a vnode to reclaim then we'll just have to force
5885 	// the allocation.
5886 	//
5887 	if (vp == NULL && walk_count >= MAX_WALK_COUNT) {
5888 		force_alloc = 1;
5889 		vnode_list_unlock();
5890 		goto retry;
5891 	}
5892 
5893 	if (vp == NULL) {
5894 		if (can_free && (vn_dealloc_level > DEALLOC_VNODE_NONE) &&
5895 		    (numvnodes >= force_alloc_min) && (numvnodes < numvnodes_max)) {
5896 			force_alloc_freeable = true;
5897 			vnode_list_unlock();
5898 			goto retry;
5899 		}
5900 		vnode_list_unlock();
5901 
5902 		/*
5903 		 * we've reached the system imposed maximum number of vnodes
5904 		 * but there isn't a single one available
5905 		 * wait a bit and then retry... if we can't get a vnode
5906 		 * after our target number of retries, then log a complaint
5907 		 */
5908 		if (++retries <= max_retries) {
5909 			delay_for_interval(1, 1000 * 1000);
5910 			goto retry;
5911 		}
5912 
5913 		tablefull("vnode");
5914 		log(LOG_EMERG, "%d desired, %ld numvnodes, "
5915 		    "%ld free, %ld dead, %ld async, %d rage\n",
5916 		    desiredvnodes, numvnodes, freevnodes, deadvnodes, async_work_vnodes, ragevnodes);
5917 
5918 #if CONFIG_JETSAM
5919 		/*
5920 		 * Running out of vnodes tends to make a system unusable. Start killing
5921 		 * processes that jetsam knows are killable.
5922 		 */
5923 		if (memorystatus_kill_on_vnode_limit() == FALSE
5924 #if DEVELOPMENT || DEBUG
5925 		    || bootarg_no_vnode_jetsam
5926 #endif
5927 		    ) {
5928 			/*
5929 			 * If jetsam can't find any more processes to kill and there
5930 			 * still aren't any free vnodes, panic. Hopefully we'll get a
5931 			 * panic log to tell us why we ran out.
5932 			 */
5933 			panic("vnode table is full");
5934 		}
5935 
5936 		/*
5937 		 * Now that we've killed someone, wait a bit and continue looking
5938 		 */
5939 		delay_for_interval(3, 1000 * 1000);
5940 		retries = 0;
5941 		goto retry;
5942 #endif
5943 
5944 		*vpp = NULL;
5945 		return ENFILE;
5946 	}
5947 	newvnode_nodead++;
5948 steal_this_vp:
5949 	if ((vp = process_vp(vp, 1, true, &deferred)) == NULLVP) {
5950 		if (deferred) {
5951 			int     elapsed_msecs;
5952 			struct timeval elapsed_tv;
5953 
5954 			if (initial_tv.tv_sec == 0) {
5955 				microuptime(&initial_tv);
5956 			}
5957 
5958 			vnode_list_lock();
5959 
5960 			dead_vnode_waited++;
5961 			dead_vnode_wanted++;
5962 
5963 			/*
5964 			 * note that we're only going to explicitly wait 10ms
5965 			 * for a dead vnode to become available, since even if one
5966 			 * isn't available, a reliable vnode might now be available
5967 			 * at the head of the VRAGE or free lists... if so, we
5968 			 * can satisfy the new_vnode request with less latency than waiting
5969 			 * for the full 100ms duration we're ultimately willing to tolerate
5970 			 */
5971 			assert_wait_timeout((caddr_t)&dead_vnode_wanted, (THREAD_INTERRUPTIBLE), 10000, NSEC_PER_USEC);
5972 
5973 			vnode_list_unlock();
5974 
5975 			thread_block(THREAD_CONTINUE_NULL);
5976 
5977 			microuptime(&elapsed_tv);
5978 
5979 			timevalsub(&elapsed_tv, &initial_tv);
5980 			elapsed_msecs = (int)(elapsed_tv.tv_sec * 1000 + elapsed_tv.tv_usec / 1000);
5981 
5982 			if (elapsed_msecs >= 100) {
5983 				/*
5984 				 * we've waited long enough... 100ms is
5985 				 * somewhat arbitrary for this case, but the
5986 				 * normal worst case latency used for UI
5987 				 * interaction is 100ms, so I've chosen to
5988 				 * go with that.
5989 				 *
5990 				 * setting need_reliable_vp to TRUE
5991 				 * forces us to find a reliable vnode
5992 				 * that we can process synchronously, or
5993 				 * to create a new one if the scan for
5994 				 * a reliable one hits the scan limit
5995 				 */
5996 				need_reliable_vp = TRUE;
5997 			}
5998 		}
5999 		goto retry;
6000 	}
6001 	OSAddAtomicLong(1, &num_reusedvnodes);
6002 
6003 
6004 #if CONFIG_MACF
6005 	/*
6006 	 * We should never see VL_LABELWAIT or VL_LABEL here,
6007 	 * as those operations hold a reference.
6008 	 */
6009 	assert((vp->v_lflag & VL_LABELWAIT) != VL_LABELWAIT);
6010 	assert((vp->v_lflag & VL_LABEL) != VL_LABEL);
6011 	if (vp->v_lflag & VL_LABELED || mac_vnode_label(vp) != NULL) {
6012 		vnode_lock_convert(vp);
6013 		mac_vnode_label_recycle(vp);
6014 	} else if (mac_vnode_label_init_needed(vp)) {
6015 		vnode_lock_convert(vp);
6016 		mac_vnode_label_init(vp);
6017 	}
6018 
6019 #endif /* MAC */
6020 
6021 	vp->v_iocount = 1;
6022 	vp->v_lflag = 0;
6023 	vp->v_writecount = 0;
6024 	vp->v_references = 0;
6025 	vp->v_iterblkflags = 0;
6026 	if (can_free && (vp->v_flag & VCANDEALLOC)) {
6027 		vp->v_flag = VSTANDARD | VCANDEALLOC;
6028 	} else {
6029 		vp->v_flag = VSTANDARD;
6030 	}
6031 
6032 	/* vbad vnodes can point to dead_mountp */
6033 	vp->v_mount = NULL;
6034 	vp->v_defer_reclaimlist = (vnode_t)0;
6035 
6036 	/* process_vp returns a locked vnode with a holdcount */
6037 	vnode_drop_and_unlock(vp);
6038 
6039 done:
6040 	*vpp = vp;
6041 
6042 	return 0;
6043 }
6044 
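/*
 * Per-vnode mutex wrappers.  vnode_lock_spin() takes the mutex in spin
 * mode for short critical sections; vnode_lock_convert() (used elsewhere
 * in this file) promotes such a hold to a full mutex hold before any
 * operation that may block.  A minimal caller sketch (hypothetical, not
 * from this file):
 *
 *	vnode_lock_spin(vp);
 *	... quick, non-blocking inspection of v_iocount / v_lflag ...
 *	vnode_unlock(vp);
 */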
6045 void
6046 vnode_lock(vnode_t vp)
6047 {
6048 	lck_mtx_lock(&vp->v_lock);
6049 }
6050 
6051 void
6052 vnode_lock_spin(vnode_t vp)
6053 {
6054 	lck_mtx_lock_spin(&vp->v_lock);
6055 }
6056 
6057 void
6058 vnode_unlock(vnode_t vp)
6059 {
6060 	lck_mtx_unlock(&vp->v_lock);
6061 }
6062 
6063 void
6064 vnode_hold(vnode_t vp)
6065 {
6066 	int32_t old_holdcount = os_atomic_inc_orig(&vp->v_holdcount, relaxed);
6067 
6068 	if (old_holdcount == INT32_MAX) {
6069 		/*
6070 		 * Because we allow atomic ops on the holdcount it is
6071 		 * possible that when the vnode is examined, its holdcount
6072 		 * is different than what will be printed in this
6073 		 * panic message.
6074 		 */
6075 		panic("%s: vp %p holdcount overflow from : %d v_tag = %d, v_type = %d, v_flag = %x.",
6076 		    __FUNCTION__, vp, old_holdcount, vp->v_tag, vp->v_type, vp->v_flag);
6077 	}
6078 }
6079 
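/*
 * A holdcount does not keep a vnode usable (that is what iocounts and
 * usecounts are for); it only pins the struct vnode memory so it cannot
 * be deallocated while a thread is still examining it.  Once the last
 * hold is dropped on a dead, deallocable vnode, vnode_drop_internal()
 * below may return the memory to vnode_zone.
 */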
6080 #define VNODE_HOLD_NO_SMR    (1<<29) /* Disable vnode_hold_smr */
6081 
6082 /*
6083  * To be used when smr is the only protection (cache_lookup and cache_lookup_path)
6084  */
6085 bool
6086 vnode_hold_smr(vnode_t vp)
6087 {
6088 	int32_t holdcount;
6089 
6090 	/*
6091 	 * For "high traffic" vnodes like rootvnode, the atomic
6092 	 * cmpxchg loop below can turn into an infinite loop; no need
6093 	 * to do it for vnodes that won't be dealloc'ed
6094 	 */
6095 	if (!(os_atomic_load(&vp->v_flag, relaxed) & VCANDEALLOC)) {
6096 		vnode_hold(vp);
6097 		return true;
6098 	}
6099 
6100 	for (;;) {
6101 		holdcount = os_atomic_load(&vp->v_holdcount, relaxed);
6102 
6103 		if (holdcount & VNODE_HOLD_NO_SMR) {
6104 			return false;
6105 		}
6106 
6107 		if ((os_atomic_cmpxchg(&vp->v_holdcount, holdcount, holdcount + 1, relaxed) != 0)) {
6108 			return true;
6109 		}
6110 	}
6111 }
6112 
6113 /*
6114  * free callback from smr enabled zones
6115  */
6116 static void
6117 vnode_smr_free(void *_vp, __unused size_t _size)
6118 {
6119 	vnode_t vp = _vp;
6120 
6121 	bzero(vp, sizeof(*vp));
6122 }
6123 
6124 static vnode_t
6125 vnode_drop_internal(vnode_t vp, bool locked)
6126 {
6127 	int32_t old_holdcount = os_atomic_dec_orig(&vp->v_holdcount, relaxed);
6128 
6129 	if (old_holdcount < 1) {
6130 		if (locked) {
6131 			vnode_unlock(vp);
6132 		}
6133 
6134 		/*
6135 		 * Because we allow atomic ops on the holdcount it is possible
6136 		 * that when the vnode is examined, its holdcount is different
6137 		 * than what will be printed in this panic message.
6138 		 */
6139 		panic("%s : vp %p holdcount -ve: %d.  v_tag = %d, v_type = %d, v_flag = %x.",
6140 		    __FUNCTION__, vp, old_holdcount - 1, vp->v_tag, vp->v_type, vp->v_flag);
6141 	}
6142 
6143 	if (vn_dealloc_level == DEALLOC_VNODE_NONE || old_holdcount > 1 ||
6144 	    !(vp->v_flag & VCANDEALLOC) || !(vp->v_lflag & VL_DEAD)) {
6145 		if (locked) {
6146 			vnode_unlock(vp);
6147 		}
6148 		return vp;
6149 	}
6150 
6151 	if (!locked) {
6152 		vnode_lock(vp);
6153 	}
6154 
6155 	if ((os_atomic_load(&vp->v_holdcount, relaxed) != 0) || vp->v_iocount ||
6156 	    vp->v_usecount || !(vp->v_flag & VCANDEALLOC) || !(vp->v_lflag & VL_DEAD)) {
6157 		vnode_unlock(vp);
6158 		return vp;
6159 	}
6160 
6161 	vnode_list_lock();
6162 
6163 	/*
6164 	 * the v_listflag field is protected by the vnode_list_lock
6165 	 */
6166 	if (VONLIST(vp) && (vp->v_listflag & VLIST_DEAD) &&
6167 	    (numvnodes > desiredvnodes || (vp->v_listflag & VLIST_NO_REUSE) ||
6168 	    vn_dealloc_level != DEALLOC_VNODE_ALL || deadvnodes >= deadvnodes_high) &&
6169 	    (os_atomic_cmpxchg(&vp->v_holdcount, 0, VNODE_HOLD_NO_SMR, relaxed) != 0)) {
6170 		VREMDEAD("vnode_list_remove", vp);
6171 		numvnodes--;
6172 		freeablevnodes--;
6173 		deallocedvnodes++;
6174 		vp->v_listflag = 0;
6175 
6176 		send_freeable_vnodes_telemetry();
6177 		vnode_list_unlock();
6178 
6179 #if CONFIG_MACF
6180 		struct label *tmpl = mac_vnode_label(vp);
6181 		vp->v_label = NULL;
6182 #endif /* CONFIG_MACF */
6183 
6184 		vnode_unlock(vp);
6185 
6186 #if CONFIG_MACF
6187 		if (tmpl) {
6188 			mac_vnode_label_free(tmpl);
6189 		}
6190 #endif /* CONFIG_MACF */
6191 
6192 		if (nc_smr_enabled) {
6193 			zfree_smr(vnode_zone, vp);
6194 		} else {
6195 			zfree(vnode_zone, vp);
6196 		}
6197 
6198 		vp = NULLVP;
6199 	} else {
6200 		vnode_list_unlock();
6201 		vnode_unlock(vp);
6202 	}
6203 
6204 	return vp;
6205 }
6206 
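/*
 * Hold/drop pairing used throughout this file (a sketch; hypothetical
 * caller).  The return value matters: NULLVP means the vnode was
 * deallocated during the drop and must not be touched again.
 *
 *	vnode_lock_spin(vp);
 *	vnode_hold(vp);			// pin the memory
 *	... work that may drop and retake v_lock ...
 *	vp = vnode_drop_and_unlock(vp);	// NULLVP if it was freed
 */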
6207 vnode_t
6208 vnode_drop_and_unlock(vnode_t vp)
6209 {
6210 	return vnode_drop_internal(vp, true);
6211 }
6212 
6213 vnode_t
6214 vnode_drop(vnode_t vp)
6215 {
6216 	return vnode_drop_internal(vp, false);
6217 }
6218 
6219 SYSCTL_NODE(_vfs, OID_AUTO, vnstats, CTLFLAG_RD | CTLFLAG_LOCKED, NULL, "vfs vnode stats");
6220 
6221 SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, vn_dealloc_level,
6222     CTLFLAG_RD | CTLFLAG_LOCKED,
6223     &vn_dealloc_level, 0, "");
6224 SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, desired_vnodes,
6225     CTLFLAG_RD | CTLFLAG_LOCKED,
6226     &desiredvnodes, 0, "");
6227 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_vnodes,
6228     CTLFLAG_RD | CTLFLAG_LOCKED,
6229     &numvnodes, "");
6230 SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_vnodes_min,
6231     CTLFLAG_RD | CTLFLAG_LOCKED,
6232     &numvnodes_min, 0, "");
6233 SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_vnodes_max,
6234     CTLFLAG_RD | CTLFLAG_LOCKED,
6235     &numvnodes_max, 0, "");
6236 SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_deallocable_vnodes,
6237     CTLFLAG_RD | CTLFLAG_LOCKED,
6238     &freeablevnodes, 0, "");
6239 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_deallocable_busy_vnodes,
6240     CTLFLAG_RD | CTLFLAG_LOCKED,
6241     &busyvnodes, "");
6242 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_dead_vnodes,
6243     CTLFLAG_RD | CTLFLAG_LOCKED,
6244     &deadvnodes, "");
6245 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_dead_vnodes_to_dealloc,
6246     CTLFLAG_RD | CTLFLAG_LOCKED,
6247     &deadvnodes_noreuse, "");
6248 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_async_work_vnodes,
6249     CTLFLAG_RD | CTLFLAG_LOCKED,
6250     &async_work_vnodes, "");
6251 SYSCTL_COMPAT_INT(_vfs_vnstats, OID_AUTO, num_rapid_aging_vnodes,
6252     CTLFLAG_RD | CTLFLAG_LOCKED,
6253     &ragevnodes, 0, "");
6254 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_free_vnodes,
6255     CTLFLAG_RD | CTLFLAG_LOCKED,
6256     &freevnodes, "");
6257 SYSCTL_LONG(_vfs_vnstats, OID_AUTO, num_recycledvnodes,
6258     CTLFLAG_RD | CTLFLAG_LOCKED,
6259     &num_recycledvnodes, "");
6260 SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_allocedvnodes,
6261     CTLFLAG_RD | CTLFLAG_LOCKED,
6262     &allocedvnodes, "");
6263 SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_deallocedvnodes,
6264     CTLFLAG_RD | CTLFLAG_LOCKED,
6265     &deallocedvnodes, "");
6266 SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_newvnode_calls,
6267     CTLFLAG_RD | CTLFLAG_LOCKED,
6268     &newvnode, "");
6269 SYSCTL_QUAD(_vfs_vnstats, OID_AUTO, num_newvnode_calls_nodead,
6270     CTLFLAG_RD | CTLFLAG_LOCKED,
6271     &newvnode_nodead, "");
6272 
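/*
 * The counters above are exported read-only under vfs.vnstats; assumed
 * user-space usage, for illustration:
 *
 *	$ sysctl vfs.vnstats.num_vnodes vfs.vnstats.num_dead_vnodes
 */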
6273 int
6274 vnode_get(struct vnode *vp)
6275 {
6276 	int retval;
6277 
6278 	vnode_lock_spin(vp);
6279 	retval = vnode_get_locked(vp);
6280 	vnode_unlock(vp);
6281 
6282 	return retval;
6283 }
6284 
6285 int
6286 vnode_get_locked(struct vnode *vp)
6287 {
6288 #if DIAGNOSTIC
6289 	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
6290 #endif
6291 	if ((vp->v_iocount == 0) && (vp->v_lflag & (VL_TERMINATE | VL_DEAD))) {
6292 		return ENOENT;
6293 	}
6294 
6295 	if (os_add_overflow(vp->v_iocount, 1, &vp->v_iocount)) {
6296 		panic("v_iocount overflow");
6297 	}
6298 
6299 #ifdef CONFIG_IOCOUNT_TRACE
6300 	record_vp(vp, 1);
6301 #endif
6302 	return 0;
6303 }
6304 
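/*
 * Typical filesystem-hash lookup built on the vid interfaces below (a
 * sketch with a hypothetical caller; vnode_vid() is the KPI accessor
 * for v_id, defined elsewhere).  The vid is captured while the hash
 * lock keeps vp stable, then revalidated after blocking:
 *
 *	uint32_t vid = vnode_vid(vp);
 *	... drop hash lock ...
 *	if (vnode_getwithvid(vp, vid) == 0) {
 *		... vp still names the same file ...
 *		vnode_put(vp);
 *	}
 */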
6305 /*
6306  * vnode_getwithvid() cuts in line in front of a vnode drain (that is,
6307  * while the vnode is draining, but at no point after that) to prevent
6308  * deadlocks when getting vnodes from filesystem hashes while holding
6309  * resources that may prevent other iocounts from being released.
6310  */
6311 int
6312 vnode_getwithvid(vnode_t vp, uint32_t vid)
6313 {
6314 	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID | VNODE_DRAINO));
6315 }
6316 
6317 /*
6318  * vnode_getwithvid_drainok() is like vnode_getwithvid(), but *does* block behind a vnode
6319  * drain; it exists for use in the VFS name cache, where we really do want to block behind
6320  * vnode drain to prevent holding off an unmount.
6321  */
6322 int
6323 vnode_getwithvid_drainok(vnode_t vp, uint32_t vid)
6324 {
6325 	return vget_internal(vp, vid, (VNODE_NODEAD | VNODE_WITHID));
6326 }
6327 
6328 int
6329 vnode_getwithref(vnode_t vp)
6330 {
6331 	return vget_internal(vp, 0, 0);
6332 }
6333 
6334 int
6335 vnode_getwithref_noblock(vnode_t vp)
6336 {
6337 	return vget_internal(vp, 0, VNODE_NOBLOCK);
6338 }
6339 
6340 __private_extern__ int
6341 vnode_getalways(vnode_t vp)
6342 {
6343 	return vget_internal(vp, 0, VNODE_ALWAYS);
6344 }
6345 
6346 __private_extern__ int
6347 vnode_getalways_from_pager(vnode_t vp)
6348 {
6349 	return vget_internal(vp, 0, VNODE_ALWAYS | VNODE_PAGER);
6350 }
6351 
6352 static inline void
6353 vn_set_dead(vnode_t vp)
6354 {
6355 	vp->v_mount = NULL;
6356 	vp->v_op = dead_vnodeop_p;
6357 	vp->v_tag = VT_NON;
6358 	vp->v_data = NULL;
6359 	vp->v_type = VBAD;
6360 	vp->v_lflag |= VL_DEAD;
6361 }
6362 
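/*
 * Core of vnode_put()/vnode_put_locked().  Dropping the last iocount on
 * an otherwise unreferenced vnode may call VNOP_INACTIVE (re-checking
 * state after the lock is reacquired) and may reclaim a VL_MARKTERM
 * vnode.  When called from the pager, reclaim is handed to the async
 * cleaner thread instead to avoid deadlocking against the pager.
 */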
6363 static int
6364 vnode_put_internal_locked(vnode_t vp, bool from_pager)
6365 {
6366 	vfs_context_t ctx = vfs_context_current();      /* hoist outside loop */
6367 
6368 #if DIAGNOSTIC
6369 	lck_mtx_assert(&vp->v_lock, LCK_MTX_ASSERT_OWNED);
6370 #endif
6371 retry:
6372 	if (vp->v_iocount < 1) {
6373 		panic("vnode_put(%p): iocount < 1", vp);
6374 	}
6375 
6376 	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
6377 		vnode_dropiocount(vp);
6378 		return 0;
6379 	}
6380 
6381 	if (((vp->v_lflag & (VL_DEAD | VL_NEEDINACTIVE)) == VL_NEEDINACTIVE)) {
6382 		vp->v_lflag &= ~VL_NEEDINACTIVE;
6383 
6384 		if (UBCINFOEXISTS(vp)) {
6385 			ubc_cs_free_and_vnode_unlock(vp);
6386 		} else {
6387 			vnode_unlock(vp);
6388 		}
6389 
6390 		VNOP_INACTIVE(vp, ctx);
6391 
6392 		vnode_lock_spin(vp);
6393 		/*
6394 		 * because we had to drop the vnode lock before calling
6395 		 * VNOP_INACTIVE, the state of this vnode may have changed...
6396 		 * we may pick up both VL_MARKTERM and either
6397 		 * an iocount or a usecount while in the VNOP_INACTIVE call
6398 		 * we don't want to call vnode_reclaim_internal on a vnode
6399 		 * that has active references on it... so loop back around
6400 		 * and reevaluate the state
6401 		 */
6402 		goto retry;
6403 	}
6404 	vp->v_lflag &= ~VL_NEEDINACTIVE;
6405 
6406 	vnode_lock_convert(vp);
6407 	if ((vp->v_lflag & (VL_MARKTERM | VL_TERMINATE | VL_DEAD)) == VL_MARKTERM) {
6408 		if (from_pager) {
6409 			/*
6410 			 * We can't initiate reclaim when called from the pager
6411 			 * because it will deadlock with itself so we hand it
6412 			 * off to the async cleaner thread.
6413 			 */
6414 			vnode_async_list_add(vp);
6415 		} else {
6416 			vnode_reclaim_internal(vp, 1, 1, 0);
6417 		}
6418 	}
6419 	vnode_dropiocount(vp);
6420 	vnode_list_add(vp);
6421 
6422 	return 0;
6423 }
6424 
6425 int
6426 vnode_put_locked(vnode_t vp)
6427 {
6428 	return vnode_put_internal_locked(vp, false);
6429 }
6430 
6431 int
6432 vnode_put(vnode_t vp)
6433 {
6434 	int retval;
6435 
6436 	vnode_lock_spin(vp);
6437 	vnode_hold(vp);
6438 	retval = vnode_put_internal_locked(vp, false);
6439 	vnode_drop_and_unlock(vp);
6440 
6441 	return retval;
6442 }
6443 
6444 int
6445 vnode_put_from_pager(vnode_t vp)
6446 {
6447 	int retval;
6448 
6449 	vnode_lock_spin(vp);
6450 	vnode_hold(vp);
6451 	/* Cannot initiate reclaim while paging */
6452 	retval = vnode_put_internal_locked(vp, true);
6453 	vnode_drop_and_unlock(vp);
6454 
6455 	return retval;
6456 }
6457 
6458 int
6459 vnode_writecount(vnode_t vp)
6460 {
6461 	return vp->v_writecount;
6462 }
6463 
6464 /* is vnode_t in use by others?  */
6465 int
6466 vnode_isinuse(vnode_t vp, int refcnt)
6467 {
6468 	return vnode_isinuse_locked(vp, refcnt, 0);
6469 }
6470 
6471 int
6472 vnode_usecount(vnode_t vp)
6473 {
6474 	return vp->v_usecount;
6475 }
6476 
6477 int
6478 vnode_iocount(vnode_t vp)
6479 {
6480 	if (!(vp->v_ext_flag & VE_LINKCHANGE)) {
6481 		return vp->v_iocount;
6482 	} else {
6483 		int iocount = 0;
6484 		vnode_lock_spin(vp);
6485 		if (!(vp->v_ext_flag & VE_LINKCHANGE)) {
6486 			iocount = vp->v_iocount;
6487 		} else {
6488 			/* the "link lock" takes its own iocount */
6489 			iocount = vp->v_iocount - 1;
6490 		}
6491 		vnode_unlock(vp);
6492 		return iocount;
6493 	}
6494 }
6495 
6496 int
6497 vnode_isinuse_locked(vnode_t vp, int refcnt, int locked)
6498 {
6499 	int retval = 0;
6500 
6501 	if (!locked) {
6502 		vnode_lock_spin(vp);
6503 	}
6504 	if ((vp->v_type != VREG) && ((vp->v_usecount - vp->v_kusecount) > refcnt)) {
6505 		retval = 1;
6506 		goto out;
6507 	}
6508 	if (vp->v_type == VREG) {
6509 		retval = ubc_isinuse_locked(vp, refcnt, 1);
6510 	}
6511 
6512 out:
6513 	if (!locked) {
6514 		vnode_unlock(vp);
6515 	}
6516 	return retval;
6517 }
6518 
6519 kauth_cred_t
6520 vnode_cred(vnode_t vp)
6521 {
6522 	if (vp->v_cred) {
6523 		return kauth_cred_require(vp->v_cred);
6524 	}
6525 
6526 	return NULL;
6527 }
6528 
6529 
6530 /* resume vnode_t */
6531 errno_t
6532 vnode_resume(vnode_t vp)
6533 {
6534 	if ((vp->v_lflag & VL_SUSPENDED) && vp->v_owner == current_thread()) {
6535 		vnode_lock_spin(vp);
6536 		vp->v_lflag &= ~VL_SUSPENDED;
6537 		vp->v_owner = NULL;
6538 		vnode_unlock(vp);
6539 
6540 		wakeup(&vp->v_iocount);
6541 	}
6542 	return 0;
6543 }
6544 
6545 /* suspend vnode_t
6546  * Please do not use on more than one vnode at a time as it may
6547  * cause deadlocks.
6548  * xxx should we explicitly prevent this from happening?
6549  */
6550 
6551 errno_t
6552 vnode_suspend(vnode_t vp)
6553 {
6554 	if (vp->v_lflag & VL_SUSPENDED) {
6555 		return EBUSY;
6556 	}
6557 
6558 	vnode_lock_spin(vp);
6559 
6560 	/*
6561 	 * xxx is this sufficient to check if a vnode_drain is in
6562 	 * progress?
6563 	 */
6564 
6565 	if (vp->v_owner == NULL) {
6566 		vp->v_lflag |= VL_SUSPENDED;
6567 		vp->v_owner = current_thread();
6568 	}
6569 	vnode_unlock(vp);
6570 
6571 	return 0;
6572 }
6573 
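/*
 * Suspend/resume sketch (hypothetical caller).  Note that
 * vnode_suspend() returns 0 without suspending when another thread
 * already owns the vnode, so callers needing exclusivity must verify
 * ownership separately:
 *
 *	if (vnode_suspend(vp) == 0) {
 *		... new iocount requests block in vnode_getiocount() ...
 *		vnode_resume(vp);
 *	}
 */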
6574 /*
6575  * Release any blocked locking requests on the vnode.
6576  * Used for forced-unmounts.
6577  *
6578  * XXX	What about network filesystems?
6579  */
6580 static void
6581 vnode_abort_advlocks(vnode_t vp)
6582 {
6583 	if (vp->v_flag & VLOCKLOCAL) {
6584 		lf_abort_advlocks(vp);
6585 	}
6586 }
6587 
6588 
6589 static errno_t
6590 vnode_drain(vnode_t vp)
6591 {
6592 	if (vp->v_lflag & VL_DRAIN) {
6593 		panic("vnode_drain: recursive drain");
6594 		return ENOENT;
6595 	}
6596 	vp->v_lflag |= VL_DRAIN;
6597 	vp->v_owner = current_thread();
6598 
6599 	while (vp->v_iocount > 1) {
6600 		if (bootarg_no_vnode_drain) {
6601 			struct timespec ts = {.tv_sec = 10, .tv_nsec = 0};
6602 			int error;
6603 
6604 			if (vfs_unmountall_started) {
6605 				ts.tv_sec = 1;
6606 			}
6607 
6608 			error = msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain_with_timeout", &ts);
6609 
6610 			/* Try to deal with leaked iocounts under bootarg and shutting down */
6611 			if (vp->v_iocount > 1 && error == EWOULDBLOCK &&
6612 			    ts.tv_sec == 1 && vp->v_numoutput == 0) {
6613 				vp->v_iocount = 1;
6614 				break;
6615 			}
6616 		} else {
6617 			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_drain", NULL);
6618 		}
6619 	}
6620 
6621 	vp->v_lflag &= ~VL_DRAIN;
6622 
6623 	return 0;
6624 }
6625 
6626 
6627 /*
6628  * if the number of recent references via vnode_getwithvid or vnode_getwithref
6629  * exceeds this threshold, then 'UN-AGE' the vnode by removing it from
6630  * the LRU list if it's currently on it... once the iocount and usecount both drop
6631  * to 0, it will get put back on the end of the list, effectively making it younger
6632  * this allows us to keep actively referenced vnodes in the list without having
6633  * to constantly remove and add to the list each time a vnode w/o a usecount is
6634  * referenced which costs us taking and dropping a global lock twice.
6635  * However, if the vnode is marked DIRTY, we want to pull it out much earlier
6636  */
6637 #define UNAGE_THRESHHOLD        25
6638 #define UNAGE_DIRTYTHRESHHOLD    6
6639 
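/*
 * vflags accepted by vnode_getiocount(): VNODE_NODEAD fails dead
 * vnodes, VNODE_NOSUSPEND fails suspended ones, VNODE_ALWAYS ignores
 * drain/suspend/terminate, VNODE_DRAINO cuts in front of a drain,
 * VNODE_WITHID revalidates the vid (and can return ENODEV during
 * unmount), VNODE_NOBLOCK never sleeps, and VNODE_PAGER skips the
 * UNAGE bookkeeping described above.
 */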
6640 errno_t
6641 vnode_getiocount(vnode_t vp, unsigned int vid, int vflags)
6642 {
6643 	int nodead = vflags & VNODE_NODEAD;
6644 	int nosusp = vflags & VNODE_NOSUSPEND;
6645 	int always = vflags & VNODE_ALWAYS;
6646 	int beatdrain = vflags & VNODE_DRAINO;
6647 	int withvid = vflags & VNODE_WITHID;
6648 	int forpager = vflags & VNODE_PAGER;
6649 	int noblock = vflags & VNODE_NOBLOCK;
6650 
6651 	for (;;) {
6652 		int sleepflg = 0;
6653 
6654 		/*
6655 		 * if it is a dead vnode with deadfs
6656 		 */
6657 		if (nodead && (vp->v_lflag & VL_DEAD) && ((vp->v_type == VBAD) || (vp->v_data == 0))) {
6658 			return ENOENT;
6659 		}
6660 		/*
6661 		 * will return VL_DEAD ones
6662 		 */
6663 		if ((vp->v_lflag & (VL_SUSPENDED | VL_DRAIN | VL_TERMINATE)) == 0) {
6664 			break;
6665 		}
6666 		/*
6667 		 * if suspended vnodes are to be failed
6668 		 */
6669 		if (nosusp && (vp->v_lflag & VL_SUSPENDED)) {
6670 			return ENOENT;
6671 		}
6672 		/*
6673 		 * if you are the owner of drain/suspend/termination, you can acquire an iocount;
6674 		 * check for VL_TERMINATE; it does not set owner
6675 		 */
6676 		if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE)) &&
6677 		    (vp->v_owner == current_thread())) {
6678 			break;
6679 		}
6680 
6681 		if (always != 0) {
6682 			break;
6683 		}
6684 
6685 		if (noblock && (vp->v_lflag & (VL_DRAIN | VL_SUSPENDED | VL_TERMINATE))) {
6686 			return ENOENT;
6687 		}
6688 
6689 		/*
6690 		 * If this vnode is getting drained, there are some cases where
6691 		 * we can't block or, in case of tty vnodes, want to be
6692 		 * interruptible.
6693 		 */
6694 		if (vp->v_lflag & VL_DRAIN) {
6695 			/*
6696 			 * In some situations, we want to get an iocount
6697 			 * even if the vnode is draining to prevent deadlock,
6698 			 * e.g. if we're in the filesystem, potentially holding
6699 			 * resources that could prevent other iocounts from
6700 			 * being released.
6701 			 */
6702 			if (beatdrain) {
6703 				break;
6704 			}
6705 			/*
6706 			 * Don't block if the vnode's mount point is unmounting as
6707 			 * we may be the thread the unmount is itself waiting on.
6708 			 * Only callers who pass in vids (at this point, we've already
6709 			 * handled nosusp and nodead) are expecting error returns
6710 			 * from this function, so we can only return errors for
6711 			 * those. ENODEV is intended to inform callers that the call
6712 			 * failed because an unmount is in progress.
6713 			 */
6714 			if (withvid && (vp->v_mount) && vfs_isunmount(vp->v_mount)) {
6715 				return ENODEV;
6716 			}
6717 
6718 			if (vnode_istty(vp)) {
6719 				sleepflg = PCATCH;
6720 			}
6721 		}
6722 
6723 		vnode_lock_convert(vp);
6724 
6725 		if (vp->v_lflag & VL_TERMINATE) {
6726 			int error;
6727 
6728 			vp->v_lflag |= VL_TERMWANT;
6729 
6730 			error = msleep(&vp->v_lflag, &vp->v_lock,
6731 			    (PVFS | sleepflg), "vnode getiocount", NULL);
6732 			if (error) {
6733 				return error;
6734 			}
6735 		} else {
6736 			msleep(&vp->v_iocount, &vp->v_lock, PVFS, "vnode_getiocount", NULL);
6737 		}
6738 	}
6739 	if (withvid && vid != vp->v_id) {
6740 		return ENOENT;
6741 	}
6742 	if (!forpager && (++vp->v_references >= UNAGE_THRESHHOLD ||
6743 	    (vp->v_flag & VISDIRTY && vp->v_references >= UNAGE_DIRTYTHRESHHOLD))) {
6744 		vp->v_references = 0;
6745 		vnode_list_remove(vp);
6746 	}
6747 	vp->v_iocount++;
6748 #ifdef CONFIG_IOCOUNT_TRACE
6749 	record_vp(vp, 1);
6750 #endif
6751 	return 0;
6752 }
6753 
6754 static void
6755 vnode_dropiocount(vnode_t vp)
6756 {
6757 	if (vp->v_iocount < 1) {
6758 		panic("vnode_dropiocount(%p): v_iocount < 1", vp);
6759 	}
6760 
6761 	vp->v_iocount--;
6762 #ifdef CONFIG_IOCOUNT_TRACE
6763 	record_vp(vp, -1);
6764 #endif
6765 	if ((vp->v_lflag & (VL_DRAIN | VL_SUSPENDED)) && (vp->v_iocount <= 1)) {
6766 		wakeup(&vp->v_iocount);
6767 	}
6768 }
6769 
6770 
6771 void
6772 vnode_reclaim(struct vnode * vp)
6773 {
6774 	vnode_reclaim_internal(vp, 0, 0, 0);
6775 }
6776 
6777 __private_extern__
6778 void
6779 vnode_reclaim_internal(struct vnode * vp, int locked, int reuse, int flags)
6780 {
6781 	int isfifo = 0;
6782 	bool clear_tty_revoke = false;
6783 
6784 	if (!locked) {
6785 		vnode_lock(vp);
6786 	}
6787 
6788 	if (vp->v_lflag & VL_TERMINATE) {
6789 		panic("vnode reclaim in progress");
6790 	}
6791 	vp->v_lflag |= VL_TERMINATE;
6792 
6793 	vn_clearunionwait(vp, 1);
6794 
6795 	/*
6796 	 * We have to force any terminals in reads to return and give up
6797 	 * their iocounts. It's important to do this after VL_TERMINATE
6798 	 * has been set to ensure new reads are blocked while the
6799 	 * revoke is in progress.
6800 	 */
6801 	if (vnode_istty(vp) && (flags & REVOKEALL) && (vp->v_iocount > 1)) {
6802 		vnode_unlock(vp);
6803 		VNOP_IOCTL(vp, TIOCREVOKE, (caddr_t)NULL, 0, vfs_context_kernel());
6804 		clear_tty_revoke = true;
6805 		vnode_lock(vp);
6806 	}
6807 
6808 	vnode_drain(vp);
6809 
6810 	if (clear_tty_revoke) {
6811 		vnode_unlock(vp);
6812 		VNOP_IOCTL(vp, TIOCREVOKECLEAR, (caddr_t)NULL, 0, vfs_context_kernel());
6813 		vnode_lock(vp);
6814 	}
6815 
6816 #if CONFIG_FILE_LEASES
6817 	/*
6818 	 * Revoke all leases in place for this vnode as it is about to be reclaimed.
6819 	 * In normal case, there shouldn't be any leases in place by the time we
6820 	 * get here as there shouldn't be any opens on the vnode (usecount == 0).
6821 	 * However, in the case of force unmount or unmount of a volume that
6822 	 * contains file that was opened with O_EVTONLY then the vnode can be
6823 	 * reclaimed while the file is still opened.
6824 	 */
6825 	vnode_revokelease(vp, true);
6826 #endif
6827 
6828 	isfifo = (vp->v_type == VFIFO);
6829 
6830 	if (vp->v_type != VBAD) {
6831 		vgone(vp, flags);               /* clean and reclaim the vnode */
6832 	}
6833 	/*
6834 	 * give the vnode a new identity so that vnode_getwithvid will fail
6835 	 * on any stale cache accesses...
6836 	 * grab the list_lock so that if we're in "new_vnode"
6837 	 * behind the list_lock trying to steal this vnode, the v_id is stable...
6838 	 * once new_vnode drops the list_lock, it will block trying to take
6839 	 * the vnode lock until we release it... at that point it will evaluate
6840 	 * whether the v_id has changed
6841 	 * also need to make sure that the vnode isn't on a list where "new_vnode"
6842 	 * can find it after the v_id has been bumped until we are completely done
6843 	 * with the vnode (i.e. putting it back on a list has to be the very last
6844 	 * thing we do to this vnode... many of the callers of vnode_reclaim_internal
6845 	 * are holding an io_count on the vnode... they need to drop the io_count
6846 	 * BEFORE doing a vnode_list_add or make sure to hold the vnode lock until
6847 	 * they are completely done with the vnode
6848 	 */
6849 	vnode_list_lock();
6850 
6851 	vnode_list_remove_locked(vp);
6852 	vp->v_id++;
6853 
6854 	vnode_list_unlock();
6855 
6856 	if (isfifo) {
6857 		struct fifoinfo * fip;
6858 
6859 		fip = vp->v_fifoinfo;
6860 		vp->v_fifoinfo = NULL;
6861 		kfree_type(struct fifoinfo, fip);
6862 	}
6863 	vp->v_type = VBAD;
6864 
6865 	if (vp->v_data) {
6866 		panic("vnode_reclaim_internal: cleaned vnode isn't");
6867 	}
6868 	if (vp->v_numoutput) {
6869 		panic("vnode_reclaim_internal: clean vnode has pending I/O's");
6870 	}
6871 	if (UBCINFOEXISTS(vp)) {
6872 		panic("vnode_reclaim_internal: ubcinfo not cleaned");
6873 	}
6874 	if (vp->v_parent) {
6875 		panic("vnode_reclaim_internal: vparent not removed");
6876 	}
6877 	if (vp->v_name) {
6878 		panic("vnode_reclaim_internal: vname not removed");
6879 	}
6880 
6881 #if CONFIG_FILE_LEASES
6882 	if (__improbable(!LIST_EMPTY(&vp->v_leases))) {
6883 		panic("vnode_reclaim_internal: vleases NOT empty");
6884 	}
6885 #endif
6886 
6887 	vp->v_socket = NULL;
6888 
6889 	vp->v_lflag &= ~VL_TERMINATE;
6890 	vp->v_owner = NULL;
6891 
6892 #if CONFIG_IOCOUNT_TRACE
6893 	if (__improbable(bootarg_vnode_iocount_trace)) {
6894 		bzero(vp->v_iocount_trace,
6895 		    IOCOUNT_TRACE_MAX_TYPES * sizeof(struct vnode_iocount_trace));
6896 	}
6897 #endif /* CONFIG_IOCOUNT_TRACE */
6898 
6899 	KNOTE(&vp->v_knotes, NOTE_REVOKE);
6900 
6901 	/* Make sure that when we reuse the vnode, no knotes left over */
6902 	klist_init(&vp->v_knotes);
6903 
6904 	if (vp->v_lflag & VL_TERMWANT) {
6905 		vp->v_lflag &= ~VL_TERMWANT;
6906 		wakeup(&vp->v_lflag);
6907 	}
6908 	if (!reuse) {
6909 		/*
6910 		 * make sure we get on the
6911 		 * dead list if appropriate
6912 		 */
6913 		vnode_list_add(vp);
6914 	}
6915 	if (!locked) {
6916 		vnode_unlock(vp);
6917 	}
6918 }
6919 
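/*
 * Common back end for vnode_create(), vnode_create_empty() and
 * vnode_initialize().  With VNODE_CREATE_EMPTY the vnode is returned
 * dead and uninitialized, to be completed later by vnode_initialize();
 * otherwise the vnfs parameters are validated and installed, device
 * aliases are resolved through checkalias(), and the vnode is entered
 * into the mount list and name cache as requested.
 */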
6920 static int
6921 vnode_create_internal(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp,
6922     vnode_create_options_t vc_options)
6923 {
6924 	int error;
6925 	int insert = 1;
6926 	vnode_t vp = NULLVP;
6927 	vnode_t nvp;
6928 	vnode_t dvp;
6929 	struct  uthread *ut;
6930 	struct componentname *cnp;
6931 	struct vnode_fsparam *param = (struct vnode_fsparam *)data;
6932 #if CONFIG_TRIGGERS
6933 	struct vnode_trigger_param *tinfo = NULL;
6934 #endif
6935 	bool existing_vnode;
6936 	bool init_vnode = !(vc_options & VNODE_CREATE_EMPTY);
6937 	bool is_bdevvp = false;
6938 
6939 	if (*vpp) {
6940 		vp = *vpp;
6941 		*vpp = NULLVP;
6942 		existing_vnode = true;
6943 	} else {
6944 		existing_vnode = false;
6945 	}
6946 
6947 	if (init_vnode) {
6948 		/* Do quick sanity check on the parameters. */
6949 		if ((param == NULL) || (param->vnfs_vtype == VBAD)) {
6950 			error = EINVAL;
6951 			goto error_out;
6952 		}
6953 
6954 #if CONFIG_TRIGGERS
6955 		if ((flavor == VNCREATE_TRIGGER) && (size == VNCREATE_TRIGGER_SIZE)) {
6956 			tinfo = (struct vnode_trigger_param *)data;
6957 
6958 			/* Validate trigger vnode input */
6959 			if ((param->vnfs_vtype != VDIR) ||
6960 			    (tinfo->vnt_resolve_func == NULL) ||
6961 			    (tinfo->vnt_flags & ~VNT_VALID_MASK)) {
6962 				error = EINVAL;
6963 				goto error_out;
6964 			}
6965 			/* Fall through a normal create (params will be the same) */
6966 			flavor = VNCREATE_FLAVOR;
6967 			size = VCREATESIZE;
6968 		}
6969 #endif
6970 		if ((flavor != VNCREATE_FLAVOR) || (size != VCREATESIZE)) {
6971 			error = EINVAL;
6972 			goto error_out;
6973 		}
6974 	}
6975 
6976 	if (!existing_vnode) {
6977 		if ((error = new_vnode(&vp, !(vc_options & VNODE_CREATE_NODEALLOC)))) {
6978 			return error;
6979 		}
6980 		if (!init_vnode) {
6981 			/* Make it so that it can be released by a vnode_put() */
6982 			vnode_lock(vp);
6983 			vn_set_dead(vp);
6984 			vnode_unlock(vp);
6985 			*vpp = vp;
6986 			return 0;
6987 		}
6988 	} else {
6989 		/*
6990 		 * A vnode obtained by vnode_create_empty has been passed to
6991 		 * vnode_initialize - Unset VL_DEAD set by vn_set_dead. After
6992 		 * this point, it is set back on any error.
6993 		 */
6994 		vnode_lock(vp);
6995 		vp->v_lflag &= ~VL_DEAD;
6996 		vnode_unlock(vp);
6997 	}
6998 
6999 	dvp = param->vnfs_dvp;
7000 	cnp = param->vnfs_cnp;
7001 
7002 	vp->v_op = param->vnfs_vops;
7003 	vp->v_type = (uint8_t)param->vnfs_vtype;
7004 	vp->v_data = param->vnfs_fsnode;
7005 
7006 	if (param->vnfs_markroot) {
7007 		vp->v_flag |= VROOT;
7008 	}
7009 	if (param->vnfs_marksystem) {
7010 		vp->v_flag |= VSYSTEM;
7011 	}
7012 	if (vp->v_type == VREG) {
7013 		error = ubc_info_init_withsize(vp, param->vnfs_filesize);
7014 		if (error) {
7015 #ifdef CONFIG_IOCOUNT_TRACE
7016 			record_vp(vp, 1);
7017 #endif
7018 			vnode_hold(vp);
7019 			vnode_lock(vp);
7020 			vn_set_dead(vp);
7021 
7022 			vnode_put_locked(vp);
7023 			vnode_drop_and_unlock(vp);
7024 			return error;
7025 		}
7026 		if (param->vnfs_mp->mnt_ioflags & MNT_IOFLAGS_IOSCHED_SUPPORTED) {
7027 			memory_object_mark_io_tracking(vp->v_ubcinfo->ui_control);
7028 		}
7029 	}
7030 #ifdef CONFIG_IOCOUNT_TRACE
7031 	record_vp(vp, 1);
7032 #endif
7033 
7034 #if CONFIG_FIRMLINKS
7035 	vp->v_fmlink = NULLVP;
7036 #endif
7037 	vp->v_flag &= ~VFMLINKTARGET;
7038 
7039 #if CONFIG_TRIGGERS
7040 	/*
7041 	 * For trigger vnodes, attach trigger info to vnode
7042 	 */
7043 	if ((vp->v_type == VDIR) && (tinfo != NULL)) {
7044 		/*
7045 		 * Note: has a side effect of incrementing trigger count on the
7046 		 * mount if successful, which we would need to undo on a
7047 		 * subsequent failure.
7048 		 */
7049 #ifdef CONFIG_IOCOUNT_TRACE
7050 		record_vp(vp, -1);
7051 #endif
7052 		error = vnode_resolver_create(param->vnfs_mp, vp, tinfo, FALSE);
7053 		if (error) {
7054 			printf("vnode_create: vnode_resolver_create() err %d\n", error);
7055 			vnode_hold(vp);
7056 			vnode_lock(vp);
7057 			vn_set_dead(vp);
7058 #ifdef CONFIG_IOCOUNT_TRACE
7059 			record_vp(vp, 1);
7060 #endif
7061 			vnode_put_locked(vp);
7062 			vnode_drop_and_unlock(vp);
7063 			return error;
7064 		}
7065 	}
7066 #endif
7067 	if (vp->v_type == VCHR || vp->v_type == VBLK) {
7068 		vp->v_tag = VT_DEVFS;           /* callers will reset if needed (bdevvp) */
7069 
7070 		if ((nvp = checkalias(vp, param->vnfs_rdev))) {
7071 			/*
7072 			 * if checkalias returns a vnode, it will be locked
7073 			 *
7074 			 * first get rid of the unneeded vnode we acquired
7075 			 */
7076 			vp->v_data = NULL;
7077 			vp->v_op = spec_vnodeop_p;
7078 			vp->v_type = VBAD;
7079 			vp->v_lflag = VL_DEAD;
7080 			vp->v_data = NULL;
7081 			vp->v_tag = VT_NON;
7082 			vnode_put(vp);
7083 
7084 			/*
7085 			 * switch to aliased vnode and finish
7086 			 * preparing it
7087 			 */
7088 			vp = nvp;
7089 
7090 			is_bdevvp = (vp->v_flag & VBDEVVP);
7091 
7092 			if (is_bdevvp) {
7093 				printf("%s: alias vnode (vid = %u) is in state of change (start) v_flags = 0x%x v_numoutput = %d\n",
7094 				    __func__, vp->v_id, vp->v_flag, vp->v_numoutput);
7095 			}
7096 
7097 			vnode_hold(vp);
7098 			vp->v_lflag |= VL_OPSCHANGE;
7099 			vclean(vp, 0);
7100 			vp->v_op = param->vnfs_vops;
7101 			vp->v_type = (uint8_t)param->vnfs_vtype;
7102 			vp->v_data = param->vnfs_fsnode;
7103 			vp->v_lflag = VL_OPSCHANGE;
7104 			vp->v_mount = NULL;
7105 			insmntque(vp, param->vnfs_mp);
7106 			insert = 0;
7107 
7108 			if (is_bdevvp) {
7109 				printf("%s: alias vnode (vid = %u), is in state of change (end) v_flags = 0x%x v_numoutput = %d\n",
7110 				    __func__, vp->v_id, vp->v_flag, vp->v_numoutput);
7111 			}
7112 
7113 			vnode_drop_and_unlock(vp);
7114 			wakeup(&vp->v_lflag); /* chkvnlock is waiting for VL_DEAD to get unset */
7115 		}
7116 
7117 		if (VCHR == vp->v_type) {
7118 			u_int maj = major(vp->v_rdev);
7119 
7120 			if (maj < (u_int)nchrdev && cdevsw[maj].d_type == D_TTY) {
7121 				vp->v_flag |= VISTTY;
7122 			}
7123 		}
7124 	}
7125 
7126 	if (vp->v_type == VFIFO) {
7127 		struct fifoinfo *fip;
7128 
7129 		fip = kalloc_type(struct fifoinfo, Z_WAITOK | Z_ZERO);
7130 		vp->v_fifoinfo = fip;
7131 	}
7132 	/* The file systems must pass the address of the location where
7133 	 * they store the vnode pointer. When we add the vnode into the mount
7134 	 * list and name cache they become discoverable. So the file system node
7135 	 * must have the connection to vnode setup by then
7136 	 */
7137 	*vpp = vp;
7138 
7139 	/* Add fs named reference. */
7140 	if (param->vnfs_flags & VNFS_ADDFSREF) {
7141 		vp->v_lflag |= VNAMED_FSHASH;
7142 	}
7143 	if (param->vnfs_mp) {
7144 		if (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL) {
7145 			vp->v_flag |= VLOCKLOCAL;
7146 		}
7147 		if (insert) {
7148 			if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
7149 				panic("insmntque: vp on the free list");
7150 			}
7151 
7152 			/*
7153 			 * enter in mount vnode list
7154 			 */
7155 			insmntque(vp, param->vnfs_mp);
7156 		}
7157 	}
7158 	if (dvp && vnode_ref(dvp) == 0) {
7159 		vp->v_parent = dvp;
7160 	}
7161 	if (cnp) {
7162 		if (dvp && ((param->vnfs_flags & (VNFS_NOCACHE | VNFS_CANTCACHE)) == 0)) {
7163 			/*
7164 			 * enter into name cache
7165 			 * we've got the info to enter it into the name cache now
7166 			 * cache_enter_create will pick up an extra reference on
7167 			 * the name entered into the string cache
7168 			 */
7169 			vp->v_name = cache_enter_create(dvp, vp, cnp);
7170 		} else {
7171 			vp->v_name = vfs_addname(cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_hash, 0);
7172 		}
7173 
7174 		if ((cnp->cn_flags & UNIONCREATED) == UNIONCREATED) {
7175 			vp->v_flag |= VISUNION;
7176 		}
7177 	}
7178 	if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
7179 		/*
7180 		 * this vnode is being created as cacheable in the name cache
7181 		 * this allows us to re-enter it in the cache
7182 		 */
7183 		vp->v_flag |= VNCACHEABLE;
7184 	}
7185 	ut = current_uthread();
7186 
7187 	if ((current_proc()->p_lflag & P_LRAGE_VNODES) ||
7188 	    (ut->uu_flag & (UT_RAGE_VNODES | UT_KERN_RAGE_VNODES))) {
7189 		/*
7190 		 * process has indicated that it wants any
7191 		 * vnodes created on its behalf to be rapidly
7192 		 * aged to reduce the impact on the cached set
7193 		 * of vnodes
7194 		 *
7195 		 * if UT_KERN_RAGE_VNODES is set, then the
7196 		 * kernel internally wants vnodes to be rapidly
7197 		 * aged, even if the process hasn't requested
7198 		 * this
7199 		 */
7200 		vp->v_flag |= VRAGE;
7201 	}
7202 
7203 #if CONFIG_SECLUDED_MEMORY
7204 	switch (secluded_for_filecache) {
7205 	case SECLUDED_FILECACHE_NONE:
7206 		/*
7207 		 * secluded_for_filecache == 0:
7208 		 * + no file contents in secluded pool
7209 		 */
7210 		break;
7211 	case SECLUDED_FILECACHE_APPS:
7212 		/*
7213 		 * secluded_for_filecache == 1:
7214 		 * + no files from /
7215 		 * + files from /Applications/ are OK
7216 		 * + files from /Applications/Camera are not OK
7217 		 * + no files that are open for write
7218 		 */
7219 		if (vnode_vtype(vp) == VREG &&
7220 		    vnode_mount(vp) != NULL &&
7221 		    (!(vfs_flags(vnode_mount(vp)) & MNT_ROOTFS))) {
7222 			/* not from root filesystem: eligible for secluded pages */
7223 			memory_object_mark_eligible_for_secluded(
7224 				ubc_getobject(vp, UBC_FLAGS_NONE),
7225 				TRUE);
7226 		}
7227 		break;
7228 	case SECLUDED_FILECACHE_RDONLY:
7229 		/*
7230 		 * secluded_for_filecache == 2:
7231 		 * + all read-only files OK, except:
7232 		 *      + dyld_shared_cache_arm64*
7233 		 *      + Camera
7234 		 *      + mediaserverd
7235 		 *      + cameracaptured
7236 		 */
7237 		if (vnode_vtype(vp) == VREG) {
7238 			memory_object_mark_eligible_for_secluded(
7239 				ubc_getobject(vp, UBC_FLAGS_NONE),
7240 				TRUE);
7241 		}
7242 		break;
7243 	default:
7244 		break;
7245 	}
7246 #endif /* CONFIG_SECLUDED_MEMORY */
7247 
7248 	if (is_bdevvp) {
7249 		/*
7250 		 * The v_flag and v_lflag fields for the vnode above are
7251 		 * manipulated without the vnode lock. This is fine for
7252 		 * everything because no other use of this vnode is occurring.
7253 		 * However the case of the bdevvp alias vnode reuse is different
7254 		 * and the flags end up being modified while a thread may be in
7255 		 * vnode_waitforwrites which sets VTHROTTLED and any one of the
7256 		 * non-atomic modifications of v_flag in this function can race
7257 		 * with the setting of that flag and cause VTHROTTLED in v_flag
7258 		 * to get "lost".
7259 		 *
7260 		 * This should ideally be fixed by making sure all modifications
7261 		 * in this function to the vnode flags are done under the
7262 		 * vnode lock but at this time, a much smaller workaround is
7263 		 * being employed and the more correct (and potentially
7264 		 * much bigger) change will follow later.
7265 		 *
7266 		 * The effect of "losing" the VTHROTTLED flags would be a lost
7267 		 * wakeup so we just issue that wakeup here since this happens
7268 		 * only once per bdevvp vnode which are only one or two for a
7269 		 * given boot.
7270 		 */
7271 		wakeup(&vp->v_numoutput);
7272 
7273 		/*
7274 		 * now make sure the flags that we were supposed to set aren't
7275 		 * lost.
7276 		 */
7277 		vnode_lock_spin(vp);
7278 		if (param->vnfs_flags & VNFS_ADDFSREF) {
7279 			vp->v_lflag |= VNAMED_FSHASH;
7280 		}
7281 		if (param->vnfs_mp && (param->vnfs_mp->mnt_kern_flag & MNTK_LOCK_LOCAL)) {
7282 			vp->v_flag |= VLOCKLOCAL;
7283 		}
7284 		if ((param->vnfs_flags & VNFS_CANTCACHE) == 0) {
7285 			vp->v_flag |= VNCACHEABLE;
7286 		}
7287 		vnode_unlock(vp);
7288 	}
7289 
7290 	return 0;
7291 
7292 error_out:
7293 	if (existing_vnode) {
7294 		vnode_put(vp);
7295 	}
7296 	return error;
7297 }
7298 
7299 int
7300 vnode_create_ext(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp, vnode_create_options_t vc_options)
7301 {
7302 	if (vc_options & ~(VNODE_CREATE_EMPTY | VNODE_CREATE_NODEALLOC)) {
7303 		return EINVAL;
7304 	}
7305 	*vpp = NULLVP;
7306 	return vnode_create_internal(flavor, size, data, vpp, vc_options);
7307 }
7308 
7309 /* USAGE:
7310  * The following API creates a vnode, associates with it all the parameters specified in the
7311  * vnode_fsparam structure, and returns a vnode handle with a reference. Device aliasing is
7312  * handled here, so checkalias is obsoleted by this.
7313  */
7314 int
7315 vnode_create(uint32_t flavor, uint32_t size, void *data, vnode_t *vpp)
7316 {
7317 	return vnode_create_ext(flavor, size, data, vpp, VNODE_CREATE_NODEALLOC);
7318 }
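
/*
 * Illustrative only: a minimal sketch of how a filesystem might call
 * vnode_create().  The names myfs_mount, myfs_node, myfs_vnodeops and
 * myfs_size are hypothetical placeholders, and the field list follows
 * struct vnode_fsparam as declared in <sys/vnode.h>:
 *
 *	struct vnode_fsparam vfsp;
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	bzero(&vfsp, sizeof(vfsp));
 *	vfsp.vnfs_mp = myfs_mount;         // hypothetical mount_t
 *	vfsp.vnfs_vtype = VREG;
 *	vfsp.vnfs_str = "myfs";
 *	vfsp.vnfs_fsnode = myfs_node;      // hypothetical per-FS inode
 *	vfsp.vnfs_vops = myfs_vnodeops;    // hypothetical vnop vector
 *	vfsp.vnfs_filesize = myfs_size;
 *	vfsp.vnfs_flags = VNFS_CANTCACHE;  // see the VNFS_* checks above
 *
 *	error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &vfsp, &vp);
 *	if (error == 0) {
 *		// success: the caller holds an iocount on vp and
 *		// releases it with vnode_put() when done
 *	}
 */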
7319 
7320 int
7321 vnode_create_empty(vnode_t *vpp)
7322 {
7323 	return vnode_create_ext(VNCREATE_FLAVOR, VCREATESIZE, NULL,
7324 	           vpp, VNODE_CREATE_EMPTY);
7325 }
7326 
7327 int
7328 vnode_initialize(uint32_t __unused flavor, uint32_t size, void *data, vnode_t *vpp)
7329 {
7330 	if (*vpp == NULLVP) {
7331 		panic("NULL vnode passed to vnode_initialize");
7332 	}
7333 #if DEVELOPMENT || DEBUG
7334 	/*
7335 	 * We lock to check that the vnode is fit for unlocked use in
7336 	 * vnode_create_internal.
7337 	 */
7338 	vnode_lock_spin(*vpp);
7339 	VNASSERT(((*vpp)->v_iocount == 1), *vpp,
7340 	    ("vnode_initialize : iocount not 1, is %d", (*vpp)->v_iocount));
7341 	VNASSERT(((*vpp)->v_usecount == 0), *vpp,
7342 	    ("vnode_initialize : usecount not 0, is %d", (*vpp)->v_usecount));
7343 	VNASSERT(((*vpp)->v_lflag & VL_DEAD), *vpp,
7344 	    ("vnode_initialize : v_lflag does not have VL_DEAD, is 0x%x",
7345 	    (*vpp)->v_lflag));
7346 	VNASSERT(((*vpp)->v_data == NULL), *vpp,
7347 	    ("vnode_initialize : v_data not NULL"));
7348 	vnode_unlock(*vpp);
7349 #endif
7350 	return vnode_create_internal(flavor, size, data, vpp, VNODE_CREATE_DEFAULT);
7351 }
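
/*
 * A hedged sketch of the two-phase creation pattern these KPIs enable:
 * vnode_create_empty() reserves an empty vnode (iocount 1, VL_DEAD set,
 * no v_data, per the assertions above) and vnode_initialize() later binds
 * the filesystem parameters to it.  'vfsp' is assumed to be filled in as
 * for vnode_create():
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_create_empty(&vp);
 *	if (error == 0) {
 *		// ... set up per-FS state that needs the vnode address ...
 *		error = vnode_initialize(VNCREATE_FLAVOR, VCREATESIZE,
 *		    &vfsp, &vp);
 *	}
 */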
7352 
7353 int
7354 vnode_addfsref(vnode_t vp)
7355 {
7356 	vnode_lock_spin(vp);
7357 	if (vp->v_lflag & VNAMED_FSHASH) {
7358 		panic("add_fsref: vp already has named reference");
7359 	}
7360 	if ((vp->v_freelist.tqe_prev != (struct vnode **)0xdeadb)) {
7361 		panic("addfsref: vp on the free list");
7362 	}
7363 	vp->v_lflag |= VNAMED_FSHASH;
7364 	vnode_unlock(vp);
7365 	return 0;
7366 }
7367 int
7368 vnode_removefsref(vnode_t vp)
7369 {
7370 	vnode_lock_spin(vp);
7371 	if ((vp->v_lflag & VNAMED_FSHASH) == 0) {
7372 		panic("remove_fsref: no named reference");
7373 	}
7374 	vp->v_lflag &= ~VNAMED_FSHASH;
7375 	vnode_unlock(vp);
7376 	return 0;
7377 }
7378 
7379 void
7380 vnode_link_lock(vnode_t vp)
7381 {
7382 	vnode_lock_spin(vp);
7383 	while (vp->v_ext_flag & VE_LINKCHANGE) {
7384 		vp->v_ext_flag |= VE_LINKCHANGEWAIT;
7385 		msleep(&vp->v_ext_flag, &vp->v_lock, PVFS | PSPIN,
7386 		    "vnode_link_lock_wait", 0);
7387 	}
7388 	if (vp->v_iocount == 0) {
7389 		panic("%s called without an iocount on the vnode", __FUNCTION__);
7390 	}
7391 	vnode_get_locked(vp);
7392 	vp->v_ext_flag |= VE_LINKCHANGE;
7393 	vnode_unlock(vp);
7394 }
7395 
7396 void
7397 vnode_link_unlock(vnode_t vp)
7398 {
7399 	bool do_wakeup = false;
7400 	bool do_vnode_put = false;
7401 
7402 	vnode_lock_spin(vp);
7403 	if (vp->v_ext_flag & VE_LINKCHANGEWAIT) {
7404 		do_wakeup = true;
7405 	}
7406 	vp->v_ext_flag &= ~(VE_LINKCHANGE | VE_LINKCHANGEWAIT);
7407 	if ((vp->v_usecount > 0) || (vp->v_iocount > 1)) {
7408 		vnode_put_locked(vp);
7409 	} else {
7410 		do_vnode_put = true;
7411 	}
7412 	vnode_unlock(vp);
7413 	if (do_wakeup) {
7414 		wakeup(&vp->v_ext_flag);
7415 	}
7416 	if (do_vnode_put) {
7417 		vnode_put(vp);
7418 	}
7419 }
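
/*
 * Illustrative pairing for the link-change gate above; the caller is
 * assumed to already hold an iocount on vp (vnode_link_lock() panics
 * otherwise):
 *
 *	vnode_link_lock(vp);
 *	// ... update hardlink state for vp ...
 *	vnode_link_unlock(vp);
 */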
7420 
7421 int
7422 vfs_iterate(int flags, int (*callout)(mount_t, void *), void *arg)
7423 {
7424 	mount_t mp;
7425 	int ret = 0;
7426 	fsid_t * fsid_list;
7427 	int count, actualcount, i;
7428 	void * allocmem;
7429 	int indx_start, indx_stop, indx_incr;
7430 	int cb_dropref = (flags & VFS_ITERATE_CB_DROPREF);
7431 	int noskip_unmount = (flags & VFS_ITERATE_NOSKIP_UNMOUNT);
7432 
7433 	count = mount_getvfscnt();
7434 	count += 10;
7435 
7436 	fsid_list = kalloc_data(count * sizeof(fsid_t), Z_WAITOK);
7437 	allocmem = (void *)fsid_list;
7438 
7439 	actualcount = mount_fillfsids(fsid_list, count);
7440 
7441 	/*
7442 	 * Establish the iteration direction
7443 	 * VFS_ITERATE_TAIL_FIRST overrides the default head-first (oldest-first) order.
7444 	 */
7445 	if (flags & VFS_ITERATE_TAIL_FIRST) {
7446 		indx_start = actualcount - 1;
7447 		indx_stop = -1;
7448 		indx_incr = -1;
7449 	} else { /* Head first by default */
7450 		indx_start = 0;
7451 		indx_stop = actualcount;
7452 		indx_incr = 1;
7453 	}
7454 
7455 	for (i = indx_start; i != indx_stop; i += indx_incr) {
7456 		/* obtain the mount point with iteration reference */
7457 		mp = mount_list_lookupby_fsid(&fsid_list[i], 0, 1);
7458 
7459 		if (mp == (struct mount *)0) {
7460 			continue;
7461 		}
7462 		mount_lock(mp);
7463 		if ((mp->mnt_lflag & MNT_LDEAD) ||
7464 		    (!noskip_unmount && (mp->mnt_lflag & MNT_LUNMOUNT))) {
7465 			mount_unlock(mp);
7466 			mount_iterdrop(mp);
7467 			continue;
7468 		}
7469 		mount_unlock(mp);
7470 
7471 		/* iterate over all the vnodes */
7472 		ret = callout(mp, arg);
7473 
7474 		/*
7475 		 * Drop the iterref here if the callback didn't do it.
7476 		 * Note: If cb_dropref is set the mp may no longer exist.
7477 		 */
7478 		if (!cb_dropref) {
7479 			mount_iterdrop(mp);
7480 		}
7481 
7482 		switch (ret) {
7483 		case VFS_RETURNED:
7484 		case VFS_RETURNED_DONE:
7485 			if (ret == VFS_RETURNED_DONE) {
7486 				ret = 0;
7487 				goto out;
7488 			}
7489 			break;
7490 
7491 		case VFS_CLAIMED_DONE:
7492 			ret = 0;
7493 			goto out;
7494 		case VFS_CLAIMED:
7495 		default:
7496 			break;
7497 		}
7498 		ret = 0;
7499 	}
7500 
7501 out:
7502 	kfree_data(allocmem, count * sizeof(fsid_t));
7503 	return ret;
7504 }
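
/*
 * A minimal sketch of a vfs_iterate() callback.  The callback name and
 * payload are hypothetical; returning VFS_RETURNED continues the
 * iteration and leaves the iteration reference for vfs_iterate() to drop
 * (no VFS_ITERATE_CB_DROPREF in this example):
 *
 *	static int
 *	count_mounts_callback(mount_t mp, void *arg)
 *	{
 *		int *countp = (int *)arg;
 *
 *		(*countp)++;
 *		return VFS_RETURNED;
 *	}
 *
 *	int count = 0;
 *	(void)vfs_iterate(0, count_mounts_callback, &count);
 */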
7505 
7506 /*
7507  * Update the vfsstatfs structure in the mountpoint.
7508  * MAC: Parameter eventtype added, indicating whether the event that
7509  * triggered this update came from user space, via a system call
7510  * (VFS_USER_EVENT) or an internal kernel call (VFS_KERNEL_EVENT).
7511  */
7512 int
7513 vfs_update_vfsstat(mount_t mp, vfs_context_t ctx, __unused int eventtype)
7514 {
7515 	struct vfs_attr va;
7516 	int             error;
7517 
7518 	/*
7519 	 * Request the attributes we want to propagate into
7520 	 * the per-mount vfsstat structure.
7521 	 */
7522 	VFSATTR_INIT(&va);
7523 	VFSATTR_WANTED(&va, f_iosize);
7524 	VFSATTR_WANTED(&va, f_blocks);
7525 	VFSATTR_WANTED(&va, f_bfree);
7526 	VFSATTR_WANTED(&va, f_bavail);
7527 	VFSATTR_WANTED(&va, f_bused);
7528 	VFSATTR_WANTED(&va, f_files);
7529 	VFSATTR_WANTED(&va, f_ffree);
7530 	VFSATTR_WANTED(&va, f_bsize);
7531 	VFSATTR_WANTED(&va, f_fssubtype);
7532 
7533 	if ((error = vfs_getattr(mp, &va, ctx)) != 0) {
7534 		KAUTH_DEBUG("STAT - filesystem returned error %d", error);
7535 		return error;
7536 	}
7537 #if CONFIG_MACF
7538 	if (eventtype == VFS_USER_EVENT) {
7539 		error = mac_mount_check_getattr(ctx, mp, &va);
7540 		if (error != 0) {
7541 			return error;
7542 		}
7543 	}
7544 #endif
7545 	/*
7546 	 * Unpack into the per-mount structure.
7547 	 *
7548 	 * We only overwrite these fields, which are likely to change:
7549 	 *	f_blocks
7550 	 *	f_bfree
7551 	 *	f_bavail
7552 	 *	f_bused
7553 	 *	f_files
7554 	 *	f_ffree
7555 	 *
7556 	 * And these which are not, but which the FS has no other way
7557 	 * of providing to us:
7558 	 *	f_bsize
7559 	 *	f_iosize
7560 	 *	f_fssubtype
7561 	 *
7562 	 */
7563 	if (VFSATTR_IS_SUPPORTED(&va, f_bsize)) {
7564 		/* 4822056 - protect against malformed server mount */
7565 		mp->mnt_vfsstat.f_bsize = (va.f_bsize > 0 ? va.f_bsize : 512);
7566 	} else {
7567 		mp->mnt_vfsstat.f_bsize = mp->mnt_devblocksize; /* default from the device block size */
7568 	}
7569 	if (VFSATTR_IS_SUPPORTED(&va, f_iosize)) {
7570 		mp->mnt_vfsstat.f_iosize = va.f_iosize;
7571 	} else {
7572 		mp->mnt_vfsstat.f_iosize = 1024 * 1024;         /* 1MB sensible I/O size */
7573 	}
7574 	if (VFSATTR_IS_SUPPORTED(&va, f_blocks)) {
7575 		mp->mnt_vfsstat.f_blocks = va.f_blocks;
7576 	}
7577 	if (VFSATTR_IS_SUPPORTED(&va, f_bfree)) {
7578 		mp->mnt_vfsstat.f_bfree = va.f_bfree;
7579 	}
7580 	if (VFSATTR_IS_SUPPORTED(&va, f_bavail)) {
7581 		mp->mnt_vfsstat.f_bavail = va.f_bavail;
7582 	}
7583 	if (VFSATTR_IS_SUPPORTED(&va, f_bused)) {
7584 		mp->mnt_vfsstat.f_bused = va.f_bused;
7585 	}
7586 	if (VFSATTR_IS_SUPPORTED(&va, f_files)) {
7587 		mp->mnt_vfsstat.f_files = va.f_files;
7588 	}
7589 	if (VFSATTR_IS_SUPPORTED(&va, f_ffree)) {
7590 		mp->mnt_vfsstat.f_ffree = va.f_ffree;
7591 	}
7592 
7593 	/* this is unlikely to change, but has to be queried for */
7594 	if (VFSATTR_IS_SUPPORTED(&va, f_fssubtype)) {
7595 		mp->mnt_vfsstat.f_fssubtype = va.f_fssubtype;
7596 	}
7597 
7598 	return 0;
7599 }
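
/*
 * Hedged usage sketch: a kernel-internal refresh of the cached statistics,
 * with the event type chosen to match the origin of the request:
 *
 *	error = vfs_update_vfsstat(mp, vfs_context_current(), VFS_KERNEL_EVENT);
 */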
7600 
7601 int
7602 mount_list_add(mount_t mp)
7603 {
7604 	int res;
7605 
7606 	mount_list_lock();
7607 	if (get_system_inshutdown() != 0) {
7608 		res = -1;
7609 	} else {
7610 		TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
7611 		nummounts++;
7612 		res = 0;
7613 	}
7614 	mount_list_unlock();
7615 
7616 	return res;
7617 }
7618 
7619 void
7620 mount_list_remove(mount_t mp)
7621 {
7622 	mount_list_lock();
7623 	TAILQ_REMOVE(&mountlist, mp, mnt_list);
7624 	nummounts--;
7625 	mp->mnt_list.tqe_next = NULL;
7626 	mp->mnt_list.tqe_prev = NULL;
7627 	mount_list_unlock();
7628 }
7629 
7630 mount_t
7631 mount_lookupby_volfsid(int volfs_id, int withref)
7632 {
7633 	mount_t cur_mount = (mount_t)0;
7634 	mount_t mp;
7635 
7636 	mount_list_lock();
7637 	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
7638 		if (!(mp->mnt_kern_flag & MNTK_UNMOUNT) &&
7639 		    (mp->mnt_kern_flag & MNTK_PATH_FROM_ID) &&
7640 		    (mp->mnt_vfsstat.f_fsid.val[0] == volfs_id)) {
7641 			cur_mount = mp;
7642 			if (withref) {
7643 				if (mount_iterref(cur_mount, 1)) {
7644 					cur_mount = (mount_t)0;
7645 					mount_list_unlock();
7646 					goto out;
7647 				}
7648 			}
7649 			break;
7650 		}
7651 	}
7652 	mount_list_unlock();
7653 	if (withref && (cur_mount != (mount_t)0)) {
7654 		mp = cur_mount;
7655 		if (vfs_busy(mp, LK_NOWAIT) != 0) {
7656 			cur_mount = (mount_t)0;
7657 		}
7658 		mount_iterdrop(mp);
7659 	}
7660 out:
7661 	return cur_mount;
7662 }
7663 
7664 mount_t
7665 mount_list_lookupby_fsid(fsid_t *fsid, int locked, int withref)
7666 {
7667 	mount_t retmp = (mount_t)0;
7668 	mount_t mp;
7669 
7670 	if (!locked) {
7671 		mount_list_lock();
7672 	}
7673 	TAILQ_FOREACH(mp, &mountlist, mnt_list)
7674 	if (mp->mnt_vfsstat.f_fsid.val[0] == fsid->val[0] &&
7675 	    mp->mnt_vfsstat.f_fsid.val[1] == fsid->val[1]) {
7676 		retmp = mp;
7677 		if (withref) {
7678 			if (mount_iterref(retmp, 1)) {
7679 				retmp = (mount_t)0;
7680 			}
7681 		}
7682 		goto out;
7683 	}
7684 out:
7685 	if (!locked) {
7686 		mount_list_unlock();
7687 	}
7688 	return retmp;
7689 }
7690 
7691 errno_t
7692 vnode_lookupat(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx,
7693     vnode_t start_dvp)
7694 {
7695 	struct nameidata *ndp;
7696 	int error = 0;
7697 	u_int32_t ndflags = 0;
7698 
7699 	if (ctx == NULL) {
7700 		return EINVAL;
7701 	}
7702 
7703 	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
7704 
7705 	if (flags & VNODE_LOOKUP_NOFOLLOW) {
7706 		ndflags = NOFOLLOW;
7707 	} else {
7708 		ndflags = FOLLOW;
7709 	}
7710 
7711 	if (flags & VNODE_LOOKUP_NOCROSSMOUNT) {
7712 		ndflags |= NOCROSSMOUNT;
7713 	}
7714 
7715 	if (flags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
7716 		ndflags |= CN_NBMOUNTLOOK;
7717 	}
7718 
7719 	/* XXX AUDITVNPATH1 needed ? */
7720 	NDINIT(ndp, LOOKUP, OP_LOOKUP, ndflags, UIO_SYSSPACE,
7721 	    CAST_USER_ADDR_T(path), ctx);
7722 
7723 	if (flags & VNODE_LOOKUP_NOFOLLOW_ANY) {
7724 		ndp->ni_flag |= NAMEI_NOFOLLOW_ANY;
7725 	}
7726 
7727 	if (start_dvp && (path[0] != '/')) {
7728 		ndp->ni_dvp = start_dvp;
7729 		ndp->ni_cnd.cn_flags |= USEDVP;
7730 	}
7731 
7732 	if ((error = namei(ndp))) {
7733 		goto out_free;
7734 	}
7735 
7736 	ndp->ni_cnd.cn_flags &= ~USEDVP;
7737 
7738 	*vpp = ndp->ni_vp;
7739 	nameidone(ndp);
7740 
7741 out_free:
7742 	kfree_type(struct nameidata, ndp);
7743 	return error;
7744 }
7745 
7746 errno_t
7747 vnode_lookup(const char *path, int flags, vnode_t *vpp, vfs_context_t ctx)
7748 {
7749 	return vnode_lookupat(path, flags, vpp, ctx, NULLVP);
7750 }
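
/*
 * Illustrative use of vnode_lookup(); the path is a hypothetical example.
 * On success the caller owns an iocount on the returned vnode and must
 * drop it with vnode_put():
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_lookup("/tmp/example", VNODE_LOOKUP_NOFOLLOW, &vp,
 *	    vfs_context_current());
 *	if (error == 0) {
 *		// ... use vp ...
 *		vnode_put(vp);
 *	}
 */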
7751 
7752 errno_t
7753 vnode_open(const char *path, int fmode, int cmode, int flags, vnode_t *vpp, vfs_context_t ctx)
7754 {
7755 	struct nameidata *ndp = NULL;
7756 	int error;
7757 	u_int32_t ndflags = 0;
7758 	int lflags = flags;
7759 
7760 	if (ctx == NULL) {              /* XXX technically an error */
7761 		ctx = vfs_context_current();
7762 	}
7763 
7764 	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
7765 
7766 	if (fmode & O_NOFOLLOW) {
7767 		lflags |= VNODE_LOOKUP_NOFOLLOW;
7768 	}
7769 
7770 	if (lflags & VNODE_LOOKUP_NOFOLLOW) {
7771 		ndflags = NOFOLLOW;
7772 	} else {
7773 		ndflags = FOLLOW;
7774 	}
7775 
7776 	if (lflags & VNODE_LOOKUP_NOFOLLOW_ANY) {
7777 		fmode |= O_NOFOLLOW_ANY;
7778 	}
7779 
7780 	if (lflags & VNODE_LOOKUP_NOCROSSMOUNT) {
7781 		ndflags |= NOCROSSMOUNT;
7782 	}
7783 
7784 	if (lflags & VNODE_LOOKUP_CROSSMOUNTNOWAIT) {
7785 		ndflags |= CN_NBMOUNTLOOK;
7786 	}
7787 
7788 	/* XXX AUDITVNPATH1 needed ? */
7789 	NDINIT(ndp, LOOKUP, OP_OPEN, ndflags, UIO_SYSSPACE,
7790 	    CAST_USER_ADDR_T(path), ctx);
7791 
7792 	if ((error = vn_open(ndp, fmode, cmode))) {
7793 		*vpp = NULL;
7794 	} else {
7795 		*vpp = ndp->ni_vp;
7796 	}
7797 
7798 	kfree_type(struct nameidata, ndp);
7799 	return error;
7800 }
7801 
7802 errno_t
7803 vnode_close(vnode_t vp, int flags, vfs_context_t ctx)
7804 {
7805 	int error;
7806 
7807 	if (ctx == NULL) {
7808 		ctx = vfs_context_current();
7809 	}
7810 
7811 	error = vn_close(vp, flags, ctx);
7812 	vnode_put(vp);
7813 	return error;
7814 }
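
/*
 * Illustrative open/close pairing (path hypothetical).  Note from the
 * code above that vnode_close() performs the vn_close() and then drops
 * the iocount via vnode_put(), so the caller must not vnode_put() again
 * after calling vnode_close():
 *
 *	vnode_t vp = NULLVP;
 *	int error;
 *
 *	error = vnode_open("/tmp/example", FREAD, 0, 0, &vp,
 *	    vfs_context_current());
 *	if (error == 0) {
 *		// ... read from vp ...
 *		error = vnode_close(vp, FREAD, vfs_context_current());
 *	}
 */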
7815 
7816 errno_t
7817 vnode_mtime(vnode_t vp, struct timespec *mtime, vfs_context_t ctx)
7818 {
7819 	struct vnode_attr       va;
7820 	int                     error;
7821 
7822 	VATTR_INIT(&va);
7823 	VATTR_WANTED(&va, va_modify_time);
7824 	error = vnode_getattr(vp, &va, ctx);
7825 	if (!error) {
7826 		*mtime = va.va_modify_time;
7827 	}
7828 	return error;
7829 }
7830 
7831 errno_t
7832 vnode_flags(vnode_t vp, uint32_t *flags, vfs_context_t ctx)
7833 {
7834 	struct vnode_attr       va;
7835 	int                     error;
7836 
7837 	VATTR_INIT(&va);
7838 	VATTR_WANTED(&va, va_flags);
7839 	error = vnode_getattr(vp, &va, ctx);
7840 	if (!error) {
7841 		*flags = va.va_flags;
7842 	}
7843 	return error;
7844 }
7845 
7846 /*
7847  * Returns:	0			Success
7848  *	vnode_getattr:???
7849  */
7850 errno_t
7851 vnode_size(vnode_t vp, off_t *sizep, vfs_context_t ctx)
7852 {
7853 	struct vnode_attr       va;
7854 	int                     error;
7855 
7856 	VATTR_INIT(&va);
7857 	VATTR_WANTED(&va, va_data_size);
7858 	error = vnode_getattr(vp, &va, ctx);
7859 	if (!error) {
7860 		*sizep = va.va_data_size;
7861 	}
7862 	return error;
7863 }
7864 
7865 errno_t
7866 vnode_setsize(vnode_t vp, off_t size, int ioflag, vfs_context_t ctx)
7867 {
7868 	struct vnode_attr       va;
7869 
7870 	VATTR_INIT(&va);
7871 	VATTR_SET(&va, va_data_size, size);
7872 	va.va_vaflags = ioflag & 0xffff;
7873 	return vnode_setattr(vp, &va, ctx);
7874 }
7875 
7876 int
7877 vnode_setdirty(vnode_t vp)
7878 {
7879 	vnode_lock_spin(vp);
7880 	vp->v_flag |= VISDIRTY;
7881 	vnode_unlock(vp);
7882 	return 0;
7883 }
7884 
7885 int
7886 vnode_cleardirty(vnode_t vp)
7887 {
7888 	vnode_lock_spin(vp);
7889 	vp->v_flag &= ~VISDIRTY;
7890 	vnode_unlock(vp);
7891 	return 0;
7892 }
7893 
7894 int
7895 vnode_isdirty(vnode_t vp)
7896 {
7897 	int dirty;
7898 
7899 	vnode_lock_spin(vp);
7900 	dirty = (vp->v_flag & VISDIRTY) ? 1 : 0;
7901 	vnode_unlock(vp);
7902 
7903 	return dirty;
7904 }
7905 
7906 static int
7907 vn_create_reg(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
7908 {
7909 	/* Only use compound VNOP for compound operation */
7910 	if (vnode_compound_open_available(dvp) && ((flags & VN_CREATE_DOOPEN) != 0)) {
7911 		*vpp = NULLVP;
7912 		return VNOP_COMPOUND_OPEN(dvp, vpp, ndp, O_CREAT, fmode, statusp, vap, ctx);
7913 	} else {
7914 		return VNOP_CREATE(dvp, vpp, &ndp->ni_cnd, vap, ctx);
7915 	}
7916 }
7917 
7918 /*
7919  * Create a filesystem object of arbitrary type with arbitrary attributes in
7920  * the specified directory with the specified name.
7921  *
7922  * Parameters:	dvp			Pointer to the vnode of the directory
7923  *					in which to create the object.
7924  *		vpp			Pointer to the area into which to
7925  *					return the vnode of the created object.
7926  *		cnp			Component name pointer from the namei
7927  *					data structure, containing the name to
7928  *					use for the create object.
7929  *		vap			Pointer to the vnode_attr structure
7930  *					describing the object to be created,
7931  *					including the type of object.
7932  *		flags			VN_* flags controlling ACL inheritance
7933  *					and whether or not authorization is to
7934  *					be required for the operation.
7935  *
7936  * Returns:	0			Success
7937  *		!0			errno value
7938  *
7939  * Implicit:	*vpp			Contains the vnode of the object that
7940  *					was created, if successful.
7941  *		*cnp			May be modified by the underlying VFS.
7942  *		*vap			May be modified by either ACL
7943  *					inheritance or the underlying VFS, and
7944  *					may therefore be modified, even if the
7945  *					operation is unsuccessful.
7946  *
7949  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
7950  *
7951  *		Modification of '*cnp' and '*vap' by the underlying VFS is
7952  *		strongly discouraged.
7953  *
7954  * XXX:		This function is a 'vn_*' function; it belongs in vfs_vnops.c
7955  *
7956  * XXX:		We should enumerate the possible errno values here, and where
7957  *		in the code they originated.
7958  */
7959 errno_t
7960 vn_create(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, uint32_t flags, int fmode, uint32_t *statusp, vfs_context_t ctx)
7961 {
7962 	errno_t error, old_error;
7963 	vnode_t vp = (vnode_t)0;
7964 	boolean_t batched;
7965 	struct componentname *cnp;
7966 	uint32_t defaulted;
7967 
7968 	cnp = &ndp->ni_cnd;
7969 	error = 0;
7970 	batched = namei_compound_available(dvp, ndp) ? TRUE : FALSE;
7971 
7972 	KAUTH_DEBUG("%p    CREATE - '%s'", dvp, cnp->cn_nameptr);
7973 
7974 	if (flags & VN_CREATE_NOINHERIT) {
7975 		vap->va_vaflags |= VA_NOINHERIT;
7976 	}
7977 	if (flags & VN_CREATE_NOAUTH) {
7978 		vap->va_vaflags |= VA_NOAUTH;
7979 	}
7980 	/*
7981 	 * Handle ACL inheritance, initialize vap.
7982 	 */
7983 	error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
7984 	if (error) {
7985 		return error;
7986 	}
7987 
7988 	if (vap->va_type != VREG && (fmode != 0 || (flags & VN_CREATE_DOOPEN) || statusp)) {
7989 		panic("Open parameters, but not a regular file.");
7990 	}
7991 	if ((fmode != 0) && ((flags & VN_CREATE_DOOPEN) == 0)) {
7992 		panic("Mode for open, but not trying to open...");
7993 	}
7994 
7995 
7996 	/*
7997 	 * Create the requested node.
7998 	 */
7999 	switch (vap->va_type) {
8000 	case VREG:
8001 		error = vn_create_reg(dvp, vpp, ndp, vap, flags, fmode, statusp, ctx);
8002 		break;
8003 	case VDIR:
8004 		error = vn_mkdir(dvp, vpp, ndp, vap, ctx);
8005 		break;
8006 	case VSOCK:
8007 	case VFIFO:
8008 	case VBLK:
8009 	case VCHR:
8010 		error = VNOP_MKNOD(dvp, vpp, cnp, vap, ctx);
8011 		break;
8012 	default:
8013 		panic("vnode_create: unknown vtype %d", vap->va_type);
8014 	}
8015 	if (error != 0) {
8016 		KAUTH_DEBUG("%p    CREATE - error %d returned by filesystem", dvp, error);
8017 		goto out;
8018 	}
8019 
8020 	vp = *vpp;
8021 	old_error = error;
8022 
8023 	/*
8024 	 * If some of the requested attributes weren't handled by the VNOP,
8025 	 * use our fallback code.
8026 	 */
8027 	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap) && *vpp) {
8028 		KAUTH_DEBUG("     CREATE - doing fallback with ACL %p", vap->va_acl);
8029 		error = vnode_setattr_fallback(*vpp, vap, ctx);
8030 	}
8031 
8032 #if CONFIG_MACF
8033 	if ((error == 0) && !(flags & VN_CREATE_NOLABEL)) {
8034 		error = vnode_label(vnode_mount(vp), dvp, vp, cnp, VNODE_LABEL_CREATE, ctx);
8035 	}
8036 #endif
8037 
8038 	if ((error != 0) && (vp != (vnode_t)0)) {
8039 		/* If we've done a compound open, close */
8040 		if (batched && (old_error == 0) && (vap->va_type == VREG)) {
8041 			VNOP_CLOSE(vp, fmode, ctx);
8042 		}
8043 
8044 		/* Need to provide notifications if a create succeeded */
8045 		if (!batched) {
8046 			*vpp = (vnode_t) 0;
8047 			vnode_put(vp);
8048 			vp = NULLVP;
8049 		}
8050 	}
8051 
8052 	/*
8053 	 * For creation VNOPs, this is the equivalent of
8054 	 * lookup_handle_found_vnode.
8055 	 */
8056 	if (kdebug_enable && *vpp) {
8057 		kdebug_lookup(*vpp, cnp);
8058 	}
8059 
8060 out:
8061 	vn_attribute_cleanup(vap, defaulted);
8062 
8063 	return error;
8064 }
8065 
8066 static kauth_scope_t    vnode_scope;
8067 static int      vnode_authorize_callback(kauth_cred_t credential, void *idata, kauth_action_t action,
8068     uintptr_t arg0, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3);
8069 static int vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
8070     vnode_t vp, vnode_t dvp, int *errorp);
8071 
8072 typedef struct _vnode_authorize_context {
8073 	vnode_t         vp;
8074 	struct vnode_attr *vap;
8075 	vnode_t         dvp;
8076 	struct vnode_attr *dvap;
8077 	vfs_context_t   ctx;
8078 	int             flags;
8079 	int             flags_valid;
8080 #define _VAC_IS_OWNER           (1<<0)
8081 #define _VAC_IN_GROUP           (1<<1)
8082 #define _VAC_IS_DIR_OWNER       (1<<2)
8083 #define _VAC_IN_DIR_GROUP       (1<<3)
8084 #define _VAC_NO_VNODE_POINTERS  (1<<4)
8085 } *vauth_ctx;
8086 
8087 void
8088 vnode_authorize_init(void)
8089 {
8090 	vnode_scope = kauth_register_scope(KAUTH_SCOPE_VNODE, vnode_authorize_callback, NULL);
8091 }
8092 
8093 #define VATTR_PREPARE_DEFAULTED_UID             0x1
8094 #define VATTR_PREPARE_DEFAULTED_GID             0x2
8095 #define VATTR_PREPARE_DEFAULTED_MODE            0x4
8096 
8097 int
8098 vn_attribute_prepare(vnode_t dvp, struct vnode_attr *vap, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
8099 {
8100 	kauth_acl_t nacl = NULL, oacl = NULL;
8101 	int error;
8102 
8103 	/*
8104 	 * Handle ACL inheritance.
8105 	 */
8106 	if (!(vap->va_vaflags & VA_NOINHERIT) && vfs_extendedsecurity(dvp->v_mount)) {
8107 		/* save the original filesec */
8108 		if (VATTR_IS_ACTIVE(vap, va_acl)) {
8109 			oacl = vap->va_acl;
8110 		}
8111 
8112 		vap->va_acl = NULL;
8113 		if ((error = kauth_acl_inherit(dvp,
8114 		    oacl,
8115 		    &nacl,
8116 		    vap->va_type == VDIR,
8117 		    ctx)) != 0) {
8118 			KAUTH_DEBUG("%p    CREATE - error %d processing inheritance", dvp, error);
8119 			return error;
8120 		}
8121 
8122 		/*
8123 		 * If the generated ACL is NULL, then we can save ourselves some effort
8124 		 * by clearing the active bit.
8125 		 */
8126 		if (nacl == NULL) {
8127 			VATTR_CLEAR_ACTIVE(vap, va_acl);
8128 		} else {
8129 			vap->va_base_acl = oacl;
8130 			VATTR_SET(vap, va_acl, nacl);
8131 		}
8132 	}
8133 
8134 	error = vnode_authattr_new_internal(dvp, vap, (vap->va_vaflags & VA_NOAUTH), defaulted_fieldsp, ctx);
8135 	if (error) {
8136 		vn_attribute_cleanup(vap, *defaulted_fieldsp);
8137 	}
8138 
8139 	return error;
8140 }
8141 
8142 void
8143 vn_attribute_cleanup(struct vnode_attr *vap, uint32_t defaulted_fields)
8144 {
8145 	/*
8146 	 * If the caller supplied a filesec in vap, it has been replaced
8147 	 * now by the post-inheritance copy.  We need to put the original back
8148 	 * and free the inherited product.
8149 	 */
8150 	kauth_acl_t nacl, oacl;
8151 
8152 	if (VATTR_IS_ACTIVE(vap, va_acl)) {
8153 		nacl = vap->va_acl;
8154 		oacl = vap->va_base_acl;
8155 
8156 		if (oacl) {
8157 			VATTR_SET(vap, va_acl, oacl);
8158 			vap->va_base_acl = NULL;
8159 		} else {
8160 			VATTR_CLEAR_ACTIVE(vap, va_acl);
8161 		}
8162 
8163 		if (nacl != NULL) {
8164 			/*
8165 			 * Only free the ACL buffer if 'VA_FILESEC_ACL' is not set
8166 			 * (when set, it should be freed by the caller), or if it
8167 			 * is a post-inheritance copy.
8167 			 */
8168 			if (!(vap->va_vaflags & VA_FILESEC_ACL) ||
8169 			    (oacl != NULL && nacl != oacl)) {
8170 				kauth_acl_free(nacl);
8171 			}
8172 		}
8173 	}
8174 
8175 	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_MODE) != 0) {
8176 		VATTR_CLEAR_ACTIVE(vap, va_mode);
8177 	}
8178 	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_GID) != 0) {
8179 		VATTR_CLEAR_ACTIVE(vap, va_gid);
8180 	}
8181 	if ((defaulted_fields & VATTR_PREPARE_DEFAULTED_UID) != 0) {
8182 		VATTR_CLEAR_ACTIVE(vap, va_uid);
8183 	}
8184 
8185 	return;
8186 }
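
/*
 * Hedged sketch of the prepare/cleanup pairing used by vn_create() above:
 * a successful vn_attribute_prepare() must be balanced by
 * vn_attribute_cleanup() with the same 'defaulted' cookie once the create
 * attempt (successful or not) is finished; on a failed prepare, the
 * cleanup has already been done internally:
 *
 *	uint32_t defaulted = 0;
 *	int error;
 *
 *	error = vn_attribute_prepare(dvp, vap, &defaulted, ctx);
 *	if (error == 0) {
 *		// ... attempt to create the object using vap ...
 *		vn_attribute_cleanup(vap, defaulted);
 *	}
 */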
8187 
8188 #if CONFIG_APPLEDOUBLE
8189 
8190 #define NATIVE_XATTR(VP)  \
8191 	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
8192 
8193 static int
8194 dot_underbar_check_paired_vnode(struct componentname *cnp, vnode_t vp,
8195     vnode_t dvp, vfs_context_t ctx)
8196 {
8197 	int error = 0;
8198 	bool dvp_needs_put = false;
8199 
8200 	if (!dvp) {
8201 		if ((dvp = vnode_getparent(vp)) == NULLVP) {
8202 			return 0;
8203 		}
8204 		dvp_needs_put = true;
8205 	}
8206 
8207 	vnode_t dupairedvp = NULLVP;
8208 	char lastchar = cnp->cn_nameptr[cnp->cn_namelen];
8209 
8210 	cnp->cn_nameptr[cnp->cn_namelen] = '\0';
8211 	error = vnode_lookupat(cnp->cn_nameptr + (sizeof("._") - 1), 0,
8212 	    &dupairedvp, ctx, dvp);
8213 	cnp->cn_nameptr[cnp->cn_namelen] = lastchar;
8214 	if (dvp_needs_put) {
8215 		vnode_put(dvp);
8216 		dvp = NULLVP;
8217 	}
8218 	if (!error && dupairedvp) {
8219 		error = mac_vnode_check_deleteextattr(ctx, dupairedvp,
8220 		    "com.apple.quarantine");
8221 		vnode_put(dupairedvp);
8222 		dupairedvp = NULLVP;
8223 	} else {
8224 		error = 0;
8225 	}
8226 
8227 	return error;
8228 }
8229 #endif /* CONFIG_APPLEDOUBLE */
8230 
8231 int
8232 vn_authorize_unlink(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, __unused void *reserved)
8233 {
8234 #if !CONFIG_MACF
8235 #pragma unused(cnp)
8236 #endif
8237 	int error = 0;
8238 
8239 	/*
8240 	 * Normally, unlinking of directories is not supported.
8241 	 * However, some file systems may have limited support.
8242 	 */
8243 	if ((vp->v_type == VDIR) &&
8244 	    !(vp->v_mount->mnt_kern_flag & MNTK_DIR_HARDLINKS)) {
8245 		return EPERM; /* POSIX */
8246 	}
8247 
8248 	/* authorize the delete operation */
8249 #if CONFIG_MACF
8250 	if (!error) {
8251 		error = mac_vnode_check_unlink(ctx, dvp, vp, cnp);
8252 #if CONFIG_APPLEDOUBLE
8253 		if (!error && !(NATIVE_XATTR(dvp)) &&
8254 		    (cnp->cn_namelen > (sizeof("._a") - 1)) &&
8255 		    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '_') {
8256 			error = dot_underbar_check_paired_vnode(cnp, vp, dvp, ctx);
8257 		}
8258 #endif /* CONFIG_APPLEDOUBLE */
8259 	}
8260 #endif /* MAC */
8261 	if (!error) {
8262 		error = vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
8263 	}
8264 
8265 	return error;
8266 }
8267 
8268 int
8269 vn_authorize_open_existing(vnode_t vp, struct componentname *cnp, int fmode, vfs_context_t ctx, void *reserved)
8270 {
8271 	/* Open of existing case */
8272 	kauth_action_t action;
8273 	int error = 0;
8274 	if (cnp->cn_ndp == NULL) {
8275 		panic("NULL ndp");
8276 	}
8277 	if (reserved != NULL) {
8278 		panic("reserved not NULL.");
8279 	}
8280 
8281 #if CONFIG_MACF
8282 	/* XXX may do duplicate work here, but ignore that for now (idempotent) */
8283 	if (vfs_flags(vnode_mount(vp)) & MNT_MULTILABEL) {
8284 		error = vnode_label(vnode_mount(vp), NULL, vp, NULL, 0, ctx);
8285 		if (error) {
8286 			return error;
8287 		}
8288 	}
8289 #endif
8290 
8291 	if (vnode_isdir(vp)) {
8292 		if ((fmode & (FWRITE | O_TRUNC)) || /* disallow write operations on directories */
8293 		    ((fmode & FSEARCH) && !(fmode & O_DIRECTORY))) {
8294 			return EISDIR;
8295 		}
8296 	} else {
8297 		if (fmode & O_DIRECTORY) {
8298 			return ENOTDIR;
8299 		}
8300 
8301 		if (vp->v_type == VSOCK && vp->v_tag != VT_FDESC) {
8302 			return EOPNOTSUPP;    /* Operation not supported on socket */
8303 		}
8304 
8305 		if (vp->v_type == VLNK && (fmode & O_NOFOLLOW) != 0) {
8306 			return ELOOP;         /* O_NOFOLLOW was specified and the target is a symbolic link */
8307 		}
8308 
8309 		if (cnp->cn_ndp->ni_flag & NAMEI_TRAILINGSLASH) {
8310 			return ENOTDIR;
8311 		}
8312 
8313 		if (!vnode_isreg(vp) && (fmode & FEXEC)) {
8314 			return EACCES;
8315 		}
8316 	}
8317 
8318 #if CONFIG_MACF
8319 	/* If a file being opened is a shadow file containing
8320 	 * namedstream data, ignore the MACF checks because it
8321 	 * is a kernel internal file and access should always
8322 	 * be allowed.
8323 	 */
8324 	if (!(vnode_isshadow(vp) && vnode_isnamedstream(vp))) {
8325 		error = mac_vnode_check_open(ctx, vp, fmode);
8326 		if (error) {
8327 			return error;
8328 		}
8329 	}
8330 #if CONFIG_APPLEDOUBLE
8331 	if (fmode & (FWRITE | O_TRUNC) && !(NATIVE_XATTR(vp)) &&
8332 	    (cnp->cn_namelen > (sizeof("._a") - 1)) &&
8333 	    cnp->cn_nameptr[0] == '.' && cnp->cn_nameptr[1] == '_') {
8334 		error = dot_underbar_check_paired_vnode(cnp, vp, NULLVP, ctx);
8335 		if (error) {
8336 			return error;
8337 		}
8338 	}
8339 #endif /* CONFIG_APPLEDOUBLE */
8340 #endif
8341 
8342 	/* compute action to be authorized */
8343 	action = 0;
8344 	if (fmode & FREAD) {
8345 		action |= KAUTH_VNODE_READ_DATA;
8346 	}
8347 	if (fmode & (FWRITE | O_TRUNC)) {
8348 		/*
8349 		 * If we are writing, appending, and not truncating,
8350 		 * indicate that we are appending so that if the
8351 		 * UF_APPEND or SF_APPEND bits are set, we do not deny
8352 		 * the open.
8353 		 */
8354 		if ((fmode & O_APPEND) && !(fmode & O_TRUNC)) {
8355 			action |= KAUTH_VNODE_APPEND_DATA;
8356 		} else {
8357 			action |= KAUTH_VNODE_WRITE_DATA;
8358 		}
8359 	}
8360 	if (fmode & (FSEARCH | FEXEC)) {
8361 		if (vnode_isdir(vp)) {
8362 			action |= KAUTH_VNODE_SEARCH;
8363 		} else {
8364 			action |= KAUTH_VNODE_EXECUTE;
8365 		}
8366 	}
8367 	error = vnode_authorize(vp, NULL, action, ctx);
8368 #if NAMEDSTREAMS
8369 	if (error == EACCES) {
8370 		/*
8371 		 * Shadow files may exist on-disk with a different UID/GID
8372 		 * than that of the current context.  Verify that this file
8373 		 * is really a shadow file.  If it was created successfully
8374 		 * then it should be authorized.
8375 		 */
8376 		if (vnode_isshadow(vp) && vnode_isnamedstream(vp)) {
8377 			error = vnode_verifynamedstream(vp);
8378 		}
8379 	}
8380 #endif
8381 
8382 	return error;
8383 }
8384 
8385 int
8386 vn_authorize_create(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
8387 {
8388 #if !CONFIG_MACF
8389 #pragma unused(vap)
8390 #endif
8391 	/* Creation case */
8392 	int error;
8393 
8394 	if (cnp->cn_ndp == NULL) {
8395 		panic("NULL cn_ndp");
8396 	}
8397 	if (reserved != NULL) {
8398 		panic("reserved not NULL.");
8399 	}
8400 
8401 	/* Only validate path for creation if we didn't do a complete lookup */
8402 	if (cnp->cn_ndp->ni_flag & NAMEI_UNFINISHED) {
8403 		error = lookup_validate_creation_path(cnp->cn_ndp);
8404 		if (error) {
8405 			return error;
8406 		}
8407 	}
8408 
8409 #if CONFIG_MACF
8410 	error = mac_vnode_check_create(ctx, dvp, cnp, vap);
8411 	if (error) {
8412 		return error;
8413 	}
8414 #endif /* CONFIG_MACF */
8415 
8416 	return vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_FILE, ctx);
8417 }
8418 
8419 int
8420 vn_authorize_rename(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
8421     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
8422     vfs_context_t ctx, void *reserved)
8423 {
8424 	return vn_authorize_renamex(fdvp, fvp, fcnp, tdvp, tvp, tcnp, ctx, 0, reserved);
8425 }
8426 
8427 int
8428 vn_authorize_renamex(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
8429     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
8430     vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
8431 {
8432 	return vn_authorize_renamex_with_paths(fdvp, fvp, fcnp, NULL, tdvp, tvp, tcnp, NULL, ctx, flags, reserved);
8433 }
8434 
8435 int
8436 vn_authorize_renamex_with_paths(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp, const char *from_path,
8437     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp, const char *to_path,
8438     vfs_context_t ctx, vfs_rename_flags_t flags, void *reserved)
8439 {
8440 	int error = 0;
8441 	int moving = 0;
8442 	bool swap = flags & VFS_RENAME_SWAP;
8443 
8444 	if (reserved != NULL) {
8445 		panic("Passed something other than NULL as reserved field!");
8446 	}
8447 
8448 	/*
8449 	 * Avoid renaming "." and "..".
8450 	 *
8451 	 * XXX No need to check for this in the FS.  We should always have the leaves
8452 	 * in VFS in this case.
8453 	 */
8454 	if (fvp->v_type == VDIR &&
8455 	    ((fdvp == fvp) ||
8456 	    (fcnp->cn_namelen == 1 && fcnp->cn_nameptr[0] == '.') ||
8457 	    ((fcnp->cn_flags | tcnp->cn_flags) & ISDOTDOT))) {
8458 		error = EINVAL;
8459 		goto out;
8460 	}
8461 
8462 	if (tvp == NULLVP && vnode_compound_rename_available(tdvp)) {
8463 		error = lookup_validate_creation_path(tcnp->cn_ndp);
8464 		if (error) {
8465 			goto out;
8466 		}
8467 	}
8468 
8469 	/***** <MACF> *****/
8470 #if CONFIG_MACF
8471 	if (swap) {
8472 		error = mac_vnode_check_rename_swap(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
8473 	} else {
8474 		error = mac_vnode_check_rename(ctx, fdvp, fvp, fcnp, tdvp, tvp, tcnp);
8475 	}
8476 #if CONFIG_APPLEDOUBLE
8477 	if (!error && !(NATIVE_XATTR(fdvp)) &&
8478 	    fcnp->cn_namelen > (sizeof("._a") - 1) &&
8479 	    fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_') {
8480 		error = dot_underbar_check_paired_vnode(fcnp, fvp, fdvp, ctx);
8481 	}
8482 	/* Currently no Filesystem that does not support native xattrs supports rename swap */
8483 	if (!error && swap && !(NATIVE_XATTR(tdvp)) &&
8484 	    (tcnp->cn_namelen > (sizeof("._a") - 1)) &&
8485 	    (tcnp->cn_nameptr[0] == '.') && (tcnp->cn_nameptr[1] == '_')) {
8486 		error = dot_underbar_check_paired_vnode(tcnp, tvp, tdvp, ctx);
8487 	}
8488 #endif /* CONFIG_APPLEDOUBLE */
8489 	if (error) {
8490 		goto out;
8491 	}
8492 #endif
8493 	/***** </MACF> *****/
8494 
8495 	/***** <MiscChecks> *****/
8496 	if (tvp != NULL) {
8497 		if (!swap) {
8498 			if (fvp->v_type == VDIR && tvp->v_type != VDIR) {
8499 				error = ENOTDIR;
8500 				goto out;
8501 			} else if (fvp->v_type != VDIR && tvp->v_type == VDIR) {
8502 				error = EISDIR;
8503 				goto out;
8504 			}
8505 		}
8506 	} else if (swap) {
8507 		/*
8508 		 * Caller should have already checked this and returned
8509 		 * ENOENT.  If we send back ENOENT here, the caller will retry,
8510 		 * which isn't what we want, so we send back EINVAL here
8511 		 * instead.
8512 		 */
8513 		error = EINVAL;
8514 		goto out;
8515 	}
8516 
8517 	if (fvp == tdvp) {
8518 		error = EINVAL;
8519 		goto out;
8520 	}
8521 
8522 	/*
8523 	 * The following edge case is caught here:
8524 	 * (to cannot be a descendent of from)
8525 	 *
8526 	 *       o fdvp
8527 	 *      /
8528 	 *     /
8529 	 *    o fvp
8530 	 *     \
8531 	 *      \
8532 	 *       o tdvp
8533 	 *      /
8534 	 *     /
8535 	 *    o tvp
8536 	 */
8537 	if (tdvp->v_parent == fvp) {
8538 		error = EINVAL;
8539 		goto out;
8540 	}
8541 
8542 	if (swap && fdvp->v_parent == tvp) {
8543 		error = EINVAL;
8544 		goto out;
8545 	}
8546 	/***** </MiscChecks> *****/
8547 
8548 	/***** <Kauth> *****/
8549 
8550 	/*
8551 	 * As part of the Kauth step, we call out to allow 3rd-party
8552 	 * fileop notification of "about to rename".  This is needed
8553 	 * in the event that 3rd-parties need to know that the DELETE
8554 	 * authorization is actually part of a rename.  It's important
8555 	 * that we guarantee that the DELETE call-out will always be
8556 	 * made if the WILL_RENAME call-out is made.  Another fileop
8557 	 * call-out will be performed once the operation is completed.
8558 	 * We can ignore the result of kauth_authorize_fileop().
8559 	 *
8560 	 * N.B. We are passing the vnode and *both* paths to each
8561 	 * call; kauth_authorize_fileop() extracts the "from" path
8562 	 * when posting a KAUTH_FILEOP_WILL_RENAME notification.
8563 	 * As such, we only post these notifications if all of the
8564 	 * information we need is provided.
8565 	 */
8566 
8567 	if (swap) {
8568 		kauth_action_t f = 0, t = 0;
8569 
8570 		/*
8571 		 * Directories changing parents need ...ADD_SUBDIR...  to
8572 		 * permit changing ".."
8573 		 */
8574 		if (fdvp != tdvp) {
8575 			if (vnode_isdir(fvp)) {
8576 				f = KAUTH_VNODE_ADD_SUBDIRECTORY;
8577 			}
8578 			if (vnode_isdir(tvp)) {
8579 				t = KAUTH_VNODE_ADD_SUBDIRECTORY;
8580 			}
8581 		}
8582 		if (to_path != NULL) {
8583 			kauth_authorize_fileop(vfs_context_ucred(ctx),
8584 			    KAUTH_FILEOP_WILL_RENAME,
8585 			    (uintptr_t)fvp,
8586 			    (uintptr_t)to_path);
8587 		}
8588 		error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | f, ctx);
8589 		if (error) {
8590 			goto out;
8591 		}
8592 		if (from_path != NULL) {
8593 			kauth_authorize_fileop(vfs_context_ucred(ctx),
8594 			    KAUTH_FILEOP_WILL_RENAME,
8595 			    (uintptr_t)tvp,
8596 			    (uintptr_t)from_path);
8597 		}
8598 		error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE | t, ctx);
8599 		if (error) {
8600 			goto out;
8601 		}
8602 		f = vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
8603 		t = vnode_isdir(tvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE;
8604 		if (fdvp == tdvp) {
8605 			error = vnode_authorize(fdvp, NULL, f | t, ctx);
8606 		} else {
8607 			error = vnode_authorize(fdvp, NULL, t, ctx);
8608 			if (error) {
8609 				goto out;
8610 			}
8611 			error = vnode_authorize(tdvp, NULL, f, ctx);
8612 		}
8613 		if (error) {
8614 			goto out;
8615 		}
8616 	} else {
8617 		error = 0;
8618 		if ((tvp != NULL) && vnode_isdir(tvp)) {
8619 			if (tvp != fdvp) {
8620 				moving = 1;
8621 			}
8622 		} else if (tdvp != fdvp) {
8623 			moving = 1;
8624 		}
8625 
8626 		/*
8627 		 * We must have delete rights to remove the old name even in
8628 		 * the simple case of fdvp == tdvp.
8629 		 *
8630 		 * If fvp is a directory and we are changing its parent,
8631 		 * then we also need rights to rewrite its ".." entry.
8632 		 */
8633 		if (to_path != NULL) {
8634 			kauth_authorize_fileop(vfs_context_ucred(ctx),
8635 			    KAUTH_FILEOP_WILL_RENAME,
8636 			    (uintptr_t)fvp,
8637 			    (uintptr_t)to_path);
8638 		}
8639 		if (vnode_isdir(fvp)) {
8640 			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE | KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
8641 				goto out;
8642 			}
8643 		} else {
8644 			if ((error = vnode_authorize(fvp, fdvp, KAUTH_VNODE_DELETE, ctx)) != 0) {
8645 				goto out;
8646 			}
8647 		}
8648 		if (moving) {
8649 			/* moving into tdvp or tvp, must have rights to add */
8650 			if ((error = vnode_authorize(((tvp != NULL) && vnode_isdir(tvp)) ? tvp : tdvp,
8651 			    NULL,
8652 			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE,
8653 			    ctx)) != 0) {
8654 				goto out;
8655 			}
8656 		} else {
8657 			/* node staying in same directory, must be allowed to add new name */
8658 			if ((error = vnode_authorize(fdvp, NULL,
8659 			    vnode_isdir(fvp) ? KAUTH_VNODE_ADD_SUBDIRECTORY : KAUTH_VNODE_ADD_FILE, ctx)) != 0) {
8660 				goto out;
8661 			}
8662 		}
8663 		/* overwriting tvp */
8664 		if ((tvp != NULL) && !vnode_isdir(tvp) &&
8665 		    ((error = vnode_authorize(tvp, tdvp, KAUTH_VNODE_DELETE, ctx)) != 0)) {
8666 			goto out;
8667 		}
8668 	}
8669 
8670 	/***** </Kauth> *****/
8671 
8672 	/* XXX more checks? */
8673 out:
8674 	return error;
8675 }
8676 
8677 int
8678 vn_authorize_mkdir(vnode_t dvp, struct componentname *cnp, struct vnode_attr *vap, vfs_context_t ctx, void *reserved)
8679 {
8680 #if !CONFIG_MACF
8681 #pragma unused(vap)
8682 #endif
8683 	int error;
8684 
8685 	if (reserved != NULL) {
8686 		panic("reserved not NULL in vn_authorize_mkdir()");
8687 	}
8688 
8689 	/* XXX A hack for now, to make shadow files work */
8690 	if (cnp->cn_ndp == NULL) {
8691 		return 0;
8692 	}
8693 
8694 	if (vnode_compound_mkdir_available(dvp)) {
8695 		error = lookup_validate_creation_path(cnp->cn_ndp);
8696 		if (error) {
8697 			goto out;
8698 		}
8699 	}
8700 
8701 #if CONFIG_MACF
8702 	error = mac_vnode_check_create(ctx,
8703 	    dvp, cnp, vap);
8704 	if (error) {
8705 		goto out;
8706 	}
8707 #endif
8708 
8709 	/* authorize addition of a directory to the parent */
8710 	if ((error = vnode_authorize(dvp, NULL, KAUTH_VNODE_ADD_SUBDIRECTORY, ctx)) != 0) {
8711 		goto out;
8712 	}
8713 
8714 out:
8715 	return error;
8716 }
8717 
8718 int
8719 vn_authorize_rmdir(vnode_t dvp, vnode_t vp, struct componentname *cnp, vfs_context_t ctx, void *reserved)
8720 {
8721 #if CONFIG_MACF
8722 	int error;
8723 #else
8724 #pragma unused(cnp)
8725 #endif
8726 	if (reserved != NULL) {
8727 		panic("Non-NULL reserved argument to vn_authorize_rmdir()");
8728 	}
8729 
8730 	if (vp->v_type != VDIR) {
8731 		/*
8732 		 * rmdir only deals with directories
8733 		 */
8734 		return ENOTDIR;
8735 	}
8736 
8737 	if (dvp == vp) {
8738 		/*
8739 		 * No rmdir "." please.
8740 		 */
8741 		return EINVAL;
8742 	}
8743 
8744 #if CONFIG_MACF
8745 	error = mac_vnode_check_unlink(ctx, dvp,
8746 	    vp, cnp);
8747 	if (error) {
8748 		return error;
8749 	}
8750 #endif
8751 
8752 	return vnode_authorize(vp, dvp, KAUTH_VNODE_DELETE, ctx);
8753 }
8754 
8755 /*
8756  * Authorizer for directory cloning. This does not use vnodes but instead
8757  * uses prefilled vnode attributes from the filesystem.
8758  *
8759  * The same function is called to set up the attributes required, perform the
8760  * authorization and cleanup (if required)
8761  */
8762 int
8763 vnode_attr_authorize_dir_clone(struct vnode_attr *vap, kauth_action_t action,
8764     struct vnode_attr *dvap, __unused vnode_t sdvp, mount_t mp,
8765     dir_clone_authorizer_op_t vattr_op, uint32_t flags, vfs_context_t ctx,
8766     __unused void *reserved)
8767 {
8768 	int error;
8769 	int is_suser = vfs_context_issuser(ctx);
8770 
8771 	if (vattr_op == OP_VATTR_SETUP) {
8772 		VATTR_INIT(vap);
8773 
8774 		/*
8775 		 * When ACL inheritance is implemented, both vap->va_acl and
8776 		 * dvap->va_acl will be required (even as superuser).
8777 		 */
8778 		VATTR_WANTED(vap, va_type);
8779 		VATTR_WANTED(vap, va_mode);
8780 		VATTR_WANTED(vap, va_flags);
8781 		VATTR_WANTED(vap, va_uid);
8782 		VATTR_WANTED(vap, va_gid);
8783 		if (dvap) {
8784 			VATTR_INIT(dvap);
8785 			VATTR_WANTED(dvap, va_flags);
8786 		}
8787 
8788 		if (!is_suser) {
8789 			/*
8790 			 * If not superuser, we have to evaluate ACLs and
8791 			 * need the target directory gid to set the initial
8792 			 * gid of the new object.
8793 			 */
8794 			VATTR_WANTED(vap, va_acl);
8795 			if (dvap) {
8796 				VATTR_WANTED(dvap, va_gid);
8797 			}
8798 		} else if (dvap && (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
8799 			VATTR_WANTED(dvap, va_gid);
8800 		}
8801 		return 0;
8802 	} else if (vattr_op == OP_VATTR_CLEANUP) {
8803 		return 0; /* Nothing to do for now */
8804 	}
8805 
8806 	/* dvap isn't used for authorization */
8807 	error = vnode_attr_authorize(vap, NULL, mp, action, ctx);
8808 
8809 	if (error) {
8810 		return error;
8811 	}
8812 
8813 	/*
8814 	 * vn_attribute_prepare should be able to accept attributes as well as
8815 	 * vnodes but for now we do this inline.
8816 	 */
8817 	if (!is_suser || (flags & VNODE_CLONEFILE_NOOWNERCOPY)) {
8818 		/*
8819 		 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit
8820 		 * owner is set, that owner takes ownership of all new files.
8821 		 */
8822 		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
8823 		    (mp->mnt_fsowner != KAUTH_UID_NONE)) {
8824 			VATTR_SET(vap, va_uid, mp->mnt_fsowner);
8825 		} else {
8826 			/* default owner is current user */
8827 			VATTR_SET(vap, va_uid,
8828 			    kauth_cred_getuid(vfs_context_ucred(ctx)));
8829 		}
8830 
8831 		if ((mp->mnt_flag & MNT_IGNORE_OWNERSHIP) &&
8832 		    (mp->mnt_fsgroup != KAUTH_GID_NONE)) {
8833 			VATTR_SET(vap, va_gid, mp->mnt_fsgroup);
8834 		} else {
8835 			/*
8836 			 * default group comes from parent object,
8837 			 * fallback to current user
8838 			 */
8839 			if (VATTR_IS_SUPPORTED(dvap, va_gid)) {
8840 				VATTR_SET(vap, va_gid, dvap->va_gid);
8841 			} else {
8842 				VATTR_SET(vap, va_gid,
8843 				    kauth_cred_getgid(vfs_context_ucred(ctx)));
8844 			}
8845 		}
8846 	}
8847 
8848 	/* Inherit SF_RESTRICTED bit from destination directory only */
8849 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
8850 		VATTR_SET(vap, va_flags,
8851 		    ((vap->va_flags & ~(UF_DATAVAULT | SF_RESTRICTED)))); /* Turn off from source */
8852 		if (VATTR_IS_ACTIVE(dvap, va_flags)) {
8853 			VATTR_SET(vap, va_flags,
8854 			    vap->va_flags | (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
8855 		}
8856 	} else if (VATTR_IS_ACTIVE(dvap, va_flags)) {
8857 		VATTR_SET(vap, va_flags, (dvap->va_flags & (UF_DATAVAULT | SF_RESTRICTED)));
8858 	}
8859 
8860 	return 0;
8861 }
8862 
8863 
8864 /*
8865  * Authorize an operation on a vnode.
8866  *
8867  * This is KPI, but here because it needs vnode_scope.
8868  *
8869  * Returns:	0			Success
8870  *	kauth_authorize_action:EPERM	...
8871  *	xlate => EACCES			Permission denied
8872  *	kauth_authorize_action:0	Success
8873  *	kauth_authorize_action:		Depends on callback return; this is
8874  *					usually only vnode_authorize_callback(),
8875  *					but may include other listeners, if any
8876  *					exist.
8877  *		EROFS
8878  *		EACCES
8879  *		EPERM
8880  *		???
8881  */
8882 int
8883 vnode_authorize(vnode_t vp, vnode_t dvp, kauth_action_t action, vfs_context_t ctx)
8884 {
8885 	int     error, result;
8886 
8887 	/*
8888 	 * We can't authorize against a dead vnode; allow all operations through so that
8889 	 * the correct error can be returned.
8890 	 */
8891 	if (vp->v_type == VBAD) {
8892 		return 0;
8893 	}
8894 
8895 	error = 0;
8896 	result = kauth_authorize_action(vnode_scope, vfs_context_ucred(ctx), action,
8897 	    (uintptr_t)ctx, (uintptr_t)vp, (uintptr_t)dvp, (uintptr_t)&error);
8898 	if (result == EPERM) {          /* traditional behaviour */
8899 		result = EACCES;
8900 	}
8901 	/* did the lower layers give a better error return? */
8902 	if ((result != 0) && (error != 0)) {
8903 		return error;
8904 	}
8905 	return result;
8906 }
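
/*
 * Illustrative only: a typical authorization query before reading file
 * data.  dvp is NULL because no directory-relative rights (add, delete)
 * are being requested:
 *
 *	error = vnode_authorize(vp, NULL, KAUTH_VNODE_READ_DATA, ctx);
 *	if (error != 0) {
 *		return error;   // commonly EACCES or EPERM
 *	}
 */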
8907 
8908 /*
8909  * Test for vnode immutability.
8910  *
8911  * The 'append' flag is set when the authorization request is constrained
8912  * to operations which only request the right to append to a file.
8913  *
8914  * The 'ignore' flag is set when an operation modifying the immutability flags
8915  * is being authorized.  We check the system securelevel to determine which
8916  * immutability flags we can ignore.
8917  */
8918 static int
8919 vnode_immutable(struct vnode_attr *vap, int append, int ignore)
8920 {
8921 	int     mask;
8922 
8923 	/* start with all bits precluding the operation */
8924 	mask = IMMUTABLE | APPEND;
8925 
8926 	/* if appending only, remove the append-only bits */
8927 	if (append) {
8928 		mask &= ~APPEND;
8929 	}
8930 
8931 	/* ignore only set when authorizing flags changes */
8932 	if (ignore) {
8933 		if (securelevel <= 0) {
8934 			/* in insecure state, flags do not inhibit changes */
8935 			mask = 0;
8936 		} else {
8937 			/* in secure state, user flags don't inhibit */
8938 			mask &= ~(UF_IMMUTABLE | UF_APPEND);
8939 		}
8940 	}
8941 	KAUTH_DEBUG("IMMUTABLE - file flags 0x%x mask 0x%x append = %d ignore = %d", vap->va_flags, mask, append, ignore);
8942 	if ((vap->va_flags & mask) != 0) {
8943 		return EPERM;
8944 	}
8945 	return 0;
8946 }
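
/*
 * Worked example of the mask logic above: with 'ignore' set and
 * securelevel > 0, the user flags are removed from the mask but the
 * system flags remain, so a vnode with SF_IMMUTABLE in va_flags still
 * fails with EPERM:
 *
 *	mask = IMMUTABLE | APPEND;            // UF_ and SF_ variants
 *	mask &= ~(UF_IMMUTABLE | UF_APPEND);  // ignore && securelevel > 0
 *	// (vap->va_flags & mask) != 0 when SF_IMMUTABLE is set => EPERM
 */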
8947 
8948 static int
8949 vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
8950 {
8951 	int result;
8952 
8953 	/* default assumption is not-owner */
8954 	result = 0;
8955 
8956 	/*
8957 	 * If the filesystem has given us a UID, we treat this as authoritative.
8958 	 */
8959 	if (vap && VATTR_IS_SUPPORTED(vap, va_uid)) {
8960 		result = (vap->va_uid == kauth_cred_getuid(cred)) ? 1 : 0;
8961 	}
8962 	/* we could test the owner UUID here if we had a policy for it */
8963 
8964 	return result;
8965 }
8966 
8967 /*
8968  * vauth_node_group
8969  *
8970  * Description:	Ask if a cred is a member of the group owning the vnode object
8971  *
8972  * Parameters:		vap		vnode attribute
8973  *				vap->va_gid	group owner of vnode object
8974  *			cred		credential to check
8975  *			ismember	pointer to where to put the answer
8976  *			idontknow	Return this if we can't get an answer
8977  *
8978  * Returns:		0		Success
8979  *			idontknow	Can't get information
8980  *	kauth_cred_ismember_gid:?	Error from kauth subsystem
8981  *	kauth_cred_ismember_gid:?	Error from kauth subsystem
8982  */
8983 static int
8984 vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int idontknow)
8985 {
8986 	int     error;
8987 	int     result;
8988 
8989 	error = 0;
8990 	result = 0;
8991 
8992 	/*
8993 	 * The caller is expected to have asked the filesystem for a group
8994 	 * at some point prior to calling this function.  The answer may
8995 	 * have been that there is no group ownership supported for the
8996 	 * vnode object, in which case we return 0 (with *ismember set to 0).
8997 	 */
8998 	if (vap && VATTR_IS_SUPPORTED(vap, va_gid)) {
8999 		error = kauth_cred_ismember_gid(cred, vap->va_gid, &result);
9000 		/*
9001 		 * Credentials which are opted into external group membership
9002 		 * resolution which are not known to the external resolver
9003 		 * will result in an ENOENT error.  We translate this into
9004 		 * the appropriate 'idontknow' response for our caller.
9005 		 *
9006 		 * XXX We do not make a distinction here between an ENOENT
9007 		 * XXX arising from a response from the external resolver,
9008 		 * XXX and an ENOENT which is internally generated.  This is
9009 		 * XXX a deficiency of the published kauth_cred_ismember_gid()
9010 		 * XXX KPI which can not be overcome without new KPI.  For
9011 		 * XXX all currently known cases, however, this will result
9012 		 * XXX in correct behaviour.
9013 		 */
9014 		if (error == ENOENT) {
9015 			error = idontknow;
9016 		}
9017 	}
9018 	/*
9019 	 * XXX We could test the group UUID here if we had a policy for it,
9020 	 * XXX but this is problematic from the perspective of synchronizing
9021 	 * XXX group UUID and POSIX GID ownership of a file and keeping the
9022 	 * XXX values coherent over time.  The problem is that the local
9023 	 * XXX system will vend transient group UUIDs for unknown POSIX GID
9024 	 * XXX values, and these are not persistent, whereas storage of values
9025 	 * XXX is persistent.  One potential solution to this is a local
9026 	 * XXX (persistent) replica of remote directory entries and vended
9027 	 * XXX local ids in a local directory server (think in terms of a
9028 	 * XXX caching DNS server).
9029 	 */
9030 
9031 	if (!error) {
9032 		*ismember = result;
9033 	}
9034 	return error;
9035 }
9036 
9037 static int
9038 vauth_file_owner(vauth_ctx vcp)
9039 {
9040 	int result;
9041 
9042 	if (vcp->flags_valid & _VAC_IS_OWNER) {
9043 		result = (vcp->flags & _VAC_IS_OWNER) ? 1 : 0;
9044 	} else {
9045 		result = vauth_node_owner(vcp->vap, vcp->ctx->vc_ucred);
9046 
9047 		/* cache our result */
9048 		vcp->flags_valid |= _VAC_IS_OWNER;
9049 		if (result) {
9050 			vcp->flags |= _VAC_IS_OWNER;
9051 		} else {
9052 			vcp->flags &= ~_VAC_IS_OWNER;
9053 		}
9054 	}
9055 	return result;
9056 }
9057 
9058 
9059 /*
9060  * vauth_file_ingroup
9061  *
9062  * Description:	Ask if a user is a member of the group owning the file
9063  *
9064  * Parameters:		vcp		The vnode authorization context that
9065  *					contains the user and file info
9066  *				vcp->flags_valid	Valid flags
9067  *				vcp->flags		Flags values
9068  *				vcp->vap		File vnode attributes
9069  *				vcp->ctx		VFS Context (for user)
9070  *			ismember	pointer to where to put the answer
9071  *			idontknow	Return this if we can't get an answer
9072  *
9073  * Returns:		0		Success
9074  *		vauth_node_group:?	Error from vauth_node_group()
9075  *
9076  * Implicit returns:	*ismember	0	The user is not a group member
9077  *					1	The user is a group member
9078  */
9079 static int
9080 vauth_file_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
9081 {
9082 	int     error;
9083 
9084 	/* Check for a cached answer first, to avoid the check if possible */
9085 	if (vcp->flags_valid & _VAC_IN_GROUP) {
9086 		*ismember = (vcp->flags & _VAC_IN_GROUP) ? 1 : 0;
9087 		error = 0;
9088 	} else {
9089 		/* Otherwise, go look for it */
9090 		error = vauth_node_group(vcp->vap, vcp->ctx->vc_ucred, ismember, idontknow);
9091 
9092 		if (!error) {
9093 			/* cache our result */
9094 			vcp->flags_valid |= _VAC_IN_GROUP;
9095 			if (*ismember) {
9096 				vcp->flags |= _VAC_IN_GROUP;
9097 			} else {
9098 				vcp->flags &= ~_VAC_IN_GROUP;
9099 			}
9100 		}
9101 	}
9102 	return error;
9103 }
9104 
9105 static int
9106 vauth_dir_owner(vauth_ctx vcp)
9107 {
9108 	int result;
9109 
9110 	if (vcp->flags_valid & _VAC_IS_DIR_OWNER) {
9111 		result = (vcp->flags & _VAC_IS_DIR_OWNER) ? 1 : 0;
9112 	} else {
9113 		result = vauth_node_owner(vcp->dvap, vcp->ctx->vc_ucred);
9114 
9115 		/* cache our result */
9116 		vcp->flags_valid |= _VAC_IS_DIR_OWNER;
9117 		if (result) {
9118 			vcp->flags |= _VAC_IS_DIR_OWNER;
9119 		} else {
9120 			vcp->flags &= ~_VAC_IS_DIR_OWNER;
9121 		}
9122 	}
9123 	return result;
9124 }
9125 
9126 /*
9127  * vauth_dir_ingroup
9128  *
9129  * Description:	Ask if a user is a member of the group owning the directory
9130  *
9131  * Parameters:		vcp		The vnode authorization context that
9132  *					contains the user and directory info
9133  *				vcp->flags_valid	Valid flags
9134  *				vcp->flags		Flags values
9135  *				vcp->dvap		Dir vnode attributes
9136  *				vcp->ctx		VFS Context (for user)
9137  *			ismember	pointer to where to put the answer
9138  *			idontknow	Return this if we can't get an answer
9139  *
9140  * Returns:		0		Success
9141  *		vauth_node_group:?	Error from vauth_node_group()
9142  *
9143  * Implicit returns:	*ismember	0	The user is not a group member
9144  *					1	The user is a group member
9145  */
9146 static int
9147 vauth_dir_ingroup(vauth_ctx vcp, int *ismember, int idontknow)
9148 {
9149 	int     error;
9150 
9151 	/* Check for a cached answer first, to avoid the check if possible */
9152 	if (vcp->flags_valid & _VAC_IN_DIR_GROUP) {
9153 		*ismember = (vcp->flags & _VAC_IN_DIR_GROUP) ? 1 : 0;
9154 		error = 0;
9155 	} else {
9156 		/* Otherwise, go look for it */
9157 		error = vauth_node_group(vcp->dvap, vcp->ctx->vc_ucred, ismember, idontknow);
9158 
9159 		if (!error) {
9160 			/* cache our result */
9161 			vcp->flags_valid |= _VAC_IN_DIR_GROUP;
9162 			if (*ismember) {
9163 				vcp->flags |= _VAC_IN_DIR_GROUP;
9164 			} else {
9165 				vcp->flags &= ~_VAC_IN_DIR_GROUP;
9166 			}
9167 		}
9168 	}
9169 	return error;
9170 }
9171 
9172 /*
9173  * Test the posix permissions in (vap) to determine whether (credential)
9174  * may perform (action)
9175  */
9176 static int
9177 vnode_authorize_posix(vauth_ctx vcp, int action, int on_dir)
9178 {
9179 	struct vnode_attr *vap;
9180 	int needed, error, owner_ok, group_ok, world_ok, ismember;
9181 #ifdef KAUTH_DEBUG_ENABLE
9182 	const char *where = "uninitialized";
9183 # define _SETWHERE(c)   where = c;
9184 #else
9185 # define _SETWHERE(c)
9186 #endif
9187 
9188 	/* checking file or directory? */
9189 	if (on_dir) {
9190 		vap = vcp->dvap;
9191 	} else {
9192 		vap = vcp->vap;
9193 	}
9194 
9195 	error = 0;
9196 
9197 	/*
9198 	 * We want to do as little work here as possible.  So first we check
9199 	 * which sets of permissions grant us the access we need, and avoid checking
9200 	 * whether specific permissions grant access when more generic ones would.
9201 	 */
9202 
9203 	/* owner permissions */
9204 	needed = 0;
9205 	if (action & VREAD) {
9206 		needed |= S_IRUSR;
9207 	}
9208 	if (action & VWRITE) {
9209 		needed |= S_IWUSR;
9210 	}
9211 	if (action & VEXEC) {
9212 		needed |= S_IXUSR;
9213 	}
9214 	owner_ok = (needed & vap->va_mode) == needed;
9215 
9216 	/*
9217 	 * Processes with the appropriate entitlement can mark themselves as
9218 	 * ignoring file/directory permissions if they own it.
9219 	 */
9220 	if (!owner_ok && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
9221 		owner_ok = 1;
9222 	}
9223 
9224 	/* group permissions */
9225 	needed = 0;
9226 	if (action & VREAD) {
9227 		needed |= S_IRGRP;
9228 	}
9229 	if (action & VWRITE) {
9230 		needed |= S_IWGRP;
9231 	}
9232 	if (action & VEXEC) {
9233 		needed |= S_IXGRP;
9234 	}
9235 	group_ok = (needed & vap->va_mode) == needed;
9236 
9237 	/* world permissions */
9238 	needed = 0;
9239 	if (action & VREAD) {
9240 		needed |= S_IROTH;
9241 	}
9242 	if (action & VWRITE) {
9243 		needed |= S_IWOTH;
9244 	}
9245 	if (action & VEXEC) {
9246 		needed |= S_IXOTH;
9247 	}
9248 	world_ok = (needed & vap->va_mode) == needed;
9249 
9250 	/* If granted/denied by all three, we're done */
9251 	if (owner_ok && group_ok && world_ok) {
9252 		_SETWHERE("all");
9253 		goto out;
9254 	}
9255 
9256 	if (!owner_ok && !group_ok && !world_ok) {
9257 		_SETWHERE("all");
9258 		error = EACCES;
9259 		goto out;
9260 	}
9261 
9262 	/* Check ownership (relatively cheap) */
9263 	if ((on_dir && vauth_dir_owner(vcp)) ||
9264 	    (!on_dir && vauth_file_owner(vcp))) {
9265 		_SETWHERE("user");
9266 		if (!owner_ok) {
9267 			error = EACCES;
9268 		}
9269 		goto out;
9270 	}
9271 
9272 	/* Not owner; if group and world both grant it we're done */
9273 	if (group_ok && world_ok) {
9274 		_SETWHERE("group/world");
9275 		goto out;
9276 	}
9277 	if (!group_ok && !world_ok) {
9278 		_SETWHERE("group/world");
9279 		error = EACCES;
9280 		goto out;
9281 	}
9282 
9283 	/* Check group membership (most expensive) */
9284 	ismember = 0;   /* Default to allow, if the target has no group owner */
9285 
9286 	/*
9287 	 * In the case we can't get an answer about the user from the call to
9288 	 * vauth_dir_ingroup() or vauth_file_ingroup(), we want to fail on
9289 	 * the side of caution, rather than simply granting access, or we will
9290 	 * fail to correctly implement exclusion groups, so we set the third
9291 	 * parameter on the basis of the state of 'group_ok'.
9292 	 */
9293 	if (on_dir) {
9294 		error = vauth_dir_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
9295 	} else {
9296 		error = vauth_file_ingroup(vcp, &ismember, (!group_ok ? EACCES : 0));
9297 	}
9298 	if (error) {
9299 		if (!group_ok) {
9300 			ismember = 1;
9301 		}
9302 		error = 0;
9303 	}
9304 	if (ismember) {
9305 		_SETWHERE("group");
9306 		if (!group_ok) {
9307 			error = EACCES;
9308 		}
9309 		goto out;
9310 	}
9311 
9312 	/* Not owner, not in group, use world result */
9313 	_SETWHERE("world");
9314 	if (!world_ok) {
9315 		error = EACCES;
9316 	}
9317 
9318 	/* FALLTHROUGH */
9319 
9320 out:
9321 	KAUTH_DEBUG("%p    %s - posix %s permissions : need %s%s%s %x have %s%s%s%s%s%s%s%s%s UID = %d file = %d,%d",
9322 	    vcp->vp, (error == 0) ? "ALLOWED" : "DENIED", where,
9323 	    (action & VREAD)  ? "r" : "-",
9324 	    (action & VWRITE) ? "w" : "-",
9325 	    (action & VEXEC)  ? "x" : "-",
9326 	    needed,
9327 	    (vap->va_mode & S_IRUSR) ? "r" : "-",
9328 	    (vap->va_mode & S_IWUSR) ? "w" : "-",
9329 	    (vap->va_mode & S_IXUSR) ? "x" : "-",
9330 	    (vap->va_mode & S_IRGRP) ? "r" : "-",
9331 	    (vap->va_mode & S_IWGRP) ? "w" : "-",
9332 	    (vap->va_mode & S_IXGRP) ? "x" : "-",
9333 	    (vap->va_mode & S_IROTH) ? "r" : "-",
9334 	    (vap->va_mode & S_IWOTH) ? "w" : "-",
9335 	    (vap->va_mode & S_IXOTH) ? "x" : "-",
9336 	    kauth_cred_getuid(vcp->ctx->vc_ucred),
9337 	    on_dir ? vcp->dvap->va_uid : vcp->vap->va_uid,
9338 	    on_dir ? vcp->dvap->va_gid : vcp->vap->va_gid);
9339 	return error;
9340 }
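/*
 * Illustrative sketch (not compiled): the owner/group/world evaluation
 * above on a concrete mode.  For a 0754 node and action = VREAD|VEXEC,
 * the owner class grants (7 = rwx), the group class grants (5 = r-x),
 * and the world class denies (4 = r--, no x), so ownership and group
 * membership must then be consulted to pick which answer applies.
 */
#if 0
	mode_t va_mode = 0754;
	int owner_ok = ((S_IRUSR | S_IXUSR) & va_mode) == (S_IRUSR | S_IXUSR); /* 1 */
	int group_ok = ((S_IRGRP | S_IXGRP) & va_mode) == (S_IRGRP | S_IXGRP); /* 1 */
	int world_ok = ((S_IROTH | S_IXOTH) & va_mode) == (S_IROTH | S_IXOTH); /* 0 */
#endif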
9341 
9342 /*
9343  * Authorize the deletion of the node vp from the directory dvp.
9344  *
9345  * We assume that:
9346  * - Neither the node nor the directory are immutable.
9347  * - The user is not the superuser.
9348  *
9349  * The precedence of factors for authorizing or denying delete for a credential
9350  *
9351  * 1) Explicit ACE on the node. (allow or deny DELETE)
9352  * 2) Explicit ACE on the directory (allow or deny DELETE_CHILD).
9353  *
9354  *    If there are conflicting ACEs on the node and the directory, the node
9355  *    ACE wins.
9356  *
9357  * 3) Sticky bit on the directory.
9358  *    Deletion is not permitted if the directory is sticky and the caller is
9359  *    not owner of the node or directory. The sticky bit rules are like a deny
9360  * delete ACE except lower in priority than ACLs either allowing or denying
9361  *    delete.
9362  *
9363  * 4) POSIX permissions on the directory.
9364  *
9365  * As an optimization, we cache whether or not delete child is permitted
9366  * on directories. This enables us to skip directory ACL and POSIX checks
9367  * as we already have the result from those checks. However, we always check the
9368  * node ACL and, if the directory has the sticky bit set, we always check its
9369  * ACL (even for a directory with an authorized delete child). Furthermore,
9370  * caching the delete child authorization is independent of the sticky bit
9371  * being set as it is only applicable in determining whether the node can be
9372  * deleted or not.
9373  */
9374 static int
9375 vnode_authorize_delete(vauth_ctx vcp, boolean_t cached_delete_child)
9376 {
9377 	struct vnode_attr       *vap = vcp->vap;
9378 	struct vnode_attr       *dvap = vcp->dvap;
9379 	kauth_cred_t            cred = vcp->ctx->vc_ucred;
9380 	struct kauth_acl_eval   eval;
9381 	int                     error, ismember;
9382 
9383 	/* Check the ACL on the node first */
9384 	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
9385 		eval.ae_requested = KAUTH_VNODE_DELETE;
9386 		eval.ae_acl = &vap->va_acl->acl_ace[0];
9387 		eval.ae_count = vap->va_acl->acl_entrycount;
9388 		eval.ae_options = 0;
9389 		if (vauth_file_owner(vcp)) {
9390 			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
9391 		}
9392 		/*
9393 		 * We use ENOENT as a marker to indicate we could not get
9394 		 * information in order to delay evaluation until after we
9395 		 * have the ACL evaluation answer.  Previously, we would
9396 		 * always deny the operation at this point.
9397 		 */
9398 		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
9399 			return error;
9400 		}
9401 		if (error == ENOENT) {
9402 			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
9403 		} else if (ismember) {
9404 			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
9405 		}
9406 		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
9407 		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
9408 		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
9409 		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
9410 
9411 		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
9412 			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
9413 			return error;
9414 		}
9415 
9416 		switch (eval.ae_result) {
9417 		case KAUTH_RESULT_DENY:
9418 			if (vauth_file_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
9419 				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
9420 				return 0;
9421 			}
9422 			KAUTH_DEBUG("%p    DENIED - denied by ACL", vcp->vp);
9423 			return EACCES;
9424 		case KAUTH_RESULT_ALLOW:
9425 			KAUTH_DEBUG("%p    ALLOWED - granted by ACL", vcp->vp);
9426 			return 0;
9427 		case KAUTH_RESULT_DEFER:
9428 		default:
9429 			/* Defer to directory */
9430 			KAUTH_DEBUG("%p    DEFERRED - by file ACL", vcp->vp);
9431 			break;
9432 		}
9433 	}
9434 
9435 	/*
9436 	 * Without a sticky bit, a previously authorized delete child is
9437 	 * sufficient to authorize this delete.
9438 	 *
9439 	 * If the sticky bit is set, a directory ACL which allows delete child
9440 	 * overrides a (potential) sticky bit deny. The authorized delete child
9441 	 * cannot tell us if it was authorized because of an explicit delete
9442 	 * child allow ACE or because of POSIX permissions, so we have to check
9443 	 * the directory ACL every time if the directory has a sticky bit.
9444 	 */
9445 	if (!(dvap->va_mode & S_ISTXT) && cached_delete_child) {
9446 		KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL or POSIX permissions and no sticky bit on directory", vcp->vp);
9447 		return 0;
9448 	}
9449 
9450 	/* check the ACL on the directory */
9451 	if (VATTR_IS_NOT(dvap, va_acl, NULL)) {
9452 		eval.ae_requested = KAUTH_VNODE_DELETE_CHILD;
9453 		eval.ae_acl = &dvap->va_acl->acl_ace[0];
9454 		eval.ae_count = dvap->va_acl->acl_entrycount;
9455 		eval.ae_options = 0;
9456 		if (vauth_dir_owner(vcp)) {
9457 			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
9458 		}
9459 		/*
9460 		 * We use ENOENT as a marker to indicate we could not get
9461 		 * information in order to delay evaluation until after we
9462 		 * have the ACL evaluation answer.  Previously, we would
9463 		 * always deny the operation at this point.
9464 		 */
9465 		if ((error = vauth_dir_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
9466 			return error;
9467 		}
9468 		if (error == ENOENT) {
9469 			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
9470 		} else if (ismember) {
9471 			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
9472 		}
9473 		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
9474 		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
9475 		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
9476 		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
9477 
9478 		/*
9479 		 * If there is no entry, we are going to defer to other
9480 		 * authorization mechanisms.
9481 		 */
9482 		error = kauth_acl_evaluate(cred, &eval);
9483 
9484 		if (error != 0) {
9485 			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
9486 			return error;
9487 		}
9488 		switch (eval.ae_result) {
9489 		case KAUTH_RESULT_DENY:
9490 			if (vauth_dir_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
9491 				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
9492 				return 0;
9493 			}
9494 			KAUTH_DEBUG("%p    DENIED - denied by directory ACL", vcp->vp);
9495 			return EACCES;
9496 		case KAUTH_RESULT_ALLOW:
9497 			KAUTH_DEBUG("%p    ALLOWED - granted by directory ACL", vcp->vp);
9498 			if (!cached_delete_child && vcp->dvp) {
9499 				vnode_cache_authorized_action(vcp->dvp,
9500 				    vcp->ctx, KAUTH_VNODE_DELETE_CHILD);
9501 			}
9502 			return 0;
9503 		case KAUTH_RESULT_DEFER:
9504 		default:
9505 			/* Deferred by directory ACL */
9506 			KAUTH_DEBUG("%p    DEFERRED - directory ACL", vcp->vp);
9507 			break;
9508 		}
9509 	}
9510 
9511 	/*
9512 	 * From this point, we can't explicitly allow and if we reach the end
9513 	 * of the function without a denial, then the delete is authorized.
9514 	 */
9515 	if (!cached_delete_child) {
9516 		if (vnode_authorize_posix(vcp, VWRITE, 1 /* on_dir */) != 0) {
9517 			KAUTH_DEBUG("%p    DENIED - denied by posix permissions", vcp->vp);
9518 			return EACCES;
9519 		}
9520 		/*
9521 		 * Cache the authorized action on the vnode if allowed by the
9522 		 * directory ACL or POSIX permissions. It is correct to cache
9523 		 * this action even if sticky bit would deny deleting the node.
9524 		 */
9525 		if (vcp->dvp) {
9526 			vnode_cache_authorized_action(vcp->dvp, vcp->ctx,
9527 			    KAUTH_VNODE_DELETE_CHILD);
9528 		}
9529 	}
9530 
9531 	/* enforce sticky bit behaviour */
9532 	if ((dvap->va_mode & S_ISTXT) && !vauth_file_owner(vcp) && !vauth_dir_owner(vcp)) {
9533 		KAUTH_DEBUG("%p    DENIED - sticky bit rules (user %d  file %d  dir %d)",
9534 		    vcp->vp, cred->cr_posix.cr_uid, vap->va_uid, dvap->va_uid);
9535 		return EACCES;
9536 	}
9537 
9538 	/* not denied, must be OK */
9539 	return 0;
9540 }
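/*
 * Illustrative sketch (not compiled): the delete-authorization precedence
 * implemented above, condensed.  Each stage may answer definitively or
 * defer to the next; the variable names here are hypothetical stand-ins
 * for the evaluations performed by the real code, and the deny-only
 * POSIX and sticky-bit checks are order-independent.
 */
#if 0
	switch (node_acl_result) {              /* 1) node ACE wins outright */
	case KAUTH_RESULT_ALLOW: return 0;
	case KAUTH_RESULT_DENY:  return EACCES;
	default: break;                         /* defer to the directory */
	}
	switch (dir_acl_result) {               /* 2) directory ACE next; an */
	case KAUTH_RESULT_ALLOW: return 0;      /*    allow overrides sticky */
	case KAUTH_RESULT_DENY:  return EACCES;
	default: break;
	}
	if (!posix_write_on_dir) {              /* 4) POSIX write on directory */
		return EACCES;
	}
	if (sticky && !owns_node && !owns_dir) { /* 3) sticky-bit deny */
		return EACCES;
	}
	return 0;
#endif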
9541 
9542 
9543 /*
9544  * Authorize an operation based on the node's attributes.
9545  */
9546 static int
9547 vnode_authorize_simple(vauth_ctx vcp, kauth_ace_rights_t acl_rights, kauth_ace_rights_t preauth_rights, boolean_t *found_deny)
9548 {
9549 	struct vnode_attr       *vap = vcp->vap;
9550 	kauth_cred_t            cred = vcp->ctx->vc_ucred;
9551 	struct kauth_acl_eval   eval;
9552 	int                     error, ismember;
9553 	mode_t                  posix_action;
9554 
9555 	/*
9556 	 * If we are the file owner, we automatically have some rights.
9557 	 *
9558 	 * Do we need to expand this to support group ownership?
9559 	 */
9560 	if (vauth_file_owner(vcp)) {
9561 		acl_rights &= ~(KAUTH_VNODE_WRITE_SECURITY);
9562 	}
9563 
9564 	/*
9565 	 * If we are checking both TAKE_OWNERSHIP and WRITE_SECURITY, we can
9566 	 * mask the latter.  If TAKE_OWNERSHIP is requested the caller is about to
9567 	 * change ownership to themselves, and WRITE_SECURITY is implicitly
9568 	 * granted to the owner.  We need to do this because at this point
9569 	 * WRITE_SECURITY may not be granted as the caller is not currently
9570 	 * the owner.
9571 	 */
9572 	if ((acl_rights & KAUTH_VNODE_TAKE_OWNERSHIP) &&
9573 	    (acl_rights & KAUTH_VNODE_WRITE_SECURITY)) {
9574 		acl_rights &= ~KAUTH_VNODE_WRITE_SECURITY;
9575 	}
9576 
9577 	if (acl_rights == 0) {
9578 		KAUTH_DEBUG("%p    ALLOWED - implicit or no rights required", vcp->vp);
9579 		return 0;
9580 	}
9581 
9582 	/* if we have an ACL, evaluate it */
9583 	if (VATTR_IS_NOT(vap, va_acl, NULL)) {
9584 		eval.ae_requested = acl_rights;
9585 		eval.ae_acl = &vap->va_acl->acl_ace[0];
9586 		eval.ae_count = vap->va_acl->acl_entrycount;
9587 		eval.ae_options = 0;
9588 		if (vauth_file_owner(vcp)) {
9589 			eval.ae_options |= KAUTH_AEVAL_IS_OWNER;
9590 		}
9591 		/*
9592 		 * We use ENOENT as a marker to indicate we could not get
9593 		 * information in order to delay evaluation until after we
9594 		 * have the ACL evaluation answer.  Previously, we would
9595 		 * always deny the operation at this point.
9596 		 */
9597 		if ((error = vauth_file_ingroup(vcp, &ismember, ENOENT)) != 0 && error != ENOENT) {
9598 			return error;
9599 		}
9600 		if (error == ENOENT) {
9601 			eval.ae_options |= KAUTH_AEVAL_IN_GROUP_UNKNOWN;
9602 		} else if (ismember) {
9603 			eval.ae_options |= KAUTH_AEVAL_IN_GROUP;
9604 		}
9605 		eval.ae_exp_gall = KAUTH_VNODE_GENERIC_ALL_BITS;
9606 		eval.ae_exp_gread = KAUTH_VNODE_GENERIC_READ_BITS;
9607 		eval.ae_exp_gwrite = KAUTH_VNODE_GENERIC_WRITE_BITS;
9608 		eval.ae_exp_gexec = KAUTH_VNODE_GENERIC_EXECUTE_BITS;
9609 
9610 		if ((error = kauth_acl_evaluate(cred, &eval)) != 0) {
9611 			KAUTH_DEBUG("%p    ERROR during ACL processing - %d", vcp->vp, error);
9612 			return error;
9613 		}
9614 
9615 		switch (eval.ae_result) {
9616 		case KAUTH_RESULT_DENY:
9617 			if (vauth_file_owner(vcp) && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
9618 				KAUTH_DEBUG("%p    Override DENY due to entitlement", vcp->vp);
9619 				return 0;
9620 			}
9621 			KAUTH_DEBUG("%p    DENIED - by ACL", vcp->vp);
9622 			return EACCES;         /* deny, deny, counter-allege */
9623 		case KAUTH_RESULT_ALLOW:
9624 			KAUTH_DEBUG("%p    ALLOWED - all rights granted by ACL", vcp->vp);
9625 			return 0;
9626 		case KAUTH_RESULT_DEFER:
9627 		default:
9628 			/* Defer to residual rights evaluation below */
9629 			KAUTH_DEBUG("%p    DEFERRED - by file ACL", vcp->vp);
9630 			break;
9631 		}
9632 
9633 		*found_deny = eval.ae_found_deny;
9634 
9635 		/* fall through and evaluate residual rights */
9636 	} else {
9637 		/* no ACL, everything is residual */
9638 		eval.ae_residual = acl_rights;
9639 	}
9640 
9641 	/*
9642 	 * Grant residual rights that have been pre-authorized.
9643 	 */
9644 	eval.ae_residual &= ~preauth_rights;
9645 
9646 	/*
9647 	 * We grant WRITE_ATTRIBUTES to the owner if it hasn't been denied.
9648 	 */
9649 	if (vauth_file_owner(vcp)) {
9650 		eval.ae_residual &= ~KAUTH_VNODE_WRITE_ATTRIBUTES;
9651 	}
9652 
9653 	if (eval.ae_residual == 0) {
9654 		KAUTH_DEBUG("%p    ALLOWED - rights already authorized", vcp->vp);
9655 		return 0;
9656 	}
9657 
9658 	/*
9659 	 * Bail if we have residual rights that can't be granted by posix permissions,
9660 	 * or aren't presumed granted at this point.
9661 	 *
9662 	 * XXX these can be collapsed for performance
9663 	 */
9664 	if (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER) {
9665 		KAUTH_DEBUG("%p    DENIED - CHANGE_OWNER not permitted", vcp->vp);
9666 		return EACCES;
9667 	}
9668 	if (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY) {
9669 		KAUTH_DEBUG("%p    DENIED - WRITE_SECURITY not permitted", vcp->vp);
9670 		return EACCES;
9671 	}
9672 
9673 #if DIAGNOSTIC
9674 	if (eval.ae_residual & KAUTH_VNODE_DELETE) {
9675 		panic("vnode_authorize: can't be checking delete permission here");
9676 	}
9677 #endif
9678 
9679 	/*
9680 	 * Compute the fallback posix permissions that will satisfy the remaining
9681 	 * rights.
9682 	 */
9683 	posix_action = 0;
9684 	if (eval.ae_residual & (KAUTH_VNODE_READ_DATA |
9685 	    KAUTH_VNODE_LIST_DIRECTORY |
9686 	    KAUTH_VNODE_READ_EXTATTRIBUTES)) {
9687 		posix_action |= VREAD;
9688 	}
9689 	if (eval.ae_residual & (KAUTH_VNODE_WRITE_DATA |
9690 	    KAUTH_VNODE_ADD_FILE |
9691 	    KAUTH_VNODE_ADD_SUBDIRECTORY |
9692 	    KAUTH_VNODE_DELETE_CHILD |
9693 	    KAUTH_VNODE_WRITE_ATTRIBUTES |
9694 	    KAUTH_VNODE_WRITE_EXTATTRIBUTES)) {
9695 		posix_action |= VWRITE;
9696 	}
9697 	if (eval.ae_residual & (KAUTH_VNODE_EXECUTE |
9698 	    KAUTH_VNODE_SEARCH)) {
9699 		posix_action |= VEXEC;
9700 	}
9701 
9702 	if (posix_action != 0) {
9703 		return vnode_authorize_posix(vcp, posix_action, 0 /* !on_dir */);
9704 	} else {
9705 		KAUTH_DEBUG("%p    ALLOWED - residual rights %s%s%s%s%s%s%s%s%s%s%s%s%s%s granted due to no posix mapping",
9706 		    vcp->vp,
9707 		    (eval.ae_residual & KAUTH_VNODE_READ_DATA)
9708 		    ? vnode_isdir(vcp->vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
9709 		    (eval.ae_residual & KAUTH_VNODE_WRITE_DATA)
9710 		    ? vnode_isdir(vcp->vp) ? " ADD_FILE" : " WRITE_DATA" : "",
9711 		    (eval.ae_residual & KAUTH_VNODE_EXECUTE)
9712 		    ? vnode_isdir(vcp->vp) ? " SEARCH" : " EXECUTE" : "",
9713 		    (eval.ae_residual & KAUTH_VNODE_DELETE)
9714 		    ? " DELETE" : "",
9715 		    (eval.ae_residual & KAUTH_VNODE_APPEND_DATA)
9716 		    ? vnode_isdir(vcp->vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
9717 		    (eval.ae_residual & KAUTH_VNODE_DELETE_CHILD)
9718 		    ? " DELETE_CHILD" : "",
9719 		    (eval.ae_residual & KAUTH_VNODE_READ_ATTRIBUTES)
9720 		    ? " READ_ATTRIBUTES" : "",
9721 		    (eval.ae_residual & KAUTH_VNODE_WRITE_ATTRIBUTES)
9722 		    ? " WRITE_ATTRIBUTES" : "",
9723 		    (eval.ae_residual & KAUTH_VNODE_READ_EXTATTRIBUTES)
9724 		    ? " READ_EXTATTRIBUTES" : "",
9725 		    (eval.ae_residual & KAUTH_VNODE_WRITE_EXTATTRIBUTES)
9726 		    ? " WRITE_EXTATTRIBUTES" : "",
9727 		    (eval.ae_residual & KAUTH_VNODE_READ_SECURITY)
9728 		    ? " READ_SECURITY" : "",
9729 		    (eval.ae_residual & KAUTH_VNODE_WRITE_SECURITY)
9730 		    ? " WRITE_SECURITY" : "",
9731 		    (eval.ae_residual & KAUTH_VNODE_CHECKIMMUTABLE)
9732 		    ? " CHECKIMMUTABLE" : "",
9733 		    (eval.ae_residual & KAUTH_VNODE_CHANGE_OWNER)
9734 		    ? " CHANGE_OWNER" : "");
9735 	}
9736 
9737 	/*
9738 	 * Lack of required Posix permissions implies no reason to deny access.
9739 	 */
9740 	return 0;
9741 }
9742 
9743 /*
9744  * Check for file immutability.
9745  */
9746 static int
9747 vnode_authorize_checkimmutable(mount_t mp, vauth_ctx vcp,
9748     struct vnode_attr *vap, int rights, int ignore)
9749 {
9750 	int error;
9751 	int append;
9752 
9753 	/*
9754 	 * Perform immutability checks for operations that change data.
9755 	 *
9756 	 * Sockets, fifos and devices require special handling.
9757 	 */
9758 	switch (vap->va_type) {
9759 	case VSOCK:
9760 	case VFIFO:
9761 	case VBLK:
9762 	case VCHR:
9763 		/*
9764 		 * Writing to these nodes does not change the filesystem data,
9765 		 * so forget that it's being tried.
9766 		 */
9767 		rights &= ~KAUTH_VNODE_WRITE_DATA;
9768 		break;
9769 	default:
9770 		break;
9771 	}
9772 
9773 	error = 0;
9774 	if (rights & KAUTH_VNODE_WRITE_RIGHTS) {
9775 		/* check per-filesystem options if possible */
9776 		if (mp != NULL) {
9777 			/* check for no-EA filesystems */
9778 			if ((rights & KAUTH_VNODE_WRITE_EXTATTRIBUTES) &&
9779 			    (vfs_flags(mp) & MNT_NOUSERXATTR)) {
9780 				KAUTH_DEBUG("%p    DENIED - filesystem disallowed extended attributes", vap);
9781 				error = EACCES;  /* User attributes disabled */
9782 				goto out;
9783 			}
9784 		}
9785 
9786 		/*
9787 		 * check for file immutability. first, check if the requested rights are
9788 		 * allowable for a UF_APPEND file.
9789 		 */
9790 		append = 0;
9791 		if (vap->va_type == VDIR) {
9792 			if ((rights & (KAUTH_VNODE_ADD_FILE | KAUTH_VNODE_ADD_SUBDIRECTORY | KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS)) == rights) {
9793 				append = 1;
9794 			}
9795 		} else {
9796 			if ((rights & (KAUTH_VNODE_APPEND_DATA | KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS)) == rights) {
9797 				append = 1;
9798 			}
9799 		}
9800 		if ((error = vnode_immutable(vap, append, ignore)) != 0) {
9801 			if (error && !ignore) {
9802 				/*
9803 				 * In case of a rename, we want to check ownership for dvp as well.
9804 				 */
9805 				int owner = 0;
9806 				if (rights & KAUTH_VNODE_DELETE_CHILD && vcp->dvp != NULL) {
9807 					owner = vauth_file_owner(vcp) && vauth_dir_owner(vcp);
9808 				} else {
9809 					owner = vauth_file_owner(vcp);
9810 				}
9811 				if (owner && proc_ignores_node_permissions(vfs_context_proc(vcp->ctx))) {
9812 					error = vnode_immutable(vap, append, 1);
9813 				}
9814 			}
9815 		}
9816 		if (error) {
9817 			KAUTH_DEBUG("%p    DENIED - file is immutable", vap);
9818 			goto out;
9819 		}
9820 	}
9821 out:
9822 	return error;
9823 }
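/*
 * Illustrative sketch (not compiled): the append test above is a subset
 * check.  (rights & allowed) == rights holds exactly when every requested
 * right is inside 'allowed'; OR-ing in ~KAUTH_VNODE_WRITE_RIGHTS ignores
 * all non-write bits, so only the write-class rights are constrained to
 * the append-compatible set.  'rights' here mirrors the parameter above.
 */
#if 0
	kauth_ace_rights_t allowed = KAUTH_VNODE_APPEND_DATA |
	    KAUTH_VNODE_WRITE_EXTATTRIBUTES | ~KAUTH_VNODE_WRITE_RIGHTS;
	int append_only = ((rights & allowed) == rights);
#endif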
9824 
9825 /*
9826  * Handle authorization actions for filesystems that advertise that the
9827  * server will be enforcing.
9828  *
9829  * Returns:	0			Authorization should be handled locally
9830  *		1			Authorization was handled by the FS
9831  *
9832  * Note:	Imputed returns will only occur if the authorization request
9833  *		was handled by the FS.
9834  *
9835  * Imputed:	*resultp, modified	Return code from FS when the request is
9836  *					handled by the FS.
9837  *		VNOP_ACCESS:???
9838  *		VNOP_OPEN:???
9839  */
9840 static int
vnode_authorize_opaque(vnode_t vp,int * resultp,kauth_action_t action,vfs_context_t ctx)9841 vnode_authorize_opaque(vnode_t vp, int *resultp, kauth_action_t action, vfs_context_t ctx)
9842 {
9843 	int     error;
9844 
9845 	/*
9846 	 * If the vp is a device node, socket or FIFO it actually represents a local
9847 	 * endpoint, so we need to handle it locally.
9848 	 */
9849 	switch (vp->v_type) {
9850 	case VBLK:
9851 	case VCHR:
9852 	case VSOCK:
9853 	case VFIFO:
9854 		return 0;
9855 	default:
9856 		break;
9857 	}
9858 
9859 	/*
9860 	 * In the advisory request case, if the filesystem doesn't think it's reliable
9861 	 * we will attempt to formulate a result ourselves based on VNOP_GETATTR data.
9862 	 */
9863 	if ((action & KAUTH_VNODE_ACCESS) && !vfs_authopaqueaccess(vp->v_mount)) {
9864 		return 0;
9865 	}
9866 
9867 	/*
9868 	 * Let the filesystem have a say in the matter.  It's OK for it to not implement
9869 	 * VNOP_ACCESS, as most will authorise inline with the actual request.
9870 	 */
9871 	if ((error = VNOP_ACCESS(vp, action, ctx)) != ENOTSUP) {
9872 		*resultp = error;
9873 		KAUTH_DEBUG("%p    DENIED - opaque filesystem VNOP_ACCESS denied access", vp);
9874 		return 1;
9875 	}
9876 
9877 	/*
9878 	 * Typically opaque filesystems do authorisation in-line, but exec is a special case.  In
9879 	 * order to be reasonably sure that exec will be permitted, we try a bit harder here.
9880 	 */
9881 	if ((action & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG)) {
9882 		/* try a VNOP_OPEN for readonly access */
9883 		if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) {
9884 			*resultp = error;
9885 			KAUTH_DEBUG("%p    DENIED - EXECUTE denied because file could not be opened readonly", vp);
9886 			return 1;
9887 		}
9888 		VNOP_CLOSE(vp, FREAD, ctx);
9889 	}
9890 
9891 	/*
9892 	 * We don't have any reason to believe that the request has to be denied at this point,
9893 	 * so go ahead and allow it.
9894 	 */
9895 	*resultp = 0;
9896 	KAUTH_DEBUG("%p    ALLOWED - bypassing access check for non-local filesystem", vp);
9897 	return 1;
9898 }
9899 
9900 
9901 
9902 
9903 /*
9904  * Returns:	KAUTH_RESULT_ALLOW
9905  *		KAUTH_RESULT_DENY
9906  *
9907  * Imputed:	*arg3, modified		Error code in the deny case
9908  *		EROFS			Read-only file system
9909  *		EACCES			Permission denied
9910  *		EPERM			Operation not permitted [no execute]
9911  *	vnode_getattr:ENOMEM		Not enough space [only if has filesec]
9912  *	vnode_getattr:???
9913  *	vnode_authorize_opaque:*arg2	???
9914  *	vnode_authorize_checkimmutable:???
9915  *	vnode_authorize_delete:???
9916  *	vnode_authorize_simple:???
9917  */
9918 
9919 
9920 static int
9921 vnode_authorize_callback(__unused kauth_cred_t cred, __unused void *idata,
9922     kauth_action_t action, uintptr_t arg0, uintptr_t arg1, uintptr_t arg2,
9923     uintptr_t arg3)
9924 {
9925 	vfs_context_t   ctx;
9926 	vnode_t         cvp = NULLVP;
9927 	vnode_t         vp, dvp;
9928 	int             result = KAUTH_RESULT_DENY;
9929 	int             parent_iocount = 0;
9930 	int             parent_action = 0; /* In case we need to use namedstream's data fork for cached rights */
9931 
9932 	ctx = (vfs_context_t)arg0;
9933 	vp = (vnode_t)arg1;
9934 	dvp = (vnode_t)arg2;
9935 
9936 	/*
9937 	 * if there are 2 vnodes passed in, we don't know at
9938 	 * this point which rights to look at based on the
9939 	 * combined action being passed in... defer until later...
9940 	 * otherwise check the kauth 'rights' cache hung
9941 	 * off of the vnode we're interested in... if we've already
9942 	 * been granted the right we're currently interested in,
9943 	 * we can just return success... otherwise we'll go through
9944 	 * the process of authorizing the requested right(s)... if that
9945 	 * succeeds, we'll add the right(s) to the cache.
9946 	 * VNOP_SETATTR and VNOP_SETXATTR will invalidate this cache
9947 	 */
9948 	if (dvp && vp) {
9949 		goto defer;
9950 	}
9951 	if (dvp) {
9952 		cvp = dvp;
9953 	} else {
9954 		/*
9955 		 * For named streams on local-authorization volumes, rights are cached on the parent;
9956 		 * authorization is determined by looking at the parent's properties anyway, so storing
9957 		 * on the parent means that we don't recompute for the named stream and that if
9958 		 * we need to flush rights (e.g. on VNOP_SETATTR()) we don't need to track down the
9959 		 * stream to flush its cache separately.  If we miss in the cache, then we authorize
9960 		 * as if there were no cached rights (passing the named stream vnode and desired rights to
9961 		 * vnode_authorize_callback_int()).
9962 		 *
9963 		 * On an opaquely authorized volume, we don't know the relationship between the
9964 		 * data fork's properties and the rights granted on a stream.  Thus, named stream vnodes
9965 		 * on such a volume are authorized directly (rather than using the parent) and have their
9966 		 * own caches.  When a named stream vnode is created, we mark the parent as having a named
9967 		 * stream. On a VNOP_SETATTR() for the parent that may invalidate cached authorization, we
9968 		 * find the stream and flush its cache.
9969 		 */
9970 		if (vnode_isnamedstream(vp) && (!vfs_authopaque(vp->v_mount))) {
9971 			cvp = vnode_getparent(vp);
9972 			if (cvp != NULLVP) {
9973 				parent_iocount = 1;
9974 			} else {
9975 				cvp = NULL;
9976 				goto defer; /* If we can't use the parent, take the slow path */
9977 			}
9978 
9979 			/* Have to translate some actions */
9980 			parent_action = action;
9981 			if (parent_action & KAUTH_VNODE_READ_DATA) {
9982 				parent_action &= ~KAUTH_VNODE_READ_DATA;
9983 				parent_action |= KAUTH_VNODE_READ_EXTATTRIBUTES;
9984 			}
9985 			if (parent_action & KAUTH_VNODE_WRITE_DATA) {
9986 				parent_action &= ~KAUTH_VNODE_WRITE_DATA;
9987 				parent_action |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
9988 			}
9989 		} else {
9990 			cvp = vp;
9991 		}
9992 	}
9993 
9994 	if (vnode_cache_is_authorized(cvp, ctx, parent_iocount ? parent_action : action) == TRUE) {
9995 		result = KAUTH_RESULT_ALLOW;
9996 		goto out;
9997 	}
9998 defer:
9999 	result = vnode_authorize_callback_int(action, ctx, vp, dvp, (int *)arg3);
10000 
10001 	if (result == KAUTH_RESULT_ALLOW && cvp != NULLVP) {
10002 		KAUTH_DEBUG("%p - caching action = %x", cvp, action);
10003 		vnode_cache_authorized_action(cvp, ctx, action);
10004 	}
10005 
10006 out:
10007 	if (parent_iocount) {
10008 		vnode_put(cvp);
10009 	}
10010 
10011 	return result;
10012 }
10013 
10014 static int
10015 vnode_attr_authorize_internal(vauth_ctx vcp, mount_t mp,
10016     kauth_ace_rights_t rights, int is_suser, boolean_t *found_deny,
10017     int noimmutable, int parent_authorized_for_delete_child)
10018 {
10019 	int result;
10020 
10021 	/*
10022 	 * Check for immutability.
10023 	 *
10024 	 * In the deletion case, parent directory immutability vetoes specific
10025 	 * file rights.
10026 	 */
10027 	if ((result = vnode_authorize_checkimmutable(mp, vcp, vcp->vap, rights,
10028 	    noimmutable)) != 0) {
10029 		goto out;
10030 	}
10031 
10032 	if ((rights & KAUTH_VNODE_DELETE) &&
10033 	    !parent_authorized_for_delete_child) {
10034 		result = vnode_authorize_checkimmutable(mp, vcp, vcp->dvap,
10035 		    KAUTH_VNODE_DELETE_CHILD, 0);
10036 		if (result) {
10037 			goto out;
10038 		}
10039 	}
10040 
10041 	/*
10042 	 * Clear rights that have been authorized by reaching this point, bail if nothing left to
10043 	 * check.
10044 	 */
10045 	rights &= ~(KAUTH_VNODE_LINKTARGET | KAUTH_VNODE_CHECKIMMUTABLE);
10046 	if (rights == 0) {
10047 		goto out;
10048 	}
10049 
10050 	/*
10051 	 * If we're not the superuser, authorize based on file properties;
10052 	 * note that even if parent_authorized_for_delete_child is TRUE, we
10053 	 * need to check on the node itself.
10054 	 */
10055 	if (!is_suser) {
10056 		/* process delete rights */
10057 		if ((rights & KAUTH_VNODE_DELETE) &&
10058 		    ((result = vnode_authorize_delete(vcp, parent_authorized_for_delete_child)) != 0)) {
10059 			goto out;
10060 		}
10061 
10062 		/* process remaining rights */
10063 		if ((rights & ~KAUTH_VNODE_DELETE) &&
10064 		    (result = vnode_authorize_simple(vcp, rights, rights & KAUTH_VNODE_DELETE, found_deny)) != 0) {
10065 			goto out;
10066 		}
10067 	} else {
10068 		/*
10069 		 * Execute is only granted to root if one of the x bits is set.  This check only
10070 		 * makes sense if the posix mode bits are actually supported.
10071 		 */
10072 		if ((rights & KAUTH_VNODE_EXECUTE) &&
10073 		    (vcp->vap->va_type == VREG) &&
10074 		    VATTR_IS_SUPPORTED(vcp->vap, va_mode) &&
10075 		    !(vcp->vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) {
10076 			result = EPERM;
10077 			KAUTH_DEBUG("%p    DENIED - root execute requires at least one x bit in 0x%x", vcp, vcp->vap->va_mode);
10078 			goto out;
10079 		}
10080 
10081 		/* Assume that there were DENYs so we don't wrongly cache KAUTH_VNODE_SEARCHBYANYONE */
10082 		*found_deny = TRUE;
10083 
10084 		KAUTH_DEBUG("%p    ALLOWED - caller is superuser", vcp);
10085 	}
10086 out:
10087 	return result;
10088 }
10089 
10090 static int
10091 vnode_authorize_callback_int(kauth_action_t action, vfs_context_t ctx,
10092     vnode_t vp, vnode_t dvp, int *errorp)
10093 {
10094 	struct _vnode_authorize_context auth_context;
10095 	vauth_ctx               vcp;
10096 	kauth_cred_t            cred;
10097 	kauth_ace_rights_t      rights;
10098 	struct vnode_attr       va, dva;
10099 	int                     result;
10100 	int                     noimmutable;
10101 	boolean_t               parent_authorized_for_delete_child = FALSE;
10102 	boolean_t               found_deny = FALSE;
10103 	boolean_t               parent_ref = FALSE;
10104 	boolean_t               is_suser = FALSE;
10105 
10106 	vcp = &auth_context;
10107 	vcp->ctx = ctx;
10108 	vcp->vp = vp;
10109 	vcp->dvp = dvp;
10110 	/*
10111 	 * Note that we authorize against the context, not the passed cred
10112 	 * (the same thing anyway)
10113 	 */
10114 	cred = ctx->vc_ucred;
10115 
10116 	VATTR_INIT(&va);
10117 	vcp->vap = &va;
10118 	VATTR_INIT(&dva);
10119 	vcp->dvap = &dva;
10120 
10121 	vcp->flags = vcp->flags_valid = 0;
10122 
10123 #if DIAGNOSTIC
10124 	if ((ctx == NULL) || (vp == NULL) || (cred == NULL)) {
10125 		panic("vnode_authorize: bad arguments (context %p  vp %p  cred %p)", ctx, vp, cred);
10126 	}
10127 #endif
10128 
10129 	KAUTH_DEBUG("%p  AUTH - %s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s on %s '%s' (0x%x:%p/%p)",
10130 	    vp, vfs_context_proc(ctx)->p_comm,
10131 	    (action & KAUTH_VNODE_ACCESS)               ? "access" : "auth",
10132 	    (action & KAUTH_VNODE_READ_DATA)            ? vnode_isdir(vp) ? " LIST_DIRECTORY" : " READ_DATA" : "",
10133 	    (action & KAUTH_VNODE_WRITE_DATA)           ? vnode_isdir(vp) ? " ADD_FILE" : " WRITE_DATA" : "",
10134 	    (action & KAUTH_VNODE_EXECUTE)              ? vnode_isdir(vp) ? " SEARCH" : " EXECUTE" : "",
10135 	    (action & KAUTH_VNODE_DELETE)               ? " DELETE" : "",
10136 	    (action & KAUTH_VNODE_APPEND_DATA)          ? vnode_isdir(vp) ? " ADD_SUBDIRECTORY" : " APPEND_DATA" : "",
10137 	    (action & KAUTH_VNODE_DELETE_CHILD)         ? " DELETE_CHILD" : "",
10138 	    (action & KAUTH_VNODE_READ_ATTRIBUTES)      ? " READ_ATTRIBUTES" : "",
10139 	    (action & KAUTH_VNODE_WRITE_ATTRIBUTES)     ? " WRITE_ATTRIBUTES" : "",
10140 	    (action & KAUTH_VNODE_READ_EXTATTRIBUTES)   ? " READ_EXTATTRIBUTES" : "",
10141 	    (action & KAUTH_VNODE_WRITE_EXTATTRIBUTES)  ? " WRITE_EXTATTRIBUTES" : "",
10142 	    (action & KAUTH_VNODE_READ_SECURITY)        ? " READ_SECURITY" : "",
10143 	    (action & KAUTH_VNODE_WRITE_SECURITY)       ? " WRITE_SECURITY" : "",
10144 	    (action & KAUTH_VNODE_CHANGE_OWNER)         ? " CHANGE_OWNER" : "",
10145 	    (action & KAUTH_VNODE_NOIMMUTABLE)          ? " (noimmutable)" : "",
10146 	    vnode_isdir(vp) ? "directory" : "file",
10147 	    vp->v_name ? vp->v_name : "<NULL>", action, vp, dvp);
10148 
10149 	/*
10150 	 * Extract the control bits from the action, everything else is
10151 	 * requested rights.
10152 	 */
10153 	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
10154 	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
10155 
10156 	if (rights & KAUTH_VNODE_DELETE) {
10157 #if DIAGNOSTIC
10158 		if (dvp == NULL) {
10159 			panic("vnode_authorize: KAUTH_VNODE_DELETE test requires a directory");
10160 		}
10161 #endif
10162 		/*
10163 		 * check to see if we've already authorized the parent
10164 		 * directory for deletion of its children... if so, we
10165 		 * can skip a whole bunch of work... we will still have to
10166 		 * authorize that this specific child can be removed
10167 		 */
10168 		if (vnode_cache_is_authorized(dvp, ctx, KAUTH_VNODE_DELETE_CHILD) == TRUE) {
10169 			parent_authorized_for_delete_child = TRUE;
10170 		}
10171 	} else {
10172 		vcp->dvp = NULLVP;
10173 		vcp->dvap = NULL;
10174 	}
10175 
10176 	/*
10177 	 * Check for read-only filesystems.
10178 	 */
10179 	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
10180 	    (vp->v_mount->mnt_flag & MNT_RDONLY) &&
10181 	    ((vp->v_type == VREG) || (vp->v_type == VDIR) ||
10182 	    (vp->v_type == VLNK) || (vp->v_type == VCPLX) ||
10183 	    (rights & KAUTH_VNODE_DELETE) || (rights & KAUTH_VNODE_DELETE_CHILD))) {
10184 		result = EROFS;
10185 		goto out;
10186 	}
10187 
10188 	/*
10189 	 * Check for noexec filesystems.
10190 	 */
10191 	if ((rights & KAUTH_VNODE_EXECUTE) && (vp->v_type == VREG) && (vp->v_mount->mnt_flag & MNT_NOEXEC)) {
10192 		result = EACCES;
10193 		goto out;
10194 	}
10195 
10196 	/*
10197 	 * Handle cases related to filesystems with non-local enforcement.
10198 	 * This call can return 0, in which case we will fall through to perform a
10199 	 * check based on VNOP_GETATTR data.  Otherwise it returns 1 and sets
10200 	 * an appropriate result, at which point we can return immediately.
10201 	 */
10202 	if ((vp->v_mount->mnt_kern_flag & MNTK_AUTH_OPAQUE) && vnode_authorize_opaque(vp, &result, action, ctx)) {
10203 		goto out;
10204 	}
10205 
10206 	/*
10207 	 * If the vnode is a namedstream (extended attribute) data vnode (eg.
10208 	 * a resource fork), *_DATA becomes *_EXTATTRIBUTES.
10209 	 */
10210 	if (vnode_isnamedstream(vp)) {
10211 		if (rights & KAUTH_VNODE_READ_DATA) {
10212 			rights &= ~KAUTH_VNODE_READ_DATA;
10213 			rights |= KAUTH_VNODE_READ_EXTATTRIBUTES;
10214 		}
10215 		if (rights & KAUTH_VNODE_WRITE_DATA) {
10216 			rights &= ~KAUTH_VNODE_WRITE_DATA;
10217 			rights |= KAUTH_VNODE_WRITE_EXTATTRIBUTES;
10218 		}
10219 
10220 		/*
10221 		 * Point 'vp' to the namedstream's parent for ACL checking
10222 		 */
10223 		if ((vp->v_parent != NULL) &&
10224 		    (vget_internal(vp->v_parent, 0, VNODE_NODEAD | VNODE_DRAINO) == 0)) {
10225 			parent_ref = TRUE;
10226 			vcp->vp = vp = vp->v_parent;
10227 		}
10228 	}
10229 
10230 	if (vfs_context_issuser(ctx)) {
10231 		/*
10232 		 * if we're not asking for execute permissions or modifications,
10233 		 * then we're done, this action is authorized.
10234 		 */
10235 		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
10236 			goto success;
10237 		}
10238 
10239 		is_suser = TRUE;
10240 	}
10241 
10242 	/*
10243 	 * Get vnode attributes and extended security information for the vnode
10244 	 * and directory if required.
10245 	 *
10246 	 * If we're root we only want mode bits and flags for checking
10247 	 * execute and immutability.
10248 	 */
10249 	VATTR_WANTED(&va, va_mode);
10250 	VATTR_WANTED(&va, va_flags);
10251 	if (!is_suser) {
10252 		VATTR_WANTED(&va, va_uid);
10253 		VATTR_WANTED(&va, va_gid);
10254 		VATTR_WANTED(&va, va_acl);
10255 	}
10256 	if ((result = vnode_getattr(vp, &va, ctx)) != 0) {
10257 		KAUTH_DEBUG("%p    ERROR - failed to get vnode attributes - %d", vp, result);
10258 		goto out;
10259 	}
10260 	VATTR_WANTED(&va, va_type);
10261 	VATTR_RETURN(&va, va_type, vnode_vtype(vp));
10262 
10263 	if (vcp->dvp) {
10264 		VATTR_WANTED(&dva, va_mode);
10265 		VATTR_WANTED(&dva, va_flags);
10266 		if (!is_suser) {
10267 			VATTR_WANTED(&dva, va_uid);
10268 			VATTR_WANTED(&dva, va_gid);
10269 			VATTR_WANTED(&dva, va_acl);
10270 		}
10271 		if ((result = vnode_getattr(vcp->dvp, &dva, ctx)) != 0) {
10272 			KAUTH_DEBUG("%p    ERROR - failed to get directory vnode attributes - %d", vp, result);
10273 			goto out;
10274 		}
10275 		VATTR_WANTED(&dva, va_type);
10276 		VATTR_RETURN(&dva, va_type, vnode_vtype(vcp->dvp));
10277 	}
10278 
10279 	result = vnode_attr_authorize_internal(vcp, vp->v_mount, rights, is_suser,
10280 	    &found_deny, noimmutable, parent_authorized_for_delete_child);
10281 out:
10282 	if (VATTR_IS_SUPPORTED(&va, va_acl) && (va.va_acl != NULL)) {
10283 		kauth_acl_free(va.va_acl);
10284 	}
10285 	if (VATTR_IS_SUPPORTED(&dva, va_acl) && (dva.va_acl != NULL)) {
10286 		kauth_acl_free(dva.va_acl);
10287 	}
10288 
10289 	if (result) {
10290 		if (parent_ref) {
10291 			vnode_put(vp);
10292 		}
10293 		*errorp = result;
10294 		KAUTH_DEBUG("%p    DENIED - auth denied", vp);
10295 		return KAUTH_RESULT_DENY;
10296 	}
10297 	if ((rights & KAUTH_VNODE_SEARCH) && found_deny == FALSE && vp->v_type == VDIR) {
10298 		/*
10299 		 * if we were successfully granted the right to search this directory
10300 		 * and there were NO ACL DENYs for search and the posix permissions also don't
10301 		 * deny execute, we can synthesize a global right that allows anyone to
10302 		 * traverse this directory during a pathname lookup without having to
10303 		 * match the credential associated with this cache of rights.
10304 		 *
10305 		 * Note that we can correctly cache KAUTH_VNODE_SEARCHBYANYONE
10306 		 * only if we actually check ACLs which we don't for root. As
10307 		 * a workaround, the lookup fast path checks for root.
10308 		 */
10309 		if (!VATTR_IS_SUPPORTED(&va, va_mode) ||
10310 		    ((va.va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
10311 		    (S_IXUSR | S_IXGRP | S_IXOTH))) {
10312 			vnode_cache_authorized_action(vp, ctx, KAUTH_VNODE_SEARCHBYANYONE);
10313 		}
10314 	}
10315 success:
10316 	if (parent_ref) {
10317 		vnode_put(vp);
10318 	}
10319 
10320 	/*
10321 	 * Note that this implies that we will allow requests for no rights, as well as
10322 	 * for rights that we do not recognise.  There should be none of these.
10323 	 */
10324 	KAUTH_DEBUG("%p    ALLOWED - auth granted", vp);
10325 	return KAUTH_RESULT_ALLOW;
10326 }
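/*
 * Illustrative sketch (not compiled): the KAUTH_VNODE_SEARCHBYANYONE test
 * above on concrete modes.  0755 has the x bit in all three classes, so
 * the global traversal right can be cached; 0750 lacks world-x, so the
 * synthesized right must not be cached.
 */
#if 0
	int anyone_0755 = ((0755 & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
	    (S_IXUSR | S_IXGRP | S_IXOTH));     /* 1: cacheable */
	int anyone_0750 = ((0750 & (S_IXUSR | S_IXGRP | S_IXOTH)) ==
	    (S_IXUSR | S_IXGRP | S_IXOTH));     /* 0: not cacheable */
#endif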
10327 
10328 int
10329 vnode_attr_authorize_init(struct vnode_attr *vap, struct vnode_attr *dvap,
10330     kauth_action_t action, vfs_context_t ctx)
10331 {
10332 	VATTR_INIT(vap);
10333 	VATTR_WANTED(vap, va_type);
10334 	VATTR_WANTED(vap, va_mode);
10335 	VATTR_WANTED(vap, va_flags);
10336 	if (dvap) {
10337 		VATTR_INIT(dvap);
10338 		if (action & KAUTH_VNODE_DELETE) {
10339 			VATTR_WANTED(dvap, va_type);
10340 			VATTR_WANTED(dvap, va_mode);
10341 			VATTR_WANTED(dvap, va_flags);
10342 		}
10343 	} else if (action & KAUTH_VNODE_DELETE) {
10344 		return EINVAL;
10345 	}
10346 
10347 	if (!vfs_context_issuser(ctx)) {
10348 		VATTR_WANTED(vap, va_uid);
10349 		VATTR_WANTED(vap, va_gid);
10350 		VATTR_WANTED(vap, va_acl);
10351 		if (dvap && (action & KAUTH_VNODE_DELETE)) {
10352 			VATTR_WANTED(dvap, va_uid);
10353 			VATTR_WANTED(dvap, va_gid);
10354 			VATTR_WANTED(dvap, va_acl);
10355 		}
10356 	}
10357 
10358 	return 0;
10359 }
10360 
10361 #define VNODE_SEC_ATTRS_NO_ACL (VNODE_ATTR_va_uid | VNODE_ATTR_va_gid | VNODE_ATTR_va_mode | VNODE_ATTR_va_flags | VNODE_ATTR_va_type)
10362 
10363 int
10364 vnode_attr_authorize(struct vnode_attr *vap, struct vnode_attr *dvap, mount_t mp,
10365     kauth_action_t action, vfs_context_t ctx)
10366 {
10367 	struct _vnode_authorize_context auth_context;
10368 	vauth_ctx vcp;
10369 	kauth_ace_rights_t rights;
10370 	int noimmutable;
10371 	boolean_t found_deny;
10372 	boolean_t is_suser = FALSE;
10373 	int result = 0;
10374 	uid_t ouid = vap->va_uid;
10375 	gid_t ogid = vap->va_gid;
10376 
10377 	vcp = &auth_context;
10378 	vcp->ctx = ctx;
10379 	vcp->vp = NULLVP;
10380 	vcp->vap = vap;
10381 	vcp->dvp = NULLVP;
10382 	vcp->dvap = dvap;
10383 	vcp->flags = vcp->flags_valid = 0;
10384 
10385 	noimmutable = (action & KAUTH_VNODE_NOIMMUTABLE) ? 1 : 0;
10386 	rights = action & ~(KAUTH_VNODE_ACCESS | KAUTH_VNODE_NOIMMUTABLE);
10387 
10388 	/*
10389 	 * Check for read-only filesystems.
10390 	 */
10391 	if ((rights & KAUTH_VNODE_WRITE_RIGHTS) &&
10392 	    mp && (mp->mnt_flag & MNT_RDONLY) &&
10393 	    ((vap->va_type == VREG) || (vap->va_type == VDIR) ||
10394 	    (vap->va_type == VLNK) || (rights & KAUTH_VNODE_DELETE) ||
10395 	    (rights & KAUTH_VNODE_DELETE_CHILD))) {
10396 		result = EROFS;
10397 		goto out;
10398 	}
10399 
10400 	/*
10401 	 * Check for noexec filesystems.
10402 	 */
10403 	if ((rights & KAUTH_VNODE_EXECUTE) &&
10404 	    (vap->va_type == VREG) && mp && (mp->mnt_flag & MNT_NOEXEC)) {
10405 		result = EACCES;
10406 		goto out;
10407 	}
10408 
10409 	if (vfs_context_issuser(ctx)) {
10410 		/*
10411 		 * if we're not asking for execute permissions or modifications,
10412 		 * then we're done, this action is authorized.
10413 		 */
10414 		if (!(rights & (KAUTH_VNODE_EXECUTE | KAUTH_VNODE_WRITE_RIGHTS))) {
10415 			goto out;
10416 		}
10417 		is_suser = TRUE;
10418 	}
10419 
10420 	if (mp) {
10421 		if (vfs_extendedsecurity(mp) && VATTR_IS_ACTIVE(vap, va_acl) && !VATTR_IS_SUPPORTED(vap, va_acl)) {
10422 			panic("(1) vnode attrs not complete for vnode_attr_authorize");
10423 		}
10424 		vnode_attr_handle_uid_and_gid(vap, mp, ctx);
10425 	}
10426 
10427 	if ((vap->va_active & VNODE_SEC_ATTRS_NO_ACL) != (vap->va_supported & VNODE_SEC_ATTRS_NO_ACL)) {
10428 		panic("(2) vnode attrs not complete for vnode_attr_authorize (2) vap->va_active = 0x%llx , vap->va_supported = 0x%llx",
10429 		    vap->va_active, vap->va_supported);
10430 	}
10431 
10432 	result = vnode_attr_authorize_internal(vcp, mp, rights, is_suser,
10433 	    &found_deny, noimmutable, FALSE);
10434 
10435 	if (mp) {
10436 		vap->va_uid = ouid;
10437 		vap->va_gid = ogid;
10438 	}
10439 
10440 	if (result == EPERM) {
10441 		result = EACCES;
10442 	}
10443 out:
10444 	return result;
10445 }
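/*
 * Illustrative sketch (not compiled): the intended calling sequence for
 * the two KPIs above.  A caller initializes the attribute lists, fills
 * them via vnode_getattr(), then authorizes against the attributes alone.
 * Everything except vnode_attr_authorize_init(), vnode_getattr() and
 * vnode_attr_authorize() is a hypothetical name for exposition.
 */
#if 0
static int
example_attr_authorize(vnode_t vp, vnode_t dvp, mount_t mp,
    kauth_action_t action, vfs_context_t ctx)
{
	struct vnode_attr va, dva;
	int error;

	/* mark which attributes the authorization will need */
	if ((error = vnode_attr_authorize_init(&va, &dva, action, ctx)) != 0) {
		return error;
	}
	/* fetch the node attributes (and the directory's, for delete) */
	if ((error = vnode_getattr(vp, &va, ctx)) != 0) {
		return error;
	}
	if ((action & KAUTH_VNODE_DELETE) &&
	    (error = vnode_getattr(dvp, &dva, ctx)) != 0) {
		return error;
	}
	/* authorize purely from the gathered attributes */
	return vnode_attr_authorize(&va, &dva, mp, action, ctx);
}
#endif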
10446 
10447 
10448 int
10449 vnode_authattr_new(vnode_t dvp, struct vnode_attr *vap, int noauth, vfs_context_t ctx)
10450 {
10451 	return vnode_authattr_new_internal(dvp, vap, noauth, NULL, ctx);
10452 }
10453 
10454 /*
10455  * Check that the attribute information in vattr can be legally applied to
10456  * a new file by the context.
10457  */
10458 static int
10459 vnode_authattr_new_internal(vnode_t dvp, struct vnode_attr *vap, int noauth, uint32_t *defaulted_fieldsp, vfs_context_t ctx)
10460 {
10461 	int             error;
10462 	int             has_priv_suser, ismember, defaulted_owner, defaulted_group, defaulted_mode;
10463 	uint32_t        inherit_flags;
10464 	kauth_cred_t    cred;
10465 	guid_t          changer;
10466 	mount_t         dmp;
10467 	struct vnode_attr dva;
10468 
10469 	error = 0;
10470 
10471 	if (defaulted_fieldsp) {
10472 		*defaulted_fieldsp = 0;
10473 	}
10474 
10475 	defaulted_owner = defaulted_group = defaulted_mode = 0;
10476 
10477 	inherit_flags = 0;
10478 
10479 	/*
10480 	 * Require that the filesystem support extended security to apply any.
10481 	 */
10482 	if (!vfs_extendedsecurity(dvp->v_mount) &&
10483 	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
10484 		error = EINVAL;
10485 		goto out;
10486 	}
10487 
10488 	/*
10489 	 * Default some fields.
10490 	 */
10491 	dmp = dvp->v_mount;
10492 
10493 	/*
10494 	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit owner is set, that
10495 	 * owner takes ownership of all new files.
10496 	 */
10497 	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsowner != KAUTH_UID_NONE)) {
10498 		VATTR_SET(vap, va_uid, dmp->mnt_fsowner);
10499 		defaulted_owner = 1;
10500 	} else {
10501 		if (!VATTR_IS_ACTIVE(vap, va_uid)) {
10502 			/* default owner is current user */
10503 			VATTR_SET(vap, va_uid, kauth_cred_getuid(vfs_context_ucred(ctx)));
10504 			defaulted_owner = 1;
10505 		}
10506 	}
10507 
10508 	/*
10509 	 * We need the dvp's va_flags and *may* need the gid of the directory,
10510 	 * we ask for both here.
10511 	 */
10512 	VATTR_INIT(&dva);
10513 	VATTR_WANTED(&dva, va_gid);
10514 	VATTR_WANTED(&dva, va_flags);
10515 	if ((error = vnode_getattr(dvp, &dva, ctx)) != 0) {
10516 		goto out;
10517 	}
10518 
10519 	/*
10520 	 * If the filesystem is mounted IGNORE_OWNERSHIP and an explicit group is set, that
10521 	 * group takes ownership of all new files.
10522 	 */
10523 	if ((dmp->mnt_flag & MNT_IGNORE_OWNERSHIP) && (dmp->mnt_fsgroup != KAUTH_GID_NONE)) {
10524 		VATTR_SET(vap, va_gid, dmp->mnt_fsgroup);
10525 		defaulted_group = 1;
10526 	} else {
10527 		if (!VATTR_IS_ACTIVE(vap, va_gid)) {
10528 			/* default group comes from parent object, fallback to current user */
10529 			if (VATTR_IS_SUPPORTED(&dva, va_gid)) {
10530 				VATTR_SET(vap, va_gid, dva.va_gid);
10531 			} else {
10532 				VATTR_SET(vap, va_gid, kauth_cred_getgid(vfs_context_ucred(ctx)));
10533 			}
10534 			defaulted_group = 1;
10535 		}
10536 	}
10537 
10538 	if (!VATTR_IS_ACTIVE(vap, va_flags)) {
10539 		VATTR_SET(vap, va_flags, 0);
10540 	}
10541 
10542 	/* Determine if SF_RESTRICTED should be inherited from the parent
10543 	 * directory. */
10544 	if (VATTR_IS_SUPPORTED(&dva, va_flags)) {
10545 		inherit_flags = dva.va_flags & (UF_DATAVAULT | SF_RESTRICTED);
10546 	}
10547 
10548 	/* default mode is everything, masked with current umask */
10549 	if (!VATTR_IS_ACTIVE(vap, va_mode)) {
10550 		VATTR_SET(vap, va_mode, ACCESSPERMS & ~vfs_context_proc(ctx)->p_fd.fd_cmask);
10551 		KAUTH_DEBUG("ATTR - defaulting new file mode to %o from umask %o",
10552 		    vap->va_mode, vfs_context_proc(ctx)->p_fd.fd_cmask);
10553 		defaulted_mode = 1;
10554 	}
10555 	/* set timestamps to now */
10556 	if (!VATTR_IS_ACTIVE(vap, va_create_time)) {
10557 		nanotime(&vap->va_create_time);
10558 		VATTR_SET_ACTIVE(vap, va_create_time);
10559 	}
10560 
10561 	/*
10562 	 * Check for attempts to set nonsensical fields.
10563 	 */
10564 	if (vap->va_active & ~VNODE_ATTR_NEWOBJ) {
10565 		error = EINVAL;
10566 		KAUTH_DEBUG("ATTR - ERROR - attempt to set unsupported new-file attributes %llx",
10567 		    vap->va_active & ~VNODE_ATTR_NEWOBJ);
10568 		goto out;
10569 	}
10570 
10571 	/*
10572 	 * Quickly check for the applicability of any enforcement here.
10573 	 * Tests below maintain the integrity of the local security model.
10574 	 */
10575 	if (vfs_authopaque(dvp->v_mount)) {
10576 		goto out;
10577 	}
10578 
10579 	/*
10580 	 * We need to know if the caller is the superuser, or if the work is
10581 	 * otherwise already authorised.
10582 	 */
10583 	cred = vfs_context_ucred(ctx);
10584 	if (noauth) {
10585 		/* doing work for the kernel */
10586 		has_priv_suser = 1;
10587 	} else {
10588 		has_priv_suser = vfs_context_issuser(ctx);
10589 	}
10590 
10591 
10592 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
10593 		vap->va_flags &= ~SF_SYNTHETIC;
10594 		if (has_priv_suser) {
10595 			if ((vap->va_flags & (UF_SETTABLE | SF_SETTABLE)) != vap->va_flags) {
10596 				error = EPERM;
10597 				KAUTH_DEBUG("  DENIED - superuser attempt to set illegal flag(s)");
10598 				goto out;
10599 			}
10600 		} else {
10601 			if ((vap->va_flags & UF_SETTABLE) != vap->va_flags) {
10602 				error = EPERM;
10603 				KAUTH_DEBUG("  DENIED - user attempt to set illegal flag(s)");
10604 				goto out;
10605 			}
10606 		}
10607 	}
10608 
10609 	/* if not superuser, validate legality of new-item attributes */
10610 	if (!has_priv_suser) {
10611 		if (!defaulted_mode && VATTR_IS_ACTIVE(vap, va_mode)) {
10612 			/* setgid? */
10613 			if (vap->va_mode & S_ISGID) {
10614 				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
10615 					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
10616 					goto out;
10617 				}
10618 				if (!ismember) {
10619 					KAUTH_DEBUG("  DENIED - can't set SGID bit, not a member of %d", vap->va_gid);
10620 					error = EPERM;
10621 					goto out;
10622 				}
10623 			}
10624 
10625 			/* setuid? */
10626 			if ((vap->va_mode & S_ISUID) && (vap->va_uid != kauth_cred_getuid(cred))) {
10627 				KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
10628 				error = EPERM;
10629 				goto out;
10630 			}
10631 		}
10632 		if (!defaulted_owner && (vap->va_uid != kauth_cred_getuid(cred))) {
10633 			KAUTH_DEBUG("  DENIED - cannot create new item owned by %d", vap->va_uid);
10634 			error = EPERM;
10635 			goto out;
10636 		}
10637 		if (!defaulted_group) {
10638 			if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
10639 				KAUTH_DEBUG("  ERROR - got %d checking for membership in %d", error, vap->va_gid);
10640 				goto out;
10641 			}
10642 			if (!ismember) {
10643 				KAUTH_DEBUG("  DENIED - cannot create new item with group %d - not a member", vap->va_gid);
10644 				error = EPERM;
10645 				goto out;
10646 			}
10647 		}
10648 
10649 		/* initialising owner/group UUID */
10650 		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
10651 			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
10652 				KAUTH_DEBUG("  ERROR - got %d trying to get caller UUID", error);
10653 				/* XXX ENOENT here - no GUID - should perhaps become EPERM */
10654 				goto out;
10655 			}
10656 			if (!kauth_guid_equal(&vap->va_uuuid, &changer)) {
10657 				KAUTH_DEBUG("  ERROR - cannot create item with supplied owner UUID - not us");
10658 				error = EPERM;
10659 				goto out;
10660 			}
10661 		}
10662 		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
10663 			if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
10664 				KAUTH_DEBUG("  ERROR - got %d trying to check group membership", error);
10665 				goto out;
10666 			}
10667 			if (!ismember) {
10668 				KAUTH_DEBUG("  ERROR - cannot create item with supplied group UUID - not a member");
10669 				error = EPERM;
10670 				goto out;
10671 			}
10672 		}
10673 	}
10674 out:
10675 	if (inherit_flags) {
10676 		/* Apply SF_RESTRICTED to the file if its parent directory was
10677 		 * restricted.  This is done at the end so that root is not
10678 		 * required if this flag is only set due to inheritance. */
10679 		VATTR_SET(vap, va_flags, (vap->va_flags | inherit_flags));
10680 	}
10681 	if (defaulted_fieldsp) {
10682 		if (defaulted_mode) {
10683 			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_MODE;
10684 		}
10685 		if (defaulted_group) {
10686 			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_GID;
10687 		}
10688 		if (defaulted_owner) {
10689 			*defaulted_fieldsp |= VATTR_PREPARE_DEFAULTED_UID;
10690 		}
10691 	}
10692 	return error;
10693 }
10694 
10695 /*
10696  * Check that the attribute information in vap can be legally written by the
10697  * context.
10698  *
10699  * Call this when you're not sure about the vnode_attr; either its contents
10700  * have come from an unknown source, or they are variable.
10701  *
10702  * Returns errno, or zero and sets *actionp to the KAUTH_VNODE_* actions that
10703  * must be authorized to be permitted to write the vattr.
10704  */
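
/*
 * Typical caller pattern (illustrative sketch only, not part of this
 * file; error handling abbreviated).  The returned action mask is fed
 * to vnode_authorize() before the attributes are actually written:
 *
 *	kauth_action_t action;
 *	int error;
 *
 *	error = vnode_authattr(vp, vap, &action, ctx);
 *	if (error == 0 && action != 0)
 *		error = vnode_authorize(vp, NULL, action, ctx);
 *	if (error == 0)
 *		error = vnode_setattr(vp, vap, ctx);
 */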
10705 int
10706 vnode_authattr(vnode_t vp, struct vnode_attr *vap, kauth_action_t *actionp, vfs_context_t ctx)
10707 {
10708 	struct vnode_attr ova;
10709 	kauth_action_t  required_action;
10710 	int             error, has_priv_suser, ismember, chowner, chgroup, clear_suid, clear_sgid;
10711 	guid_t          changer;
10712 	gid_t           group;
10713 	uid_t           owner;
10714 	mode_t          newmode;
10715 	kauth_cred_t    cred;
10716 	uint32_t        fdelta;
10717 
10718 	VATTR_INIT(&ova);
10719 	required_action = 0;
10720 	error = 0;
10721 
10722 	/*
10723 	 * Quickly check for enforcement applicability.
10724 	 */
10725 	if (vfs_authopaque(vp->v_mount)) {
10726 		goto out;
10727 	}
10728 
10729 	/*
10730 	 * Check for attempts to set nonsensical fields.
10731 	 */
10732 	if (vap->va_active & VNODE_ATTR_RDONLY) {
10733 		KAUTH_DEBUG("ATTR - ERROR: attempt to set readonly attribute(s)");
10734 		error = EINVAL;
10735 		goto out;
10736 	}
10737 
10738 	/*
10739 	 * We need to know if the caller is the superuser.
10740 	 */
10741 	cred = vfs_context_ucred(ctx);
10742 	has_priv_suser = kauth_cred_issuser(cred);
10743 
10744 	/*
10745 	 * If any of the following are changing, we need information from the old file:
10746 	 * va_uid
10747 	 * va_gid
10748 	 * va_mode
10749 	 * va_uuuid
10750 	 * va_guuid
10751 	 */
10752 	if (VATTR_IS_ACTIVE(vap, va_uid) ||
10753 	    VATTR_IS_ACTIVE(vap, va_gid) ||
10754 	    VATTR_IS_ACTIVE(vap, va_mode) ||
10755 	    VATTR_IS_ACTIVE(vap, va_uuuid) ||
10756 	    VATTR_IS_ACTIVE(vap, va_guuid)) {
10757 		VATTR_WANTED(&ova, va_mode);
10758 		VATTR_WANTED(&ova, va_uid);
10759 		VATTR_WANTED(&ova, va_gid);
10760 		VATTR_WANTED(&ova, va_uuuid);
10761 		VATTR_WANTED(&ova, va_guuid);
10762 		KAUTH_DEBUG("ATTR - security information changing, fetching existing attributes");
10763 	}
10764 
10765 	/*
10766 	 * If timestamps are being changed, we need to know who the file is owned
10767 	 * by.
10768 	 */
10769 	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
10770 	    VATTR_IS_ACTIVE(vap, va_change_time) ||
10771 	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
10772 	    VATTR_IS_ACTIVE(vap, va_access_time) ||
10773 	    VATTR_IS_ACTIVE(vap, va_backup_time) ||
10774 	    VATTR_IS_ACTIVE(vap, va_addedtime)) {
10775 		VATTR_WANTED(&ova, va_uid);
10776 #if 0   /* enable this when we support UUIDs as official owners */
10777 		VATTR_WANTED(&ova, va_uuuid);
10778 #endif
10779 		KAUTH_DEBUG("ATTR - timestamps changing, fetching uid and GUID");
10780 	}
10781 
10782 	/*
10783 	 * If flags are being changed, we need the old flags.
10784 	 */
10785 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
10786 		KAUTH_DEBUG("ATTR - flags changing, fetching old flags");
10787 		VATTR_WANTED(&ova, va_flags);
10788 	}
10789 
10790 	/*
10791 	 * If ACLs are being changed, we need the old ACLs.
10792 	 */
10793 	if (VATTR_IS_ACTIVE(vap, va_acl)) {
10794 		KAUTH_DEBUG("ATTR - acl changing, fetching old flags");
10795 		VATTR_WANTED(&ova, va_acl);
10796 	}
10797 
10798 	/*
10799 	 * If the size is being set, make sure it's not a directory.
10800 	 */
10801 	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
10802 		/* size is only meaningful on regular files, don't permit otherwise */
10803 		if (!vnode_isreg(vp)) {
10804 			KAUTH_DEBUG("ATTR - ERROR: size change requested on non-file");
10805 			error = vnode_isdir(vp) ? EISDIR : EINVAL;
10806 			goto out;
10807 		}
10808 	}
10809 
10810 	/*
10811 	 * Get old data.
10812 	 */
10813 	KAUTH_DEBUG("ATTR - fetching old attributes %016llx", ova.va_active);
10814 	if ((error = vnode_getattr(vp, &ova, ctx)) != 0) {
10815 		KAUTH_DEBUG("  ERROR - got %d trying to get attributes", error);
10816 		goto out;
10817 	}
10818 
10819 	/*
10820 	 * Size changes require write access to the file data.
10821 	 */
10822 	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
10823 		/* if we can't get the size, or it's different, we need write access */
10824 		KAUTH_DEBUG("ATTR - size change, requiring WRITE_DATA");
10825 		required_action |= KAUTH_VNODE_WRITE_DATA;
10826 	}
10827 
10828 	/*
10829 	 * Changing timestamps?
10830 	 *
10831 	 * Note that we are only called to authorize user-requested time changes;
10832 	 * side-effect time changes are not authorized.  Authorisation is only
10833 	 * required for existing files.
10834 	 *
10835 	 * Non-owners are not permitted to change the time on an existing
10836 	 * file to anything other than the current time.
10837 	 */
10838 	if (VATTR_IS_ACTIVE(vap, va_create_time) ||
10839 	    VATTR_IS_ACTIVE(vap, va_change_time) ||
10840 	    VATTR_IS_ACTIVE(vap, va_modify_time) ||
10841 	    VATTR_IS_ACTIVE(vap, va_access_time) ||
10842 	    VATTR_IS_ACTIVE(vap, va_backup_time) ||
10843 	    VATTR_IS_ACTIVE(vap, va_addedtime)) {
10844 		/*
10845 		 * The owner and root may set any timestamps they like,
10846 		 * provided that the file is not immutable.  The owner still needs
10847 		 * WRITE_ATTRIBUTES (implied by ownership but still deniable).
10848 		 */
10849 		if (has_priv_suser || vauth_node_owner(&ova, cred)) {
10850 			KAUTH_DEBUG("ATTR - root or owner changing timestamps");
10851 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE | KAUTH_VNODE_WRITE_ATTRIBUTES;
10852 		} else {
10853 			/* just setting the current time? */
10854 			if (vap->va_vaflags & VA_UTIMES_NULL) {
10855 				KAUTH_DEBUG("ATTR - non-root/owner changing timestamps, requiring WRITE_ATTRIBUTES");
10856 				required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
10857 			} else {
10858 				KAUTH_DEBUG("ATTR - ERROR: illegal timestamp modification attempted");
10859 				error = EACCES;
10860 				goto out;
10861 			}
10862 		}
10863 	}
10864 
10865 	/*
10866 	 * Changing file mode?
10867 	 */
10868 	if (VATTR_IS_ACTIVE(vap, va_mode) && VATTR_IS_SUPPORTED(&ova, va_mode) && (ova.va_mode != vap->va_mode)) {
10869 		KAUTH_DEBUG("ATTR - mode change from %06o to %06o", ova.va_mode, vap->va_mode);
10870 
10871 		/*
10872 		 * Mode changes always have the same basic auth requirements.
10873 		 */
10874 		if (has_priv_suser) {
10875 			KAUTH_DEBUG("ATTR - superuser mode change, requiring immutability check");
10876 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
10877 		} else {
10878 			/* need WRITE_SECURITY */
10879 			KAUTH_DEBUG("ATTR - non-superuser mode change, requiring WRITE_SECURITY");
10880 			required_action |= KAUTH_VNODE_WRITE_SECURITY;
10881 		}
10882 
10883 		/*
10884 		 * Can't set the setgid bit if you're not in the group and not root.  We must
10885 		 * have existing group information if we're not setting it right now.
10886 		 */
10887 		if (vap->va_mode & S_ISGID) {
10888 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;  /* always required */
10889 			if (!has_priv_suser) {
10890 				if (VATTR_IS_ACTIVE(vap, va_gid)) {
10891 					group = vap->va_gid;
10892 				} else if (VATTR_IS_SUPPORTED(&ova, va_gid)) {
10893 					group = ova.va_gid;
10894 				} else {
10895 					KAUTH_DEBUG("ATTR - ERROR: setgid but no gid available");
10896 					error = EINVAL;
10897 					goto out;
10898 				}
10899 				/*
10900 				 * This might be too restrictive; WRITE_SECURITY might be implied by
10901 				 * membership in this case, rather than being an additional requirement.
10902 				 */
10903 				if ((error = kauth_cred_ismember_gid(cred, group, &ismember)) != 0) {
10904 					KAUTH_DEBUG("ATTR - ERROR: got %d checking for membership in %d", error, vap->va_gid);
10905 					goto out;
10906 				}
10907 				if (!ismember) {
10908 					KAUTH_DEBUG("  DENIED - can't set SGID bit, not a member of %d", group);
10909 					error = EPERM;
10910 					goto out;
10911 				}
10912 			}
10913 		}
10914 
10915 		/*
10916 		 * Can't set the setuid bit unless you're root or the file's owner.
10917 		 */
10918 		if (vap->va_mode & S_ISUID) {
10919 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;  /* always required */
10920 			if (!has_priv_suser) {
10921 				if (VATTR_IS_ACTIVE(vap, va_uid)) {
10922 					owner = vap->va_uid;
10923 				} else if (VATTR_IS_SUPPORTED(&ova, va_uid)) {
10924 					owner = ova.va_uid;
10925 				} else {
10926 					KAUTH_DEBUG("ATTR - ERROR: setuid but no uid available");
10927 					error = EINVAL;
10928 					goto out;
10929 				}
10930 				if (owner != kauth_cred_getuid(cred)) {
10931 					/*
10932 					 * We could allow this if WRITE_SECURITY is permitted, perhaps.
10933 					 */
10934 					KAUTH_DEBUG("ATTR - ERROR: illegal attempt to set the setuid bit");
10935 					error = EPERM;
10936 					goto out;
10937 				}
10938 			}
10939 		}
10940 	}
10941 
10942 	/*
10943 	 * Validate/mask flags changes.  This checks that only the flags in
10944 	 * the UF_SETTABLE mask are being set, and preserves the flags in
10945 	 * the SF_SETTABLE case.
10946 	 *
10947 	 * Since flags changes may be made in conjunction with other changes,
10948 	 * we will ask the auth code to ignore immutability in the case that
10949 	 * the SF_* flags are not set and we are only manipulating the file flags.
10950 	 *
10951 	 */
10952 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
10953 		/* compute changing flags bits */
10954 		vap->va_flags &= ~SF_SYNTHETIC;
10955 		ova.va_flags &= ~SF_SYNTHETIC;
10956 		if (VATTR_IS_SUPPORTED(&ova, va_flags)) {
10957 			fdelta = vap->va_flags ^ ova.va_flags;
10958 		} else {
10959 			fdelta = vap->va_flags;
10960 		}
10961 
10962 		if (fdelta != 0) {
10963 			KAUTH_DEBUG("ATTR - flags changing, requiring WRITE_SECURITY");
10964 			required_action |= KAUTH_VNODE_WRITE_SECURITY;
10965 
10966 			/* check that changing bits are legal */
10967 			if (has_priv_suser) {
10968 				/*
10969 				 * The immutability check will prevent us from clearing the SF_*
10970 				 * flags unless the system securelevel permits it, so just check
10971 				 * for legal flags here.
10972 				 */
10973 				if (fdelta & ~(UF_SETTABLE | SF_SETTABLE)) {
10974 					error = EPERM;
10975 					KAUTH_DEBUG("  DENIED - superuser attempt to set illegal flag(s)");
10976 					goto out;
10977 				}
10978 			} else {
10979 				if (fdelta & ~UF_SETTABLE) {
10980 					error = EPERM;
10981 					KAUTH_DEBUG("  DENIED - user attempt to set illegal flag(s)");
10982 					goto out;
10983 				}
10984 			}
10985 			/*
10986 			 * If the caller has the ability to manipulate file flags,
10987 			 * security is not reduced by ignoring them for this operation.
10988 			 *
10989 			 * A more complete test here would consider the 'after' states of the flags
10990 			 * to determine whether it would permit the operation, but this becomes
10991 			 * very complex.
10992 			 *
10993 			 * Ignoring immutability is conditional on securelevel; this does not bypass
10994 			 * the SF_* flags if securelevel > 0.
10995 			 */
10996 			required_action |= KAUTH_VNODE_NOIMMUTABLE;
10997 		}
10998 	}
10999 
11000 	/*
11001 	 * Validate ownership information.
11002 	 */
11003 	chowner = 0;
11004 	chgroup = 0;
11005 	clear_suid = 0;
11006 	clear_sgid = 0;
11007 
11008 	/*
11009 	 * uid changing
11010 	 * Note that if the filesystem didn't give us a UID, we expect that it doesn't
11011 	 * support them in general, and will ignore it if/when we try to set it.
11012 	 * We might want to clear the uid out of vap completely here.
11013 	 */
11014 	if (VATTR_IS_ACTIVE(vap, va_uid)) {
11015 		if (VATTR_IS_SUPPORTED(&ova, va_uid) && (vap->va_uid != ova.va_uid)) {
11016 			if (!has_priv_suser && (kauth_cred_getuid(cred) != vap->va_uid)) {
11017 				KAUTH_DEBUG("  DENIED - non-superuser cannot change ownership to a third party");
11018 				error = EPERM;
11019 				goto out;
11020 			}
11021 			chowner = 1;
11022 		}
11023 		clear_suid = 1;
11024 	}
11025 
11026 	/*
11027 	 * gid changing
11028 	 * Note that if the filesystem didn't give us a GID, we expect that it doesn't
11029 	 * support them in general, and will ignore it if/when we try to set it.
11030 	 * We might want to clear the gid out of vap completely here.
11031 	 */
11032 	if (VATTR_IS_ACTIVE(vap, va_gid)) {
11033 		if (VATTR_IS_SUPPORTED(&ova, va_gid) && (vap->va_gid != ova.va_gid)) {
11034 			if (!has_priv_suser) {
11035 				if ((error = kauth_cred_ismember_gid(cred, vap->va_gid, &ismember)) != 0) {
11036 					KAUTH_DEBUG("  ERROR - got %d checking for membership in %d", error, vap->va_gid);
11037 					goto out;
11038 				}
11039 				if (!ismember) {
11040 					KAUTH_DEBUG("  DENIED - group change from %d to %d but not a member of target group",
11041 					    ova.va_gid, vap->va_gid);
11042 					error = EPERM;
11043 					goto out;
11044 				}
11045 			}
11046 			chgroup = 1;
11047 		}
11048 		clear_sgid = 1;
11049 	}
11050 
11051 	/*
11052 	 * Owner UUID being set or changed.
11053 	 */
11054 	if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
11055 		/* if the owner UUID is not actually changing ... */
11056 		if (VATTR_IS_SUPPORTED(&ova, va_uuuid)) {
11057 			if (kauth_guid_equal(&vap->va_uuuid, &ova.va_uuuid)) {
11058 				goto no_uuuid_change;
11059 			}
11060 
11061 			/*
11062 			 * If the current owner UUID is a null GUID, check
11063 			 * it against the UUID corresponding to the owner UID.
11064 			 */
11065 			if (kauth_guid_equal(&ova.va_uuuid, &kauth_null_guid) &&
11066 			    VATTR_IS_SUPPORTED(&ova, va_uid)) {
11067 				guid_t uid_guid;
11068 
11069 				if (kauth_cred_uid2guid(ova.va_uid, &uid_guid) == 0 &&
11070 				    kauth_guid_equal(&vap->va_uuuid, &uid_guid)) {
11071 					goto no_uuuid_change;
11072 				}
11073 			}
11074 		}
11075 
11076 		/*
11077 		 * The owner UUID cannot be set by a non-superuser to anything other than
11078 		 * their own or a null GUID (to "unset" the owner UUID).
11079 		 * Note that file systems must be prepared to handle the
11080 		 * null UUID case in a manner appropriate for that file
11081 		 * system.
11082 		 */
11083 		if (!has_priv_suser) {
11084 			if ((error = kauth_cred_getguid(cred, &changer)) != 0) {
11085 				KAUTH_DEBUG("  ERROR - got %d trying to get caller UUID", error);
11086 				/* XXX ENOENT here - no UUID - should perhaps become EPERM */
11087 				goto out;
11088 			}
11089 			if (!kauth_guid_equal(&vap->va_uuuid, &changer) &&
11090 			    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
11091 				KAUTH_DEBUG("  ERROR - cannot set supplied owner UUID - not us / null");
11092 				error = EPERM;
11093 				goto out;
11094 			}
11095 		}
11096 		chowner = 1;
11097 		clear_suid = 1;
11098 	}
11099 no_uuuid_change:
11100 	/*
11101 	 * Group UUID being set or changed.
11102 	 */
11103 	if (VATTR_IS_ACTIVE(vap, va_guuid)) {
11104 		/* if the group UUID is not actually changing ... */
11105 		if (VATTR_IS_SUPPORTED(&ova, va_guuid)) {
11106 			if (kauth_guid_equal(&vap->va_guuid, &ova.va_guuid)) {
11107 				goto no_guuid_change;
11108 			}
11109 
11110 			/*
11111 			 * If the current group UUID is a null UUID, check
11112 			 * it against the UUID corresponding to the group GID.
11113 			 */
11114 			if (kauth_guid_equal(&ova.va_guuid, &kauth_null_guid) &&
11115 			    VATTR_IS_SUPPORTED(&ova, va_gid)) {
11116 				guid_t gid_guid;
11117 
11118 				if (kauth_cred_gid2guid(ova.va_gid, &gid_guid) == 0 &&
11119 				    kauth_guid_equal(&vap->va_guuid, &gid_guid)) {
11120 					goto no_guuid_change;
11121 				}
11122 			}
11123 		}
11124 
11125 		/*
11126 		 * The group UUID cannot be set by a non-superuser to anything other than
11127 		 * one of which they are a member or a null GUID (to "unset"
11128 		 * the group UUID).
11129 		 * Note that file systems must be prepared to handle the
11130 		 * null UUID case in a manner appropriate for that file
11131 		 * system.
11132 		 */
11133 		if (!has_priv_suser) {
11134 			if (kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
11135 				ismember = 1;
11136 			} else if ((error = kauth_cred_ismember_guid(cred, &vap->va_guuid, &ismember)) != 0) {
11137 				KAUTH_DEBUG("  ERROR - got %d trying to check group membership", error);
11138 				goto out;
11139 			}
11140 			if (!ismember) {
11141 				KAUTH_DEBUG("  ERROR - cannot set supplied group UUID - not a member / null");
11142 				error = EPERM;
11143 				goto out;
11144 			}
11145 		}
11146 		chgroup = 1;
11147 	}
11148 no_guuid_change:
11149 
11150 	/*
11151 	 * Compute authorisation for group/ownership changes.
11152 	 */
11153 	if (chowner || chgroup || clear_suid || clear_sgid) {
11154 		if (has_priv_suser) {
11155 			KAUTH_DEBUG("ATTR - superuser changing file owner/group, requiring immutability check");
11156 			required_action |= KAUTH_VNODE_CHECKIMMUTABLE;
11157 		} else {
11158 			if (chowner) {
11159 				KAUTH_DEBUG("ATTR - ownership change, requiring TAKE_OWNERSHIP");
11160 				required_action |= KAUTH_VNODE_TAKE_OWNERSHIP;
11161 			}
11162 			if (chgroup && !chowner) {
11163 				KAUTH_DEBUG("ATTR - group change, requiring WRITE_SECURITY");
11164 				required_action |= KAUTH_VNODE_WRITE_SECURITY;
11165 			}
11166 		}
11167 
11168 		/*
11169 		 * clear set-uid and set-gid bits. POSIX only requires this for
11170 		 * non-privileged processes but we do it even for root.
11171 		 */
11172 		if (VATTR_IS_ACTIVE(vap, va_mode)) {
11173 			newmode = vap->va_mode;
11174 		} else if (VATTR_IS_SUPPORTED(&ova, va_mode)) {
11175 			newmode = ova.va_mode;
11176 		} else {
11177 			KAUTH_DEBUG("CHOWN - trying to change owner but cannot get mode from filesystem to mask setugid bits");
11178 			newmode = 0;
11179 		}
11180 
11181 		/* chown always clears the setuid/setgid bits. An exception is made for
11182 		 * setattrlist, which can set <uid, gid, mode> on a file at the same time:
11183 		 * setattrlist is allowed to apply the new mode to the file while also
11184 		 * changing (chowning) the uid/gid.
11185 		 */
11186 		if (newmode & (S_ISUID | S_ISGID)) {
11187 			if (!VATTR_IS_ACTIVE(vap, va_mode)) {
11188 				KAUTH_DEBUG("CHOWN - masking setugid bits from mode %o to %o",
11189 				    newmode, newmode & ~(S_ISUID | S_ISGID));
11190 				newmode &= ~(S_ISUID | S_ISGID);
11191 			}
11192 			VATTR_SET(vap, va_mode, newmode);
11193 		}
11194 	}
11195 
11196 	/*
11197 	 * Authorise changes in the ACL.
11198 	 */
11199 	if (VATTR_IS_ACTIVE(vap, va_acl)) {
11200 		/* no existing ACL */
11201 		if (!VATTR_IS_ACTIVE(&ova, va_acl) || (ova.va_acl == NULL)) {
11202 			/* adding an ACL */
11203 			if (vap->va_acl != NULL) {
11204 				required_action |= KAUTH_VNODE_WRITE_SECURITY;
11205 				KAUTH_DEBUG("CHMOD - adding ACL");
11206 			}
11207 
11208 			/* removing an existing ACL */
11209 		} else if (vap->va_acl == NULL) {
11210 			required_action |= KAUTH_VNODE_WRITE_SECURITY;
11211 			KAUTH_DEBUG("CHMOD - removing ACL");
11212 
11213 			/* updating an existing ACL */
11214 		} else {
11215 			if (vap->va_acl->acl_entrycount != ova.va_acl->acl_entrycount) {
11216 				/* entry count changed, must be different */
11217 				required_action |= KAUTH_VNODE_WRITE_SECURITY;
11218 				KAUTH_DEBUG("CHMOD - adding/removing ACL entries");
11219 			} else if (vap->va_acl->acl_entrycount > 0) {
11220 				/* both ACLs have the same ACE count, said count is 1 or more, bitwise compare ACLs */
11221 				if (memcmp(&vap->va_acl->acl_ace[0], &ova.va_acl->acl_ace[0],
11222 				    sizeof(struct kauth_ace) * vap->va_acl->acl_entrycount)) {
11223 					required_action |= KAUTH_VNODE_WRITE_SECURITY;
11224 					KAUTH_DEBUG("CHMOD - changing ACL entries");
11225 				}
11226 			}
11227 		}
11228 	}
11229 
11230 	/*
11231 	 * Other attributes that require authorisation.
11232 	 */
11233 	if (VATTR_IS_ACTIVE(vap, va_encoding)) {
11234 		required_action |= KAUTH_VNODE_WRITE_ATTRIBUTES;
11235 	}
11236 
11237 out:
11238 	if (VATTR_IS_SUPPORTED(&ova, va_acl) && (ova.va_acl != NULL)) {
11239 		kauth_acl_free(ova.va_acl);
11240 	}
11241 	if (error == 0) {
11242 		*actionp = required_action;
11243 	}
11244 	return error;
11245 }
11246 
11247 static int
11248 setlocklocal_callback(struct vnode *vp, __unused void *cargs)
11249 {
11250 	vnode_lock_spin(vp);
11251 	vp->v_flag |= VLOCKLOCAL;
11252 	vnode_unlock(vp);
11253 
11254 	return VNODE_RETURNED;
11255 }
11256 
11257 void
11258 vfs_setlocklocal(mount_t mp)
11259 {
11260 	mount_lock_spin(mp);
11261 	mp->mnt_kern_flag |= MNTK_LOCK_LOCAL;
11262 	mount_unlock(mp);
11263 
11264 	/*
11265 	 * The number of active vnodes is expected to be
11266 	 * very small when vfs_setlocklocal is invoked.
11267 	 */
11268 	vnode_iterate(mp, 0, setlocklocal_callback, NULL);
11269 }
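
/*
 * Illustrative sketch (hypothetical filesystem, not part of this file):
 * a filesystem that wants advisory locking handled by the in-kernel
 * local lock implementation calls this from its mount handler:
 *
 *	static int
 *	examplefs_mount(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
 *	{
 *		...
 *		vfs_setlocklocal(mp);	// service VNOP_ADVLOCK locally
 *		return 0;
 *	}
 */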
11270 
11271 void
11272 vfs_setcompoundopen(mount_t mp)
11273 {
11274 	mount_lock_spin(mp);
11275 	mp->mnt_compound_ops |= COMPOUND_VNOP_OPEN;
11276 	mount_unlock(mp);
11277 }
11278 
11279 void
11280 vnode_setswapmount(vnode_t vp)
11281 {
11282 	mount_lock(vp->v_mount);
11283 	vp->v_mount->mnt_kern_flag |= MNTK_SWAP_MOUNT;
11284 	mount_unlock(vp->v_mount);
11285 }
11286 
11287 void
11288 vfs_setfskit(mount_t mp)
11289 {
11290 	mount_lock_spin(mp);
11291 	mp->mnt_kern_flag |= MNTK_FSKIT;
11292 	mount_unlock(mp);
11293 }
11294 
11295 uint32_t
11296 vfs_getextflags(mount_t mp)
11297 {
11298 	uint32_t flags_ext = 0;
11299 
11300 	if (mp->mnt_kern_flag & MNTK_SYSTEMDATA) {
11301 		flags_ext |= MNT_EXT_ROOT_DATA_VOL;
11302 	}
11303 	if (mp->mnt_kern_flag & MNTK_FSKIT) {
11304 		flags_ext |= MNT_EXT_FSKIT;
11305 	}
11306 	return flags_ext;
11307 }
11308 
11309 char *
11310 vfs_getfstypenameref_locked(mount_t mp, size_t *lenp)
11311 {
11312 	char *name;
11313 
11314 	if (mp->mnt_kern_flag & MNTK_TYPENAME_OVERRIDE) {
11315 		name = mp->fstypename_override;
11316 	} else {
11317 		name = mp->mnt_vfsstat.f_fstypename;
11318 	}
11319 	if (lenp != NULL) {
11320 		*lenp = strlen(name);
11321 	}
11322 	return name;
11323 }
11324 
11325 void
11326 vfs_getfstypename(mount_t mp, char *buf, size_t buflen)
11327 {
11328 	mount_lock_spin(mp);
11329 	strlcpy(buf, vfs_getfstypenameref_locked(mp, NULL), buflen);
11330 	mount_unlock(mp);
11331 }
11332 
11333 void
11334 vfs_setfstypename_locked(mount_t mp, const char *name)
11335 {
11336 	if (name == NULL || name[0] == '\0') {
11337 		mp->mnt_kern_flag &= ~MNTK_TYPENAME_OVERRIDE;
11338 		mp->fstypename_override[0] = '\0';
11339 	} else {
11340 		strlcpy(mp->fstypename_override, name,
11341 		    sizeof(mp->fstypename_override));
11342 		mp->mnt_kern_flag |= MNTK_TYPENAME_OVERRIDE;
11343 	}
11344 }
11345 
11346 void
11347 vfs_setfstypename(mount_t mp, const char *name)
11348 {
11349 	mount_lock_spin(mp);
11350 	vfs_setfstypename_locked(mp, name);
11351 	mount_unlock(mp);
11352 }
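
/*
 * Illustrative usage (hypothetical): a non-empty name installs an
 * override that vfs_getfstypename() reports in place of f_fstypename;
 * NULL or "" clears it again:
 *
 *	vfs_setfstypename(mp, "examplefs");	// report "examplefs"
 *	vfs_setfstypename(mp, NULL);		// revert to f_fstypename
 */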
11353 
11354 int64_t
11355 vnode_getswappin_avail(vnode_t vp)
11356 {
11357 	int64_t max_swappin_avail = 0;
11358 
11359 	mount_lock(vp->v_mount);
11360 	if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_SWAPPIN_SUPPORTED) {
11361 		max_swappin_avail = vp->v_mount->mnt_max_swappin_available;
11362 	}
11363 	mount_unlock(vp->v_mount);
11364 
11365 	return max_swappin_avail;
11366 }
11367 
11368 
11369 void
11370 vn_setunionwait(vnode_t vp)
11371 {
11372 	vnode_lock_spin(vp);
11373 	vp->v_flag |= VISUNION;
11374 	vnode_unlock(vp);
11375 }
11376 
11377 
11378 void
11379 vn_checkunionwait(vnode_t vp)
11380 {
11381 	vnode_lock_spin(vp);
11382 	while ((vp->v_flag & VISUNION) == VISUNION) {
11383 		msleep((caddr_t)&vp->v_flag, &vp->v_lock, 0, 0, 0);
11384 	}
11385 	vnode_unlock(vp);
11386 }
11387 
11388 void
11389 vn_clearunionwait(vnode_t vp, int locked)
11390 {
11391 	if (!locked) {
11392 		vnode_lock_spin(vp);
11393 	}
11394 	if ((vp->v_flag & VISUNION) == VISUNION) {
11395 		vp->v_flag &= ~VISUNION;
11396 		wakeup((caddr_t)&vp->v_flag);
11397 	}
11398 	if (!locked) {
11399 		vnode_unlock(vp);
11400 	}
11401 }
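
/*
 * Illustrative pairing (sketch only): one thread marks the vnode as in
 * transition, other threads block until the flag is cleared:
 *
 *	vn_setunionwait(vp);		// thread A: mark transition in progress
 *	...				// thread B: vn_checkunionwait(vp) sleeps here
 *	vn_clearunionwait(vp, 0);	// thread A: clear VISUNION, wake waiters
 */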
11402 
11403 /*
11404  * Removes orphaned apple double files during a rmdir
11405  * Works by:
11406  * 1. vnode_suspend().
11407  * 2. Call VNOP_READDIR() till the end of directory is reached.
11408  * 3. Check if the directory entries returned are regular files with name starting with "._".  If not, return ENOTEMPTY.
11409  * 4. Continue (2) and (3) till end of directory is reached.
11410  * 5. If all the entries in the directory were files with "._" name, delete all the files.
11411  * 6. vnode_resume()
11412  * 7. If deletion of all files succeeded, call VNOP_RMDIR() again.
11413  */
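
/*
 * Caller-side sketch (hypothetical, error handling abbreviated): since
 * EBUSY from vnode_suspend() sets *restart_flag, the rmdir path is
 * expected to back off and retry:
 *
 *	int restart = 0;
 *	error = rmdir_remove_orphaned_appleDouble(vp, ctx, &restart);
 *	if (restart) {
 *		// drop references, nap briefly, then restart the rmdir
 *	}
 */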
11414 
11415 errno_t
11416 rmdir_remove_orphaned_appleDouble(vnode_t vp, vfs_context_t ctx, int * restart_flag)
11417 {
11418 #define UIO_BUFF_SIZE 2048
11419 	uio_t auio = NULL;
11420 	int eofflag, siz = UIO_BUFF_SIZE, alloc_size = 0, nentries = 0;
11421 	int open_flag = 0, full_erase_flag = 0;
11422 	UIO_STACKBUF(uio_buf, 1);
11423 	char *rbuf = NULL;
11424 	void *dir_pos;
11425 	void *dir_end;
11426 	struct dirent *dp;
11427 	errno_t error;
11428 
11429 	error = vnode_suspend(vp);
11430 
11431 	/*
11432 	 * restart_flag is set so that the calling rmdir sleeps and resets
11433 	 * restart_flag is set so that the calling rmdir sleeps and retries
11434 	if (error == EBUSY) {
11435 		*restart_flag = 1;
11436 	}
11437 	if (error != 0) {
11438 		return error;
11439 	}
11440 
11441 	/*
11442 	 * Prevent dataless fault materialization while we have
11443 	 * a suspended vnode.
11444 	 */
11445 	uthread_t ut = current_uthread();
11446 	bool saved_nodatalessfaults =
11447 	    (ut->uu_flag & UT_NSPACE_NODATALESSFAULTS) ? true : false;
11448 	ut->uu_flag |= UT_NSPACE_NODATALESSFAULTS;
11449 
11450 	/*
11451 	 * set up UIO
11452 	 */
11453 	rbuf = kalloc_data(siz, Z_WAITOK);
11454 	alloc_size = siz;
11455 	if (rbuf) {
11456 		auio = uio_createwithbuffer(1, 0, UIO_SYSSPACE, UIO_READ,
11457 		    &uio_buf[0], sizeof(uio_buf));
11458 	}
11459 	if (!rbuf || !auio) {
11460 		error = ENOMEM;
11461 		goto outsc;
11462 	}
11463 
11464 	uio_setoffset(auio, 0);
11465 
11466 	eofflag = 0;
11467 
11468 	if ((error = VNOP_OPEN(vp, FREAD, ctx))) {
11469 		goto outsc;
11470 	} else {
11471 		open_flag = 1;
11472 	}
11473 
11474 	/*
11475 	 * First pass checks if all files are appleDouble files.
11476 	 */
11477 
11478 	do {
11479 		siz = UIO_BUFF_SIZE;
11480 		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
11481 		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
11482 
11483 		if ((error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx))) {
11484 			goto outsc;
11485 		}
11486 
11487 		if (uio_resid(auio) != 0) {
11488 			siz -= uio_resid(auio);
11489 		}
11490 
11491 		/*
11492 		 * Iterate through directory
11493 		 */
11494 		dir_pos = (void*) rbuf;
11495 		dir_end = (void*) (rbuf + siz);
11496 		dp = (struct dirent*) (dir_pos);
11497 
11498 		if (dir_pos == dir_end) {
11499 			eofflag = 1;
11500 		}
11501 
11502 		while (dir_pos < dir_end) {
11503 			/*
11504 			 * Check for . and .. as well as directories
11505 			 */
11506 			if (dp->d_ino != 0 &&
11507 			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
11508 			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))) {
11509 				/*
11510 				 * Check for irregular files and ._ files
11511 				 * If there is a ._._ file abort the op
11512 				 */
11513 				if (dp->d_namlen < 2 ||
11514 				    strncmp(dp->d_name, "._", 2) ||
11515 				    (dp->d_namlen >= 4 && !strncmp(&(dp->d_name[2]), "._", 2))) {
11516 					error = ENOTEMPTY;
11517 					goto outsc;
11518 				}
11519 			}
11520 			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
11521 			dp = (struct dirent*)dir_pos;
11522 		}
11523 
11524 		/*
11525 		 * workaround for HFS/NFS setting eofflag before end of file
11526 		 */
11527 		if (vp->v_tag == VT_HFS && nentries > 2) {
11528 			eofflag = 0;
11529 		}
11530 
11531 		if (vp->v_tag == VT_NFS) {
11532 			if (eofflag && !full_erase_flag) {
11533 				full_erase_flag = 1;
11534 				eofflag = 0;
11535 				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
11536 			} else if (!eofflag && full_erase_flag) {
11537 				full_erase_flag = 0;
11538 			}
11539 		}
11540 	} while (!eofflag);
11541 	/*
11542 	 * If we've made it here, all the files in the dir are ._ files.
11543 	 * We can delete the files even though the node is suspended
11544 	 * because we are the owner of the file.
11545 	 */
11546 
11547 	uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
11548 	eofflag = 0;
11549 	full_erase_flag = 0;
11550 
11551 	do {
11552 		siz = UIO_BUFF_SIZE;
11553 		uio_reset(auio, uio_offset(auio), UIO_SYSSPACE, UIO_READ);
11554 		uio_addiov(auio, CAST_USER_ADDR_T(rbuf), UIO_BUFF_SIZE);
11555 
11556 		error = VNOP_READDIR(vp, auio, 0, &eofflag, &nentries, ctx);
11557 
11558 		if (error != 0) {
11559 			goto outsc;
11560 		}
11561 
11562 		if (uio_resid(auio) != 0) {
11563 			siz -= uio_resid(auio);
11564 		}
11565 
11566 		/*
11567 		 * Iterate through directory
11568 		 */
11569 		dir_pos = (void*) rbuf;
11570 		dir_end = (void*) (rbuf + siz);
11571 		dp = (struct dirent*) dir_pos;
11572 
11573 		if (dir_pos == dir_end) {
11574 			eofflag = 1;
11575 		}
11576 
11577 		while (dir_pos < dir_end) {
11578 			/*
11579 			 * Check for . and .. as well as directories
11580 			 */
11581 			if (dp->d_ino != 0 &&
11582 			    !((dp->d_namlen == 1 && dp->d_name[0] == '.') ||
11583 			    (dp->d_namlen == 2 && dp->d_name[0] == '.' && dp->d_name[1] == '.'))
11584 			    ) {
11585 				error = unlink1(ctx, vp,
11586 				    CAST_USER_ADDR_T(dp->d_name), UIO_SYSSPACE,
11587 				    VNODE_REMOVE_SKIP_NAMESPACE_EVENT |
11588 				    VNODE_REMOVE_NO_AUDIT_PATH);
11589 
11590 				if (error && error != ENOENT) {
11591 					goto outsc;
11592 				}
11593 			}
11594 			dir_pos = (void*) ((uint8_t*)dir_pos + dp->d_reclen);
11595 			dp = (struct dirent*)dir_pos;
11596 		}
11597 
11598 		/*
11599 		 * workaround for HFS/NFS setting eofflag before end of file
11600 		 */
11601 		if (vp->v_tag == VT_HFS && nentries > 2) {
11602 			eofflag = 0;
11603 		}
11604 
11605 		if (vp->v_tag == VT_NFS) {
11606 			if (eofflag && !full_erase_flag) {
11607 				full_erase_flag = 1;
11608 				eofflag = 0;
11609 				uio_reset(auio, 0, UIO_SYSSPACE, UIO_READ);
11610 			} else if (!eofflag && full_erase_flag) {
11611 				full_erase_flag = 0;
11612 			}
11613 		}
11614 	} while (!eofflag);
11615 
11616 
11617 	error = 0;
11618 
11619 outsc:
11620 	if (open_flag) {
11621 		VNOP_CLOSE(vp, FREAD, ctx);
11622 	}
11623 
11624 	if (auio) {
11625 		uio_free(auio);
11626 	}
11627 	kfree_data(rbuf, alloc_size);
11628 
11629 	if (saved_nodatalessfaults == false) {
11630 		ut->uu_flag &= ~UT_NSPACE_NODATALESSFAULTS;
11631 	}
11632 
11633 	vnode_resume(vp);
11634 
11635 	return error;
11636 }
11637 
11638 
11639 void
11640 lock_vnode_and_post(vnode_t vp, int kevent_num)
11641 {
11642 	/* Only take the lock if there's something there! */
11643 	if (vp->v_knotes.slh_first != NULL) {
11644 		vnode_lock(vp);
11645 		KNOTE(&vp->v_knotes, kevent_num);
11646 		vnode_unlock(vp);
11647 	}
11648 }
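
/*
 * Illustrative (sketch only): deliver a vnode event to any registered
 * knotes, e.g. after data has been written to the file:
 *
 *	lock_vnode_and_post(vp, NOTE_WRITE);
 */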
11649 
11650 void panic_print_vnodes(void);
11651 
11652 /* define PANIC_PRINTS_VNODES only if investigation is required. */
11653 #ifdef PANIC_PRINTS_VNODES
11654 
11655 static const char *
11656 __vtype(uint16_t vtype)
11657 {
11658 	switch (vtype) {
11659 	case VREG:
11660 		return "R";
11661 	case VDIR:
11662 		return "D";
11663 	case VBLK:
11664 		return "B";
11665 	case VCHR:
11666 		return "C";
11667 	case VLNK:
11668 		return "L";
11669 	case VSOCK:
11670 		return "S";
11671 	case VFIFO:
11672 		return "F";
11673 	case VBAD:
11674 		return "x";
11675 	case VSTR:
11676 		return "T";
11677 	case VCPLX:
11678 		return "X";
11679 	default:
11680 		return "?";
11681 	}
11682 }
11683 
11684 /*
11685  * build a path from the bottom up
11686  * NOTE: called from the panic path - no alloc'ing of memory and no locks!
11687  */
11688 static char *
11689 __vpath(vnode_t vp, char *str, int len, int depth)
11690 {
11691 	int vnm_len;
11692 	const char *src;
11693 	char *dst;
11694 
11695 	if (len <= 0) {
11696 		return str;
11697 	}
11698 	/* str + len is the start of the string we created */
11699 	if (!vp->v_name) {
11700 		return str + len;
11701 	}
11702 
11703 	/* follow mount vnodes to get the full path */
11704 	if ((vp->v_flag & VROOT)) {
11705 		if (vp->v_mount != NULL && vp->v_mount->mnt_vnodecovered) {
11706 			return __vpath(vp->v_mount->mnt_vnodecovered,
11707 			           str, len, depth + 1);
11708 		}
11709 		return str + len;
11710 	}
11711 
11712 	src = vp->v_name;
11713 	vnm_len = strlen(src);
11714 	if (vnm_len > len) {
11715 		/* truncate the name to fit in the string */
11716 		src += (vnm_len - len);
11717 		vnm_len = len;
11718 	}
11719 
11720 	/* start from the back and copy just characters (no NULLs) */
11721 
11722 	/* this will chop off leaf path (file) names */
11723 	if (depth > 0) {
11724 		dst = str + len - vnm_len;
11725 		memcpy(dst, src, vnm_len);
11726 		len -= vnm_len;
11727 	} else {
11728 		dst = str + len;
11729 	}
11730 
11731 	if (vp->v_parent && len > 1) {
11732 		/* follow parents up the chain */
11733 		len--;
11734 		*(dst - 1) = '/';
11735 		return __vpath(vp->v_parent, str, len, depth + 1);
11736 	}
11737 
11738 	return dst;
11739 }
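
/*
 * Worked example (illustrative): the path is assembled from the back of
 * the buffer.  For a leaf vnode "file" whose parent "dir" sits in the
 * root, __vpath(vp, buf, len, 0) drops the leaf name (depth == 0) and
 * returns a pointer into the tail of the buffer:
 *
 *	buf: [ ...unused... | / | d | i | r | / ]
 *	                      ^ returned pointer ("/dir/")
 */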
11740 
11741 #define SANE_VNODE_PRINT_LIMIT 5000
11742 void
11743 panic_print_vnodes(void)
11744 {
11745 	mount_t mnt;
11746 	vnode_t vp;
11747 	int nvnodes = 0;
11748 	const char *type;
11749 	char *nm;
11750 	char vname[257];
11751 
11752 	paniclog_append_noflush("\n***** VNODES *****\n"
11753 	    "TYPE UREF ICNT PATH\n");
11754 
11755 	/* NULL-terminate the path name */
11756 	vname[sizeof(vname) - 1] = '\0';
11757 
11758 	/*
11759 	 * iterate all vnodelist items in all mounts (mntlist) -> mnt_vnodelist
11760 	 */
11761 	TAILQ_FOREACH(mnt, &mountlist, mnt_list) {
11762 		if (!ml_validate_nofault((vm_offset_t)mnt, sizeof(mount_t))) {
11763 			paniclog_append_noflush("Unable to iterate the mount list %p - encountered an invalid mount pointer %p \n",
11764 			    &mountlist, mnt);
11765 			break;
11766 		}
11767 
11768 		TAILQ_FOREACH(vp, &mnt->mnt_vnodelist, v_mntvnodes) {
11769 			if (!ml_validate_nofault((vm_offset_t)vp, sizeof(vnode_t))) {
11770 				paniclog_append_noflush("Unable to iterate the vnode list %p - encountered an invalid vnode pointer %p \n",
11771 				    &mnt->mnt_vnodelist, vp);
11772 				break;
11773 			}
11774 
11775 			if (++nvnodes > SANE_VNODE_PRINT_LIMIT) {
11776 				return;
11777 			}
11778 			type = __vtype(vp->v_type);
11779 			nm = __vpath(vp, vname, sizeof(vname) - 1, 0);
11780 			paniclog_append_noflush("%s %0d %0d %s\n",
11781 			    type, vp->v_usecount, vp->v_iocount, nm);
11782 		}
11783 	}
11784 }
11785 
11786 #else /* !PANIC_PRINTS_VNODES */
11787 void
11788 panic_print_vnodes(void)
11789 {
11790 	return;
11791 }
11792 #endif
11793 
11794 
11795 #ifdef CONFIG_IOCOUNT_TRACE
11796 static void
11797 record_iocount_trace_vnode(vnode_t vp, int type)
11798 {
11799 	void *stacks[IOCOUNT_TRACE_MAX_FRAMES] = {0};
11800 	int idx = vp->v_iocount_trace[type].idx;
11801 
11802 	if (idx >= IOCOUNT_TRACE_MAX_IDX) {
11803 		return;
11804 	}
11805 
11806 	OSBacktrace((void **)&stacks[0], IOCOUNT_TRACE_MAX_FRAMES);
11807 
11808 	/*
11809 	 * To save index space, only store the unique backtraces. If dup is found,
11810 	 * To save index space, only store the unique backtraces. If a dup is found,
11811 	 */
11812 	for (int i = 0; i < idx; i++) {
11813 		if (memcmp(&stacks[0], &vp->v_iocount_trace[type].stacks[i][0],
11814 		    sizeof(stacks)) == 0) {
11815 			vp->v_iocount_trace[type].counts[i]++;
11816 			return;
11817 		}
11818 	}
11819 
11820 	memcpy(&vp->v_iocount_trace[type].stacks[idx][0], &stacks[0],
11821 	    sizeof(stacks));
11822 	vp->v_iocount_trace[type].counts[idx] = 1;
11823 	vp->v_iocount_trace[type].idx++;
11824 }
11825 
11826 static void
11827 record_iocount_trace_uthread(vnode_t vp, int count)
11828 {
11829 	struct uthread *ut;
11830 
11831 	ut = current_uthread();
11832 	ut->uu_iocount += count;
11833 
11834 	if (count == 1) {
11835 		if (ut->uu_vpindex < 32) {
11836 			OSBacktrace((void **)&ut->uu_pcs[ut->uu_vpindex][0], 10);
11837 
11838 			ut->uu_vps[ut->uu_vpindex] = vp;
11839 			ut->uu_vpindex++;
11840 		}
11841 	}
11842 }
11843 
11844 static void
11845 record_vp(vnode_t vp, int count)
11846 {
11847 	if (__probable(bootarg_vnode_iocount_trace == 0 &&
11848 	    bootarg_uthread_iocount_trace == 0)) {
11849 		return;
11850 	}
11851 
11852 #if CONFIG_TRIGGERS
11853 	if (vp->v_resolve) {
11854 		return;
11855 	}
11856 #endif
11857 	if ((vp->v_flag & VSYSTEM)) {
11858 		return;
11859 	}
11860 
11861 	if (bootarg_vnode_iocount_trace) {
11862 		record_iocount_trace_vnode(vp,
11863 		    (count > 0) ? IOCOUNT_TRACE_VGET : IOCOUNT_TRACE_VPUT);
11864 	}
11865 	if (bootarg_uthread_iocount_trace) {
11866 		record_iocount_trace_uthread(vp, count);
11867 	}
11868 }
11869 #endif /* CONFIG_IOCOUNT_TRACE */
11870 
11871 #if CONFIG_TRIGGERS
11872 #define __triggers_unused
11873 #else
11874 #define __triggers_unused       __unused
11875 #endif
11876 
11877 resolver_result_t
11878 vfs_resolver_result(__triggers_unused uint32_t seq, __triggers_unused enum resolver_status stat, __triggers_unused int aux)
11879 {
11880 #if CONFIG_TRIGGERS
11881 	/*
11882 	 * |<---   32   --->|<---  28  --->|<- 4 ->|
11883 	 *      sequence        auxiliary    status
11884 	 */
11885 	return (((uint64_t)seq) << 32) |
11886 	       (((uint64_t)(aux & 0x0fffffff)) << 4) |
11887 	       (uint64_t)(stat & 0x0000000F);
11888 #else
11889 	return (0x0ULL) | (((uint64_t)ENOTSUP) << 4) | (((uint64_t)RESOLVER_ERROR) & 0xF);
11890 #endif
11891 }
11892 
11893 #if CONFIG_TRIGGERS
11894 
11895 #define TRIG_DEBUG 0
11896 
11897 #if TRIG_DEBUG
11898 #define TRIG_LOG(...) do { printf("%s: ", __FUNCTION__); printf(__VA_ARGS__); } while (0)
11899 #else
11900 #define TRIG_LOG(...)
11901 #endif
11902 
11903 /*
11904  * Resolver result functions
11905  */
11906 
11907 
11908 enum resolver_status
11909 vfs_resolver_status(resolver_result_t result)
11910 {
11911 	/* lower 4 bits is status */
11912 	return result & 0x0000000F;
11913 }
11914 
11915 uint32_t
11916 vfs_resolver_sequence(resolver_result_t result)
11917 {
11918 	/* upper 32 bits is sequence */
11919 	return (uint32_t)(result >> 32);
11920 }
11921 
11922 int
11923 vfs_resolver_auxiliary(resolver_result_t result)
11924 {
11925 	/* 28 bits of auxiliary */
11926 	return (int)(((uint32_t)(result & 0xFFFFFFF0)) >> 4);
11927 }
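
/*
 * Round-trip sketch (illustrative): a result packed by
 * vfs_resolver_result() decodes back through the accessors above:
 *
 *	resolver_result_t r = vfs_resolver_result(42, RESOLVER_RESOLVED, 0);
 *	assert(vfs_resolver_sequence(r) == 42);
 *	assert(vfs_resolver_status(r) == RESOLVER_RESOLVED);
 *	assert(vfs_resolver_auxiliary(r) == 0);
 */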
11928 
11929 /*
11930  * SPI
11931  * Call in for resolvers to update vnode trigger state
11932  */
11933 int
11934 vnode_trigger_update(vnode_t vp, resolver_result_t result)
11935 {
11936 	vnode_resolve_t rp;
11937 	uint32_t seq;
11938 	enum resolver_status stat;
11939 
11940 	if (vp->v_resolve == NULL) {
11941 		return EINVAL;
11942 	}
11943 
11944 	stat = vfs_resolver_status(result);
11945 	seq = vfs_resolver_sequence(result);
11946 
11947 	if ((stat != RESOLVER_RESOLVED) && (stat != RESOLVER_UNRESOLVED)) {
11948 		return EINVAL;
11949 	}
11950 
11951 	rp = vp->v_resolve;
11952 	lck_mtx_lock(&rp->vr_lock);
11953 
11954 	if (seq > rp->vr_lastseq) {
11955 		if (stat == RESOLVER_RESOLVED) {
11956 			rp->vr_flags |= VNT_RESOLVED;
11957 		} else {
11958 			rp->vr_flags &= ~VNT_RESOLVED;
11959 		}
11960 
11961 		rp->vr_lastseq = seq;
11962 	}
11963 
11964 	lck_mtx_unlock(&rp->vr_lock);
11965 
11966 	return 0;
11967 }
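
/*
 * Illustrative resolver-side usage (hypothetical sequence counter):
 * after an asynchronous resolution completes, the resolver reports the
 * new state with a higher sequence number so stale updates are ignored:
 *
 *	result = vfs_resolver_result(++my_seq, RESOLVER_RESOLVED, 0);
 *	(void) vnode_trigger_update(vp, result);
 */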
11968 
11969 static int
11970 vnode_resolver_attach(vnode_t vp, vnode_resolve_t rp, boolean_t ref)
11971 {
11972 	int error;
11973 
11974 	vnode_lock_spin(vp);
11975 	if (vp->v_resolve != NULL) {
11976 		vnode_unlock(vp);
11977 		return EINVAL;
11978 	} else {
11979 		vp->v_resolve = rp;
11980 	}
11981 	vnode_unlock(vp);
11982 
11983 	if (ref) {
11984 		error = vnode_ref_ext(vp, O_EVTONLY, VNODE_REF_FORCE);
11985 		if (error != 0) {
11986 			panic("VNODE_REF_FORCE didn't help...");
11987 		}
11988 	}
11989 
11990 	return 0;
11991 }
11992 
11993 /*
11994  * VFS internal interfaces for vnode triggers
11995  *
11996  * vnode must already have an io count on entry
11997  * v_resolve is stable when io count is non-zero
11998  */
11999 static int
12000 vnode_resolver_create(mount_t mp, vnode_t vp, struct vnode_trigger_param *tinfo, boolean_t external)
12001 {
12002 	vnode_resolve_t rp;
12003 	int result;
12004 	char byte;
12005 
12006 #if 1
12007 	/* minimum pointer test (debugging) */
12008 	if (tinfo->vnt_data) {
12009 		byte = *((char *)tinfo->vnt_data);
12010 	}
12011 #endif
12012 	rp = kalloc_type(struct vnode_resolve, Z_WAITOK | Z_NOFAIL);
12013 
12014 	lck_mtx_init(&rp->vr_lock, &trigger_vnode_lck_grp, &trigger_vnode_lck_attr);
12015 
12016 	rp->vr_resolve_func = tinfo->vnt_resolve_func;
12017 	rp->vr_unresolve_func = tinfo->vnt_unresolve_func;
12018 	rp->vr_rearm_func = tinfo->vnt_rearm_func;
12019 	rp->vr_reclaim_func = tinfo->vnt_reclaim_func;
12020 	rp->vr_data = tinfo->vnt_data;
12021 	rp->vr_lastseq = 0;
12022 	rp->vr_flags = tinfo->vnt_flags & VNT_VALID_MASK;
12023 	if (external) {
12024 		rp->vr_flags |= VNT_EXTERNAL;
12025 	}
12026 
12027 	result = vnode_resolver_attach(vp, rp, external);
12028 	if (result != 0) {
12029 		goto out;
12030 	}
12031 
12032 	if (mp) {
12033 		OSAddAtomic(1, &mp->mnt_numtriggers);
12034 	}
12035 
12036 	return result;
12037 
12038 out:
12039 	kfree_type(struct vnode_resolve, rp);
12040 	return result;
12041 }
12042 
12043 static void
12044 vnode_resolver_release(vnode_resolve_t rp)
12045 {
12046 	/*
12047 	 * Give them a chance to free any private data
12048 	 */
12049 	if (rp->vr_data && rp->vr_reclaim_func) {
12050 		rp->vr_reclaim_func(NULLVP, rp->vr_data);
12051 	}
12052 
12053 	lck_mtx_destroy(&rp->vr_lock, &trigger_vnode_lck_grp);
12054 	kfree_type(struct vnode_resolve, rp);
12055 }
12056 
12057 /* Called after the vnode has been drained */
12058 static void
12059 vnode_resolver_detach(vnode_t vp)
12060 {
12061 	vnode_resolve_t rp;
12062 	mount_t mp;
12063 
12064 	mp = vnode_mount(vp);
12065 
12066 	vnode_lock(vp);
12067 	rp = vp->v_resolve;
12068 	vp->v_resolve = NULL;
12069 	vnode_unlock(vp);
12070 
12071 	if ((rp->vr_flags & VNT_EXTERNAL) != 0) {
12072 		vnode_rele_ext(vp, O_EVTONLY, 1);
12073 	}
12074 
12075 	vnode_resolver_release(rp);
12076 
12077 	/* Keep count of active trigger vnodes per mount */
12078 	OSAddAtomic(-1, &mp->mnt_numtriggers);
12079 }
12080 
12081 __private_extern__
12082 void
12083 vnode_trigger_rearm(vnode_t vp, vfs_context_t ctx)
12084 {
12085 	vnode_resolve_t rp;
12086 	resolver_result_t result;
12087 	enum resolver_status status;
12088 	uint32_t seq;
12089 
12090 	if ((vp->v_resolve == NULL) ||
12091 	    (vp->v_resolve->vr_rearm_func == NULL) ||
12092 	    (vp->v_resolve->vr_flags & VNT_AUTO_REARM) == 0) {
12093 		return;
12094 	}
12095 
12096 	rp = vp->v_resolve;
12097 	lck_mtx_lock(&rp->vr_lock);
12098 
12099 	/*
12100 	 * Check if VFS initiated this unmount. If so, we'll catch it after the unresolve completes.
12101 	 */
12102 	if (rp->vr_flags & VNT_VFS_UNMOUNTED) {
12103 		lck_mtx_unlock(&rp->vr_lock);
12104 		return;
12105 	}
12106 
12107 	/* Check if this vnode is already armed */
12108 	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
12109 		lck_mtx_unlock(&rp->vr_lock);
12110 		return;
12111 	}
12112 
12113 	lck_mtx_unlock(&rp->vr_lock);
12114 
12115 	result = rp->vr_rearm_func(vp, 0, rp->vr_data, ctx);
12116 	status = vfs_resolver_status(result);
12117 	seq = vfs_resolver_sequence(result);
12118 
12119 	lck_mtx_lock(&rp->vr_lock);
12120 	if (seq > rp->vr_lastseq) {
12121 		if (status == RESOLVER_UNRESOLVED) {
12122 			rp->vr_flags &= ~VNT_RESOLVED;
12123 		}
12124 		rp->vr_lastseq = seq;
12125 	}
12126 	lck_mtx_unlock(&rp->vr_lock);
12127 }
12128 
12129 __private_extern__
12130 int
12131 vnode_trigger_resolve(vnode_t vp, struct nameidata *ndp, vfs_context_t ctx)
12132 {
12133 	vnode_resolve_t rp;
12134 	enum path_operation op;
12135 	resolver_result_t result;
12136 	enum resolver_status status;
12137 	uint32_t seq;
12138 
12139 	/*
12140 	 * N.B. we cannot call vfs_context_can_resolve_triggers()
12141 	 * here because we really only want to suppress that in
12142 	 * the event the trigger will be resolved by something in
12143 	 * user-space.  Any triggers that are resolved by the kernel
12144 	 * do not pose a threat of deadlock.
12145 	 */
12146 
12147 	/* Only trigger on topmost vnodes */
12148 	if ((vp->v_resolve == NULL) ||
12149 	    (vp->v_resolve->vr_resolve_func == NULL) ||
12150 	    (vp->v_mountedhere != NULL)) {
12151 		return 0;
12152 	}
12153 
12154 	rp = vp->v_resolve;
12155 	lck_mtx_lock(&rp->vr_lock);
12156 
12157 	/* Check if this vnode is already resolved */
12158 	if (rp->vr_flags & VNT_RESOLVED) {
12159 		lck_mtx_unlock(&rp->vr_lock);
12160 		return 0;
12161 	}
12162 
12163 	lck_mtx_unlock(&rp->vr_lock);
12164 
12165 #if CONFIG_MACF
12166 	if ((rp->vr_flags & VNT_KERN_RESOLVE) == 0) {
12167 		/*
12168 		 * VNT_KERN_RESOLVE indicates this trigger takes no parameters
12169 		 * from the accessing process other than the act of access
12170 		 * itself.  All other triggers must be checked.
12171 		 */
12172 		int rv = mac_vnode_check_trigger_resolve(ctx, vp, &ndp->ni_cnd);
12173 		if (rv != 0) {
12174 			return rv;
12175 		}
12176 	}
12177 #endif
12178 
12179 	/*
12180 	 * XXX
12181 	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
12182 	 * is there anyway to know this???
12183 	 * is there any way to know this???
12184 	 *
12185 	 * XXX - should we call this on a separate thread with a timeout?
12186 	 *
12187 	 * XXX - should we use ISLASTCN to pick the op value???  Perhaps only leafs should
12188 	 * get the richer set and non-leafs should get generic OP_LOOKUP?  TBD
12189 	 */
12190 	op = (ndp->ni_op < OP_MAXOP) ? ndp->ni_op: OP_LOOKUP;
12191 
12192 	result = rp->vr_resolve_func(vp, &ndp->ni_cnd, op, 0, rp->vr_data, ctx);
12193 	status = vfs_resolver_status(result);
12194 	seq = vfs_resolver_sequence(result);
12195 
12196 	lck_mtx_lock(&rp->vr_lock);
12197 	if (seq > rp->vr_lastseq) {
12198 		if (status == RESOLVER_RESOLVED) {
12199 			rp->vr_flags |= VNT_RESOLVED;
12200 		}
12201 		rp->vr_lastseq = seq;
12202 	}
12203 	lck_mtx_unlock(&rp->vr_lock);
12204 
12205 	/* On resolver errors, propagate the error back up */
12206 	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
12207 }
12208 
12209 static int
12210 vnode_trigger_unresolve(vnode_t vp, int flags, vfs_context_t ctx)
12211 {
12212 	vnode_resolve_t rp;
12213 	resolver_result_t result;
12214 	enum resolver_status status;
12215 	uint32_t seq;
12216 
12217 	if ((vp->v_resolve == NULL) || (vp->v_resolve->vr_unresolve_func == NULL)) {
12218 		return 0;
12219 	}
12220 
12221 	rp = vp->v_resolve;
12222 	lck_mtx_lock(&rp->vr_lock);
12223 
12224 	/* Check if this vnode is already resolved */
12225 	if ((rp->vr_flags & VNT_RESOLVED) == 0) {
12226 		printf("vnode_trigger_unresolve: not currently resolved\n");
12227 		lck_mtx_unlock(&rp->vr_lock);
12228 		return 0;
12229 	}
12230 
12231 	rp->vr_flags |= VNT_VFS_UNMOUNTED;
12232 
12233 	lck_mtx_unlock(&rp->vr_lock);
12234 
12235 	/*
12236 	 * XXX
12237 	 * assumes that resolver will not access this trigger vnode (otherwise the kernel will deadlock)
12238 	 * there can also be other legitimate lookups in parallel
12239 	 *
12240 	 * XXX - should we call this on a separate thread with a timeout?
12241 	 */
12242 
12243 	result = rp->vr_unresolve_func(vp, flags, rp->vr_data, ctx);
12244 	status = vfs_resolver_status(result);
12245 	seq = vfs_resolver_sequence(result);
12246 
12247 	lck_mtx_lock(&rp->vr_lock);
12248 	if (seq > rp->vr_lastseq) {
12249 		if (status == RESOLVER_UNRESOLVED) {
12250 			rp->vr_flags &= ~VNT_RESOLVED;
12251 		}
12252 		rp->vr_lastseq = seq;
12253 	}
12254 	rp->vr_flags &= ~VNT_VFS_UNMOUNTED;
12255 	lck_mtx_unlock(&rp->vr_lock);
12256 
12257 	/* On resolver errors, propagate the error back up */
12258 	return status == RESOLVER_ERROR ? vfs_resolver_auxiliary(result) : 0;
12259 }
12260 
12261 static int
12262 triggerisdescendant(mount_t mp, mount_t rmp)
12263 {
12264 	int match = FALSE;
12265 
12266 	/*
12267 	 * walk up vnode covered chain looking for a match
12268 	 */
12269 	name_cache_lock_shared();
12270 
12271 	while (1) {
12272 		vnode_t vp;
12273 
12274 		/* did we encounter "/" ? */
12275 		if (mp->mnt_flag & MNT_ROOTFS) {
12276 			break;
12277 		}
12278 
12279 		vp = mp->mnt_vnodecovered;
12280 		if (vp == NULLVP) {
12281 			break;
12282 		}
12283 
12284 		mp = vp->v_mount;
12285 		if (mp == rmp) {
12286 			match = TRUE;
12287 			break;
12288 		}
12289 	}
12290 
12291 	name_cache_unlock();
12292 
12293 	return match;
12294 }
12295 
12296 struct trigger_unmount_info {
12297 	vfs_context_t   ctx;
12298 	mount_t         top_mp;
12299 	vnode_t         trigger_vp;
12300 	mount_t         trigger_mp;
12301 	uint32_t        trigger_vid;
12302 	int             flags;
12303 };
12304 
12305 static int
12306 trigger_unmount_callback(mount_t mp, void * arg)
12307 {
12308 	struct trigger_unmount_info * infop = (struct trigger_unmount_info *)arg;
12309 	boolean_t mountedtrigger = FALSE;
12310 
12311 	/*
12312 	 * When we encounter the top level mount we're done
12313 	 */
12314 	if (mp == infop->top_mp) {
12315 		return VFS_RETURNED_DONE;
12316 	}
12317 
12318 	if ((mp->mnt_vnodecovered == NULL) ||
12319 	    (vnode_getwithref(mp->mnt_vnodecovered) != 0)) {
12320 		return VFS_RETURNED;
12321 	}
12322 
12323 	if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
12324 	    (mp->mnt_vnodecovered->v_resolve != NULL) &&
12325 	    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_RESOLVED)) {
12326 		mountedtrigger = TRUE;
12327 	}
12328 	vnode_put(mp->mnt_vnodecovered);
12329 
12330 	/*
12331 	 * When we encounter a mounted trigger, check if it's under the top level mount
12332 	 */
12333 	if (!mountedtrigger || !triggerisdescendant(mp, infop->top_mp)) {
12334 		return VFS_RETURNED;
12335 	}
12336 
12337 	/*
12338 	 * Process any pending nested mount (now that it's not referenced)
12339 	 */
12340 	if ((infop->trigger_vp != NULLVP) &&
12341 	    (vnode_getwithvid(infop->trigger_vp, infop->trigger_vid) == 0)) {
12342 		vnode_t vp = infop->trigger_vp;
12343 		int error;
12344 
12345 		vnode_drop(infop->trigger_vp);
12346 		infop->trigger_vp = NULLVP;
12347 
12348 		if (mp == vp->v_mountedhere) {
12349 			vnode_put(vp);
12350 			printf("trigger_unmount_callback: unexpected match '%s'\n",
12351 			    mp->mnt_vfsstat.f_mntonname);
12352 			return VFS_RETURNED;
12353 		}
12354 		if (infop->trigger_mp != vp->v_mountedhere) {
12355 			vnode_put(vp);
12356 			printf("trigger_unmount_callback: trigger mnt changed! (%p != %p)\n",
12357 			    infop->trigger_mp, vp->v_mountedhere);
12358 			goto savenext;
12359 		}
12360 
12361 		error = vnode_trigger_unresolve(vp, infop->flags, infop->ctx);
12362 		vnode_put(vp);
12363 		if (error) {
12364 			printf("unresolving: '%s', err %d\n",
12365 			    vp->v_mountedhere ? vp->v_mountedhere->mnt_vfsstat.f_mntonname :
12366 			    "???", error);
12367 			return VFS_RETURNED_DONE; /* stop iteration on errors */
12368 		}
12369 	} else if (infop->trigger_vp != NULLVP) {
12370 		vnode_drop(infop->trigger_vp);
12371 	}
12372 
12373 savenext:
12374 	/*
12375 	 * We can't call the resolver here since we hold a mount iteration
12376 	 * ref on mp, so save its covered vp for later processing
12377 	 */
12378 	infop->trigger_vp = mp->mnt_vnodecovered;
12379 	if ((infop->trigger_vp != NULLVP) &&
12380 	    (vnode_getwithref(infop->trigger_vp) == 0)) {
12381 		if (infop->trigger_vp->v_mountedhere == mp) {
12382 			infop->trigger_vid = infop->trigger_vp->v_id;
12383 			vnode_hold(infop->trigger_vp);
12384 			infop->trigger_mp = mp;
12385 		}
12386 		vnode_put(infop->trigger_vp);
12387 	}
12388 
12389 	return VFS_RETURNED;
12390 }
12391 
12392 /*
12393  * Attempt to unmount any trigger mounts nested underneath a mount.
12394  * This is a best effort attempt and no retries are performed here.
12395  *
12396  * Note: mp->mnt_rwlock is held exclusively on entry (so be careful)
12397  */
12398 __private_extern__
12399 void
12400 vfs_nested_trigger_unmounts(mount_t mp, int flags, vfs_context_t ctx)
12401 {
12402 	struct trigger_unmount_info info;
12403 
12404 	/* Must have trigger vnodes */
12405 	if (mp->mnt_numtriggers == 0) {
12406 		return;
12407 	}
12408 	/* Avoid recursive requests (by checking covered vnode) */
12409 	if ((mp->mnt_vnodecovered != NULL) &&
12410 	    (vnode_getwithref(mp->mnt_vnodecovered) == 0)) {
12411 		boolean_t recursive = FALSE;
12412 
12413 		if ((mp->mnt_vnodecovered->v_mountedhere == mp) &&
12414 		    (mp->mnt_vnodecovered->v_resolve != NULL) &&
12415 		    (mp->mnt_vnodecovered->v_resolve->vr_flags & VNT_VFS_UNMOUNTED)) {
12416 			recursive = TRUE;
12417 		}
12418 		vnode_put(mp->mnt_vnodecovered);
12419 		if (recursive) {
12420 			return;
12421 		}
12422 	}
12423 
12424 	/*
12425 	 * Attempt to unmount any nested trigger mounts (best effort)
12426 	 */
12427 	info.ctx = ctx;
12428 	info.top_mp = mp;
12429 	info.trigger_vp = NULLVP;
12430 	info.trigger_vid = 0;
12431 	info.trigger_mp = NULL;
12432 	info.flags = flags;
12433 
12434 	(void) vfs_iterate(VFS_ITERATE_TAIL_FIRST, trigger_unmount_callback, &info);
12435 
12436 	/*
12437 	 * Process remaining nested mount (now that it's not referenced)
12438 	 */
12439 	if ((info.trigger_vp != NULLVP) &&
12440 	    (vnode_getwithvid(info.trigger_vp, info.trigger_vid) == 0)) {
12441 		vnode_t vp = info.trigger_vp;
12442 
12443 		if (info.trigger_mp == vp->v_mountedhere) {
12444 			(void) vnode_trigger_unresolve(vp, flags, ctx);
12445 		}
12446 		vnode_put(vp);
12447 		vnode_drop(vp);
12448 	} else if (info.trigger_vp != NULLVP) {
12449 		vnode_drop(info.trigger_vp);
12450 	}
12451 }
12452 
12453 int
12454 vfs_addtrigger(mount_t mp, const char *relpath, struct vnode_trigger_info *vtip, vfs_context_t ctx)
12455 {
12456 	struct nameidata *ndp;
12457 	int res;
12458 	vnode_t rvp, vp;
12459 	struct vnode_trigger_param vtp;
12460 
12461 	/*
12462 	 * Must be called from a trigger callback, wherein the rwlock is held
12463 	 */
12464 	lck_rw_assert(&mp->mnt_rwlock, LCK_RW_ASSERT_HELD);
12465 
12466 	TRIG_LOG("Adding trigger at %s\n", relpath);
12467 	TRIG_LOG("Trying VFS_ROOT\n");
12468 
12469 	ndp = kalloc_type(struct nameidata, Z_WAITOK | Z_NOFAIL);
12470 
12471 	/*
12472 	 * We do a lookup starting at the root of the mountpoint, unwilling
12473 	 * to cross into other mountpoints.
12474 	 */
12475 	res = VFS_ROOT(mp, &rvp, ctx);
12476 	if (res != 0) {
12477 		goto out;
12478 	}
12479 
12480 	TRIG_LOG("Trying namei\n");
12481 
12482 	NDINIT(ndp, LOOKUP, OP_LOOKUP, USEDVP | NOCROSSMOUNT | FOLLOW, UIO_SYSSPACE,
12483 	    CAST_USER_ADDR_T(relpath), ctx);
12484 	ndp->ni_dvp = rvp;
12485 	res = namei(ndp);
12486 	if (res != 0) {
12487 		vnode_put(rvp);
12488 		goto out;
12489 	}
12490 
12491 	vp = ndp->ni_vp;
12492 	nameidone(ndp);
12493 	vnode_put(rvp);
12494 
12495 	TRIG_LOG("Trying vnode_resolver_create()\n");
12496 
12497 	/*
12498 	 * Set up blob.  vnode_create() takes a larger structure
12499 	 * with creation info, and we needed something different
12500 	 * for this case.  One needs to win, or we need to munge both;
12501 	 * vnode_create() wins.
12502 	 */
12503 	bzero(&vtp, sizeof(vtp));
12504 	vtp.vnt_resolve_func = vtip->vti_resolve_func;
12505 	vtp.vnt_unresolve_func = vtip->vti_unresolve_func;
12506 	vtp.vnt_rearm_func = vtip->vti_rearm_func;
12507 	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
12508 	vtp.vnt_reclaim_func = vtip->vti_reclaim_func;
12509 	vtp.vnt_data = vtip->vti_data;
12510 	vtp.vnt_flags = vtip->vti_flags;
12511 
12512 	res = vnode_resolver_create(mp, vp, &vtp, TRUE);
12513 	vnode_put(vp);
12514 out:
12515 	kfree_type(struct nameidata, ndp);
12516 	TRIG_LOG("Returning %d\n", res);
12517 	return res;
12518 }
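
/*
 * Illustrative sketch (not part of the original source): how a file system,
 * from inside a trigger callback, might register an additional trigger vnode
 * at a path relative to the mount root.  The callback, its body, and the
 * "autofs/home" path are hypothetical; vfs_addtrigger() and the
 * vnode_trigger_info layout are the real interfaces above.
 */
#if 0
static resolver_result_t
example_resolve(vnode_t vp, const struct componentname *cnp,
    enum path_operation pop, int flags, void *data, vfs_context_t ctx)
{
	/* A real resolver would mount something on vp here. */
	return vfs_resolver_result(0, RESOLVER_NOCHANGE, 0);
}

static int
example_register_trigger(mount_t mp, vfs_context_t ctx)
{
	struct vnode_trigger_info vti;

	bzero(&vti, sizeof(vti));
	vti.vti_resolve_func = example_resolve;

	/* Path is relative to the mount root, per the lookup above. */
	return vfs_addtrigger(mp, "autofs/home", &vti, ctx);
}
#endif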

#endif /* CONFIG_TRIGGERS */

vm_offset_t
kdebug_vnode(vnode_t vp)
{
	return VM_KERNEL_ADDRPERM(vp);
}

static int flush_cache_on_write = 0;
SYSCTL_INT(_kern, OID_AUTO, flush_cache_on_write,
    CTLFLAG_RW | CTLFLAG_LOCKED, &flush_cache_on_write, 0,
    "always flush the drive cache on writes to uncached files");

int
vnode_should_flush_after_write(vnode_t vp, int ioflag)
{
	return flush_cache_on_write
	       && (ISSET(ioflag, IO_NOCACHE) || vnode_isnocache(vp));
}
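
/*
 * Illustrative sketch (not part of the original source): the shape of a write
 * path consuming vnode_should_flush_after_write().  DKIOCSYNCHRONIZECACHE is
 * the <sys/disk.h> ioctl such a caller might issue; whether a given write
 * path routes the flush exactly this way is an assumption here.
 */
#if 0
static void
example_post_write_flush(vnode_t vp, int ioflag, vfs_context_t ctx)
{
	if (vnode_should_flush_after_write(vp, ioflag)) {
		/* Ask the underlying device to flush its write cache. */
		(void)VNOP_IOCTL(vp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, ctx);
	}
}
#endif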

/*
 * sysctl for use by disk I/O tracing tools to get the list of existing
 * vnodes' paths
 */

#define NPATH_WORDS (MAXPATHLEN / sizeof(unsigned long))
struct vnode_trace_paths_context {
	uint64_t count;
	/*
	 * Must be one less than a multiple of 4 words for tracing: the
	 * first lookup event carries the vnode in one argument slot,
	 * and each event carries four words total.
	 */
	unsigned long path[NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1];
};
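
/*
 * A minimal compile-time check of the sizing arithmetic above (added for
 * illustration; not in the original source): with MAXPATHLEN == 1024 and
 * 8-byte longs, NPATH_WORDS is 128, the array holds 131 words, and
 * 131 + 1 is again a multiple of 4.
 */
_Static_assert((NPATH_WORDS + (4 - (NPATH_WORDS % 4)) - 1 + 1) % 4 == 0,
    "path[] must be one less than a multiple of 4 words");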

static int
vnode_trace_path_callback(struct vnode *vp, void *vctx)
{
	struct vnode_trace_paths_context *ctx = vctx;
	size_t path_len = sizeof(ctx->path);

	int getpath_len = (int)path_len;
	if (vn_getpath(vp, (char *)ctx->path, &getpath_len) == 0) {
		/* vn_getpath() NUL-terminates, and len includes the NUL. */
		assert(getpath_len >= 0);
		path_len = (size_t)getpath_len;

		assert(path_len <= sizeof(ctx->path));
		kdebug_vfs_lookup(ctx->path, (int)path_len, vp,
		    KDBG_VFS_LOOKUP_FLAG_LOOKUP | KDBG_VFS_LOOKUP_FLAG_NOPROCFILT);

		if (++(ctx->count) == 1000) {
			thread_yield_to_preemption();
			ctx->count = 0;
		}
	}

	return VNODE_RETURNED;
}

static int
vfs_trace_paths_callback(mount_t mp, void *arg)
{
	if (mp->mnt_flag & MNT_LOCAL) {
		vnode_iterate(mp, VNODE_ITERATE_ALL, vnode_trace_path_callback, arg);
	}

	return VFS_RETURNED;
}

static int sysctl_vfs_trace_paths SYSCTL_HANDLER_ARGS {
	struct vnode_trace_paths_context ctx;

	(void)oidp;
	(void)arg1;
	(void)arg2;
	(void)req;

	if (!kauth_cred_issuser(kauth_cred_get())) {
		return EPERM;
	}

	if (!kdebug_enable || !kdebug_debugid_enabled(VFS_LOOKUP)) {
		return EINVAL;
	}

	bzero(&ctx, sizeof(struct vnode_trace_paths_context));

	vfs_iterate(0, vfs_trace_paths_callback, &ctx);

	return 0;
}

SYSCTL_PROC(_vfs_generic, OID_AUTO, trace_paths, CTLFLAG_RD | CTLFLAG_LOCKED | CTLFLAG_MASKED, NULL, 0, &sysctl_vfs_trace_paths, "-", "trace_paths");
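
/*
 * Illustrative userspace sketch (not part of the original source): a tracing
 * tool triggers the path dump by reading this sysctl while kdebug tracing of
 * VFS_LOOKUP events is enabled.  The OID name is derived from the
 * registration above; error handling is minimal.
 */
#if 0
#include <sys/sysctl.h>
#include <stdio.h>

static int
example_dump_vnode_paths(void)
{
	/* Requires root and an active kdebug session tracing VFS_LOOKUP. */
	if (sysctlbyname("vfs.generic.trace_paths", NULL, NULL, NULL, 0) == -1) {
		perror("vfs.generic.trace_paths");
		return -1;
	}
	return 0;
}
#endif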

#if CONFIG_FILE_LEASES
#include <IOKit/IOBSD.h>
#include <sys/file_internal.h>

#define FILE_LEASES_ENTITLEMENT    "com.apple.private.vfs.file-leases"

static uint32_t lease_break_timeout = 60; /* secs */

#if (DEVELOPMENT || DEBUG)
static int lease_debug = 0;
static int lease_entitlement_override = 0;

SYSCTL_NODE(_vfs, OID_AUTO, lease, CTLFLAG_RW | CTLFLAG_LOCKED, NULL, "vfs lease");
SYSCTL_UINT(_vfs_lease, OID_AUTO, break_timeout, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_break_timeout, 0, "");
SYSCTL_INT(_vfs_lease, OID_AUTO, debug, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_debug, 0, "");
SYSCTL_INT(_vfs_lease, OID_AUTO, entitlement_override, CTLFLAG_RW | CTLFLAG_LOCKED, &lease_entitlement_override, 0, "");

#define LEASEDBG(fmt, args...)                                       \
do {                                                                 \
	if (__improbable(lease_debug)) {                                 \
	        pid_t cur_pid = proc_getpid(current_proc());             \
	        printf("%s(%d): " fmt "\n", __func__, cur_pid, ##args);  \
	}                                                                \
} while (0)
#else
#define LEASEDBG(fmt, args...)  /**/
#endif /* (DEVELOPMENT || DEBUG) */

static bool
allow_setlease(vfs_context_t ctx)
{
	bool entitled;

	entitled = IOTaskHasEntitlement(vfs_context_task(ctx),
	    FILE_LEASES_ENTITLEMENT);

#if (DEVELOPMENT || DEBUG)
	if (!entitled) {
		entitled = (lease_entitlement_override == 1);
	}
#endif

	return entitled;
}

static file_lease_t
file_lease_alloc(struct fileglob *fg, int fl_type, pid_t pid)
{
	file_lease_t fl;

	fl = kalloc_type(struct file_lease, Z_WAITOK);
	/*
	 * Duplicated file descriptors created by dup() or fork() share the
	 * same 'fileglob', so the lease can be released or modified through
	 * the duplicated fds.  Opening the same file again (from the same or
	 * a different process) produces a different 'fileglob', so a lease
	 * always follows a 'fileglob'.
	 */
	fl->fl_fg = fg;
	fl->fl_type = fl_type;
	fl->fl_pid = pid;
	fl->fl_flags = 0;
	fl->fl_downgrade_start = fl->fl_release_start = 0;

	return fl;
}
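
/*
 * Illustrative userspace sketch (not part of the original source) of the
 * fileglob sharing described above: after dup(), both descriptors refer to
 * the same open-file object, so a lease taken through one can be released
 * through the other, while a second open() of the same path could not.
 * The F_SETLEASE-style fcntl shown is an assumption borrowed from Linux;
 * the private macOS interface that reaches vnode_setlease() may differ.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void
example_lease_follows_fileglob(const char *path)
{
	int fd = open(path, O_RDONLY);
	int dup_fd = dup(fd);                 /* same fileglob as fd */
	int other_fd = open(path, O_RDONLY);  /* different fileglob */

	(void)fcntl(fd, F_SETLEASE, F_RDLCK);     /* lease tied to fd's fileglob */
	(void)fcntl(dup_fd, F_SETLEASE, F_UNLCK); /* works: same fileglob */

	close(other_fd);
	close(dup_fd);
	close(fd);
}
#endif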

static void
file_lease_free(file_lease_t fl)
{
	kfree_type(struct file_lease, fl);
}

/*
 * A read lease can be placed only on a file/directory that is opened
 * read-only, which means no other process has the file/directory opened
 * read-write/write-only or mmap'ed writable.
 * A write lease can be placed on a file only if there are no other opens
 * for the file.
 *
 * Needs to be called with the vnode's lock held.
 */
static int
check_for_open_conflict(vnode_t vp, struct fileglob *fg, int fl_type,
    int expcounts)
{
	int error = 0;

	if (fl_type == F_RDLCK) {
		if (vp->v_writecount > expcounts &&
		    !(vp->v_writecount == 1 && (fg->fg_flag & FWRITE))) {
			error = EAGAIN;
		} else if (ubc_is_mapped_writable(vp)) {
			error = EAGAIN;
		}
	} else if (fl_type == F_WRLCK && vp->v_usecount > expcounts) {
		error = EAGAIN;
	}

	return error;
}

/* Needs to be called with the vnode's lock held. */
static void
modify_file_lease(vnode_t vp, file_lease_t fl, int new_fl_type,
    struct fileglob *new_fg)
{
	LEASEDBG("fl %p changing fl_type from %d to %d (flags 0x%x)",
	    fl, fl->fl_type, new_fl_type, fl->fl_flags);

	fl->fl_type = new_fl_type;

	/*
	 * The lease being modified may be using a different file
	 * descriptor, so usurp the fileglob pointer here.  In this
	 * case the old descriptor no longer holds the lease.
	 */
	if (new_fg != NULL) {
		fl->fl_fg = new_fg;
	}

	if (fl->fl_flags & FL_FLAG_RELEASE_PENDING ||
	    fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING) {
		wakeup(&vp->v_leases);
	}
}

static int
acquire_file_lease(vnode_t vp, struct fileglob *fg, int fl_type, int expcounts,
    vfs_context_t ctx)
{
	file_lease_t fl, new_fl, our_fl;
	int error;

	/* Make sure "expected count" looks sane. */
	if (expcounts < 0 || expcounts > OPEN_MAX) {
		return EINVAL;
	}

	new_fl = file_lease_alloc(fg, fl_type, vfs_context_pid(ctx));

	vnode_lock(vp);

	error = check_for_open_conflict(vp, fg, fl_type, expcounts);
	if (error) {
		LEASEDBG("open conflict on vp %p type %d writecnt %d usecnt %d "
		    "fl_type %d expcounts %d",
		    vp, vp->v_type, vp->v_writecount, vp->v_usecount, fl_type,
		    expcounts);
		goto out;
	}

	our_fl = NULL;
	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
		/* Does the existing lease belong to us? */
		if (fl->fl_fg == new_fl->fl_fg ||
		    fl->fl_pid == new_fl->fl_pid) {
			our_fl = fl;
			continue;
		}

		/*
		 * We don't allow placing a new write lease when there is an existing
		 * read lease that doesn't belong to us. We also don't allow placing
		 * a new read lease if there is a pending release on the lease.
		 * Placing a new read lease when there is a pending downgrade on the
		 * lease is fine, as it won't cause a lease conflict.
		 */
		if (fl_type == F_WRLCK || fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
			break;
		}
	}

	/*
	 * Found an existing lease that we don't own and that conflicts with
	 * the new lease.
	 */
	if (fl) {
		LEASEDBG("lease conflict on vp %p fl %p fl_type %d cur_fl_type %d",
		    vp, fl, fl_type, fl->fl_type);
		error = EAGAIN;
		goto out;
	}

	/* Found an existing lease that we own so just change the type. */
	if (our_fl) {
		LEASEDBG("replace lease on vp %p fl %p old_fl_type %d new_fl_type %d",
		    vp, our_fl, our_fl->fl_type, fl_type);

		modify_file_lease(vp, our_fl, new_fl->fl_type, new_fl->fl_fg);
		goto out;
	}

	LEASEDBG("acquired lease on vp %p type %d fl %p fl_type %d fg %p",
	    vp, vp->v_type, new_fl, new_fl->fl_type, new_fl->fl_fg);

	LIST_INSERT_HEAD(&vp->v_leases, new_fl, fl_link);
	new_fl = NULL;

out:
	vnode_unlock(vp);

	if (new_fl) {
		file_lease_free(new_fl);
	}

	return error;
}

static int
release_file_lease(vnode_t vp, struct fileglob *fg)
{
	file_lease_t fl, fl_tmp;
	int error = 0;

	LEASEDBG("request to release lease on vp %p type %d fg %p",
	    vp, vp->v_type, fg);

	vnode_lock(vp);

	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		if (fl->fl_fg == fg) {
			LEASEDBG("released lease on vp %p fl %p type %d",
			    vp, fl, fl->fl_type);

			LIST_REMOVE(fl, fl_link);
			modify_file_lease(vp, fl, F_UNLCK, NULL);
			break;
		}
	}

	vnode_unlock(vp);

	if (fl) {
		file_lease_free(fl);
	} else {
		error = ENOLCK;
	}

	return error;
}

/*
 * Acquire or release a file lease according to the given type (F_RDLCK,
 * F_WRLCK or F_UNLCK).
 *
 * Returns:	0		Success
 *		EAGAIN		Failed to acquire a file lease due to conflicting opens
 *		ENOLCK		Failed to release a file lease due to lease not found
 *		EPERM		Current task doesn't have the entitlement
 */
int
vnode_setlease(vnode_t vp, struct fileglob *fg, int fl_type, int expcounts,
    vfs_context_t ctx)
{
	int error;

	if (!allow_setlease(ctx)) {
		return EPERM;
	}

	error = (fl_type == F_UNLCK) ? release_file_lease(vp, fg) :
	    acquire_file_lease(vp, fg, fl_type, expcounts, ctx);

	return error;
}
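
/*
 * Illustrative sketch (not part of the original source): the shape of an
 * fcntl-layer caller.  It forwards the descriptor's fileglob and the number
 * of opens the requester itself holds (expcounts) to vnode_setlease().
 * The helper and its expcounts policy are hypothetical.
 */
#if 0
static int
example_fcntl_setlease(vnode_t vp, struct fileglob *fg, int fl_type,
    vfs_context_t ctx)
{
	/*
	 * The caller's own open should not count as a conflict, so report
	 * one expected open reference for this fileglob.
	 */
	int expcounts = 1;

	return vnode_setlease(vp, fg, fl_type, expcounts, ctx);
}
#endif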

/*
 * Retrieve the lease currently in place for the file.
 *
 * Returns:
 *		F_RDLCK			Read lease
 *		F_WRLCK			Write lease
 *		F_UNLCK			No lease
 */
int
vnode_getlease(vnode_t vp)
{
	file_lease_t fl;
	int fl_type = F_UNLCK;

	vnode_lock(vp);

	/*
	 * There should be only one type of lease in the list, as read and
	 * write leases can't co-exist for the same file.
	 */
	fl = LIST_FIRST(&vp->v_leases);
	if (fl) {
		fl_type = fl->fl_type;
	}

	vnode_unlock(vp);

	LEASEDBG("vp %p fl %p fl_type %d", vp, fl, fl_type);

	return fl_type;
}

/* Must be called with the vnode's lock held. */
static bool
check_for_lease_conflict(vnode_t vp, int breaker_fl_type, vfs_context_t ctx)
{
	file_lease_t fl;
	pid_t pid = vfs_context_pid(ctx);
	bool is_conflict = false;

	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
		if ((fl->fl_type == F_WRLCK && fl->fl_pid != pid) ||
		    (breaker_fl_type == F_WRLCK && fl->fl_pid != pid)) {
			LEASEDBG("conflict detected on vp %p type %d fl_type %d "
			    "breaker_fl_type %d",
			    vp, vp->v_type, fl->fl_type, breaker_fl_type);

			is_conflict = true;
			break;
		}
	}

	return is_conflict;
}

static uint64_t
absolutetime_elapsed_in_secs(uint64_t start)
{
	uint64_t elapsed, elapsed_sec;
	uint64_t now = mach_absolute_time();

	elapsed = now - start;
	absolutetime_to_nanoseconds(elapsed, &elapsed_sec);
	elapsed_sec /= NSEC_PER_SEC;

	return elapsed_sec;
}

/* Must be called with the vnode's lock held. */
static void
handle_lease_break_timedout(vnode_t vp)
{
	file_lease_t fl, fl_tmp;
	uint64_t elapsed_sec;

	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		if (fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING) {
			elapsed_sec = absolutetime_elapsed_in_secs(fl->fl_downgrade_start);

			if (elapsed_sec >= lease_break_timeout) {
				LEASEDBG("force downgrade on vp %p for fl %p elapsed %llu "
				    "timeout %u", vp, fl, elapsed_sec, lease_break_timeout);

				fl->fl_flags &= ~FL_FLAG_DOWNGRADE_PENDING;
				fl->fl_downgrade_start = 0;
				modify_file_lease(vp, fl, F_RDLCK, NULL);
				continue;
			}
		}
		if (fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
			elapsed_sec = absolutetime_elapsed_in_secs(fl->fl_release_start);

			if (elapsed_sec >= lease_break_timeout) {
				LEASEDBG("force release on vp %p for fl %p elapsed %llu "
				    "timeout %u", vp, fl, elapsed_sec, lease_break_timeout);

				LIST_REMOVE(fl, fl_link);
				file_lease_free(fl);
				continue;
			}
		}
	}

	/* Wake up the lease breaker(s). */
	wakeup(&vp->v_leases);
}

/* Must be called with the vnode's lock held. */
static void
wait_for_lease_break(vnode_t vp, int breaker_fl_type, vfs_context_t ctx)
{
	file_lease_t fl;
	struct timespec ts;
	uint64_t elapsed_sec, start_time;
	int error;

restart:
	fl = LIST_FIRST(&vp->v_leases);
	assert(fl);

	/*
	 * In a rare case it is possible that the lease we were blocked on has
	 * been released and a new lease has been put in place after we were
	 * signalled to wake up. In this particular case, we treat it as no
	 * conflict and proceed. This can only happen for directory leasing.
	 */
	if ((fl->fl_flags & (FL_FLAG_DOWNGRADE_PENDING | FL_FLAG_RELEASE_PENDING)) == 0) {
		LEASEDBG("new lease in place on vp %p fl %p fl_type %d "
		    "breaker_fl_type %d",
		    vp, fl, fl->fl_type, breaker_fl_type);

		return;
	}
	/*
	 * Figure out which timer to use for the lease break timeout, as both
	 * timers could be active. If both are active, pick the one with the
	 * earliest start time.
	 */
	if (fl->fl_release_start) {
		if (fl->fl_downgrade_start == 0 ||
		    fl->fl_downgrade_start < fl->fl_release_start) {
			start_time = fl->fl_release_start;
		} else {
			start_time = fl->fl_downgrade_start;
		}
	} else {
		start_time = fl->fl_downgrade_start;
	}
	assert(start_time > 0);

	elapsed_sec = absolutetime_elapsed_in_secs(start_time);

	LEASEDBG("elapsed_sec %llu release_start %llu downgrade_start %llu",
	    elapsed_sec, fl->fl_release_start, fl->fl_downgrade_start);

	ts.tv_sec = (lease_break_timeout > elapsed_sec ?
	    (lease_break_timeout - elapsed_sec) : 0);
	ts.tv_nsec = (ts.tv_sec == 0 ? 1 : 0);
	error = msleep(&vp->v_leases, &vp->v_lock, PVFS, __func__, &ts);

	if (error == 0 || error != EWOULDBLOCK) {
		/*
		 * Woken up because the lease was released/downgraded by the
		 * lease holder. We don't expect any error from msleep() other
		 * than EWOULDBLOCK. Check whether there are further conflicts;
		 * if so, continue to wait for the next conflict to resolve.
		 */
		if (check_for_lease_conflict(vp, breaker_fl_type, ctx)) {
			goto restart;
		}
	} else {
		/*
		 * Woken up because the lease break timeout expired
		 * (EWOULDBLOCK returned). Break/downgrade all conflicting
		 * leases.
		 */
		handle_lease_break_timedout(vp);

		if (check_for_lease_conflict(vp, breaker_fl_type, ctx)) {
			goto restart;
		}
	}
}

/* Must be called with the vnode's lock held. */
static void
send_lease_break_event(vnode_t vp, uint32_t event)
{
	if (vp->v_knotes.slh_first != NULL) {
		KNOTE(&vp->v_knotes, event);
	}
}
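
/*
 * Illustrative userspace sketch (not part of the original source): a lease
 * holder learns about breaks through EVFILT_VNODE knotes, which is what the
 * KNOTE() above feeds.  NOTE_LEASE_DOWNGRADE/NOTE_LEASE_RELEASE are the
 * events posted by vnode_breaklease() below; their availability to user
 * space is an assumption, and the rest is standard kqueue boilerplate.
 */
#if 0
#include <sys/event.h>
#include <unistd.h>

static void
example_watch_lease_breaks(int fd)
{
	struct kevent kev;
	int kq = kqueue();

	EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
	    NOTE_LEASE_DOWNGRADE | NOTE_LEASE_RELEASE, 0, NULL);
	(void)kevent(kq, &kev, 1, NULL, 0, NULL);

	/* Block until the kernel posts a lease break event. */
	if (kevent(kq, NULL, 0, &kev, 1, NULL) == 1) {
		if (kev.fflags & NOTE_LEASE_RELEASE) {
			/* Release the lease (and flush state) promptly here. */
		}
	}
	close(kq);
}
#endif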

static bool
is_dataless_file(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;
	bool is_dataless = false;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_flags);

	error = vnode_getattr(vp, &va, ctx);
	if (!error && (va.va_flags & SF_DATALESS)) {
		is_dataless = true;
	}

	return is_dataless;
}

/*
 * Break the lease(s) in place for the file when there is a conflict.
 * This function returns 0 for almost all call sites. The only exception
 * is when it is called from open1() with the O_NONBLOCK flag and it would
 * need to block waiting for the lease conflict(s) to resolve. In that case
 * EWOULDBLOCK is returned.
 */
int
vnode_breaklease(vnode_t vp, uint32_t oflags, vfs_context_t ctx)
{
	file_lease_t fl;
	uint64_t now;
	int fl_type;
	int error = 0;

	vnode_lock(vp);

	if (__probable(LIST_EMPTY(&vp->v_leases))) {
		goto out_unlock;
	}

	/* Determine the access mode requested by the lease breaker. */
	fl_type = (oflags & (O_WRONLY | O_RDWR | O_CREAT | O_TRUNC)) ? F_WRLCK : F_RDLCK;

	/*
	 * If the lease-breaker is just reading, check that it can break
	 * leases first. If the lease-breaker is writing, or if the
	 * context was not specified, we always break.
	 * We skip the lease break if the lease-breaker is a dataless
	 * manipulator and the file is dataless.
	 */
	if ((fl_type == F_RDLCK && !vfs_context_can_break_leases(ctx)) ||
	    (vfs_context_is_dataless_manipulator(ctx) && (vp->v_type == VREG) &&
	    is_dataless_file(vp, ctx))) {
		goto out_unlock;
	}

	if (!check_for_lease_conflict(vp, fl_type, ctx)) {
		goto out_unlock;
	}

	now = mach_absolute_time();

	LEASEDBG("break lease on vp %p type %d oflags 0x%x cur_time %llu",
	    vp, vp->v_type, oflags, now);

	/*
	 * If we get to this point, the lease(s) in place conflict and we need
	 * to send the lease break event to the lease holder(s).
	 * It is possible that a lease could have both downgrade and release
	 * events pending, triggered by multiple breakers trying to open the
	 * file in different modes. Both events would have different lease
	 * break timers. Consider the following case:
	 * 1. Process A holds the write lease on file X.
	 * 2. Process B opens the file X in read-only mode.
	 *    This triggers a downgrade lease event to Process A.
	 * 3. While the downgrade is pending, Process C opens the file X in
	 *    read-write mode. This triggers a release lease event to Process A.
	 */
	LIST_FOREACH(fl, &vp->v_leases, fl_link) {
		if (fl_type == F_WRLCK) {
			/* File is opened for writing or truncate. */
			if (fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
				continue;
			}
			fl->fl_release_start = now;
			fl->fl_flags |= FL_FLAG_RELEASE_PENDING;
			send_lease_break_event(vp, NOTE_LEASE_RELEASE);
		} else {
			/* File is opened for reading. */
			if (fl->fl_flags & FL_FLAG_DOWNGRADE_PENDING ||
			    fl->fl_flags & FL_FLAG_RELEASE_PENDING) {
				continue;
			}
			fl->fl_downgrade_start = now;
			fl->fl_flags |= FL_FLAG_DOWNGRADE_PENDING;
			send_lease_break_event(vp, NOTE_LEASE_DOWNGRADE);
		}
	}

	/*
	 * If the open was requested with O_NONBLOCK, then we can't block and
	 * wait for the lease to be released/downgraded. Just bail out with
	 * EWOULDBLOCK.
	 */
	if (oflags & O_NONBLOCK) {
		error = EWOULDBLOCK;
		goto out;
	}

	wait_for_lease_break(vp, fl_type, ctx);

out:
	LEASEDBG("break lease on vp %p oflags 0x%x, error %d", vp, oflags, error);

out_unlock:
	vnode_unlock(vp);

	return error;
}
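
/*
 * Illustrative sketch (not part of the original source): the shape of an
 * open path calling vnode_breaklease().  The helper is hypothetical; the
 * EWOULDBLOCK contract for O_NONBLOCK opens is the one documented above.
 */
#if 0
static int
example_open_break_lease(vnode_t vp, uint32_t oflags, vfs_context_t ctx)
{
	int error;

	/* Blocks until conflicting leases are resolved, unless O_NONBLOCK. */
	error = vnode_breaklease(vp, oflags, ctx);
	if (error == EWOULDBLOCK) {
		/* Non-blocking open: fail rather than wait for the holder. */
		return error;
	}
	return 0;
}
#endif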

/*
 * Get the parent vnode by parent ID (only for file systems that support
 * MNTK_PATH_FROM_ID).
 * On success, the parent's vnode is returned with an iocount held.
 */
static vnode_t
vnode_getparent_byid(vnode_t vp)
{
	struct vnode_attr va;
	vnode_t dvp = NULLVP;
	vfs_context_t ctx = vfs_context_current();
	int error;

	if (!(vp->v_mount->mnt_kern_flag & MNTK_PATH_FROM_ID)) {
		goto out;
	}

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_parentid);

	/* Get the vnode's parent id from the file system. */
	error = vnode_getattr(vp, &va, ctx);
	if (error || !VATTR_IS_SUPPORTED(&va, va_parentid)) {
		goto out;
	}

	/*
	 * Ask the file system for the parent vnode.
	 * We ignore the error here as we don't expect the parent vnode to be
	 * populated on error.
	 */
	(void)VFS_VGET(vp->v_mount, (ino64_t)va.va_parentid, &dvp, ctx);

out:
	return dvp;
}

/*
 * Break a directory's lease.
 * If 'need_parent' is true, then the parent is obtained via vnode_getparent()
 * (or vnode_getparent_byid()) on the provided 'vp'.
 */
void
vnode_breakdirlease(vnode_t vp, bool need_parent, uint32_t oflags)
{
	vnode_t dvp;

	if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR) ||
	    (vp == rootvnode)) {
		return;
	}

	/*
	 * If the parent is not provided, first try to get it from the name
	 * cache. Failing that, ask the file system for the parent vnode.
	 * This is just a best effort as both attempts could still fail.
	 */
	if (need_parent) {
		dvp = vnode_getparent(vp);
		if (__improbable(dvp == NULLVP)) {
			dvp = vnode_getparent_byid(vp);
		}
	} else {
		dvp = vp;
	}

	if (__probable(dvp != NULLVP)) {
		/* Always break dir leases. */
		(void)vnode_breaklease(dvp, oflags, vfs_context_current());
	}

	if (need_parent && (dvp != NULLVP)) {
		vnode_put(dvp);
	}
}

/*
 * Revoke all lease(s) in place for the file.
 * This is called when the vnode is reclaimed.
 */
void
vnode_revokelease(vnode_t vp, bool locked)
{
	file_lease_t fl, fl_tmp;
	bool need_wakeup = false;

	if ((vnode_vtype(vp) != VREG && vnode_vtype(vp) != VDIR)) {
		return;
	}

	if (!locked) {
		vnode_lock(vp);
	}

	LIST_FOREACH_SAFE(fl, &vp->v_leases, fl_link, fl_tmp) {
		LIST_REMOVE(fl, fl_link);
		file_lease_free(fl);
		need_wakeup = true;
	}

	/* Wake up any lease breaker(s) that might currently be blocked. */
	if (__improbable(need_wakeup)) {
		wakeup(&vp->v_leases);
	}

	if (!locked) {
		vnode_unlock(vp);
	}
}

#endif /* CONFIG_FILE_LEASES */

errno_t
vnode_rdadvise(vnode_t vp, off_t offset, int len, vfs_context_t ctx)
{
	struct radvisory ra_struct;

	assert(vp);

	if (offset < 0 || len < 0) {
		return EINVAL;
	}

	ra_struct.ra_offset = offset;
	ra_struct.ra_count = len;

	return VNOP_IOCTL(vp, F_RDADVISE, (caddr_t)&ra_struct, 0, ctx);
}
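
/*
 * Illustrative userspace sketch (not part of the original source): the same
 * read-advisory hint issued from user space with the public F_RDADVISE
 * fcntl, which uses the struct radvisory layout seen above.
 */
#if 0
#include <fcntl.h>

static int
example_readahead_hint(int fd, off_t offset, int count)
{
	struct radvisory ra;

	ra.ra_offset = offset; /* where the upcoming reads start */
	ra.ra_count = count;   /* how many bytes to pre-fetch */

	return fcntl(fd, F_RDADVISE, &ra);
}
#endif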