xref: /xnu-11215/bsd/vfs/kpi_vfs.c (revision 8d741a5d)
1 /*
2  * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*
30  * Copyright (c) 1989, 1993
31  *	The Regents of the University of California.  All rights reserved.
32  * (c) UNIX System Laboratories, Inc.
33  * All or some portions of this file are derived from material licensed
34  * to the University of California by American Telephone and Telegraph
35  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
36  * the permission of UNIX System Laboratories, Inc.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. All advertising materials mentioning features or use of this software
47  *    must display the following acknowledgement:
48  *	This product includes software developed by the University of
49  *	California, Berkeley and its contributors.
50  * 4. Neither the name of the University nor the names of its contributors
51  *    may be used to endorse or promote products derived from this software
52  *    without specific prior written permission.
53  *
54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64  * SUCH DAMAGE.
65  *
66  *	@(#)kpi_vfs.c
67  */
68 /*
69  * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
70  * support for mandatory and extensible security protections.  This notice
71  * is included in support of clause 2.2 (b) of the Apple Public License,
72  * Version 2.0.
73  */
74 
75 /*
76  * External virtual filesystem routines
77  */
78 
79 
80 #include <sys/param.h>
81 #include <sys/systm.h>
82 #include <sys/proc_internal.h>
83 #include <sys/kauth.h>
84 #include <sys/mount.h>
85 #include <sys/mount_internal.h>
86 #include <sys/time.h>
87 #include <sys/disk.h>
88 #include <sys/vnode_internal.h>
89 #include <sys/stat.h>
90 #include <sys/namei.h>
91 #include <sys/ucred.h>
92 #include <sys/buf.h>
93 #include <sys/errno.h>
94 #include <kern/kalloc.h>
95 #include <sys/domain.h>
96 #include <sys/mbuf.h>
97 #include <sys/syslog.h>
98 #include <sys/ubc.h>
99 #include <sys/vm.h>
100 #include <sys/sysctl.h>
101 #include <sys/filedesc.h>
102 #include <sys/event.h>
103 #include <sys/fsevents.h>
104 #include <sys/user.h>
105 #include <sys/lockf.h>
106 #include <sys/xattr.h>
107 #include <sys/kdebug.h>
108 
109 #include <kern/assert.h>
110 #include <kern/zalloc.h>
111 #include <kern/task.h>
112 #include <kern/policy_internal.h>
113 
114 #include <libkern/OSByteOrder.h>
115 
116 #include <miscfs/specfs/specdev.h>
117 
118 #include <mach/mach_types.h>
119 #include <mach/memory_object_types.h>
120 #include <mach/task.h>
121 
122 #if CONFIG_MACF
123 #include <security/mac_framework.h>
124 #endif
125 
126 #if NULLFS
127 #include <miscfs/nullfs/nullfs.h>
128 #endif
129 
130 #include <sys/sdt.h>
131 
132 #define ESUCCESS 0
133 #undef mount_t
134 #undef vnode_t
135 
136 #define COMPAT_ONLY
137 
138 #define NATIVE_XATTR(VP)  \
139 	((VP)->v_mount ? (VP)->v_mount->mnt_kern_flag & MNTK_EXTENDED_ATTRS : 0)
140 
141 #if CONFIG_APPLEDOUBLE
142 static void xattrfile_remove(vnode_t dvp, const char *basename,
143     vfs_context_t ctx, int force);
144 static void xattrfile_setattr(vnode_t dvp, const char * basename,
145     struct vnode_attr * vap, vfs_context_t ctx);
146 #endif /* CONFIG_APPLEDOUBLE */
147 
148 extern lck_rw_t rootvnode_rw_lock;
149 
150 static errno_t post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp);
151 
152 KALLOC_TYPE_DEFINE(KT_VFS_CONTEXT, struct vfs_context, KT_PRIV_ACCT);
153 
154 extern int fstypenumstart;
155 char vfs_typenum_arr[13];
156 
157 LCK_GRP_DECLARE(typenum_arr_grp, "typenum array group");
158 LCK_MTX_DECLARE(vfs_typenum_mtx, &typenum_arr_grp);
159 /*
160  * vnode_setneedinactive
161  *
162  * Description: Indicate that when the last iocount on this vnode goes away,
163  *              and the usecount is also zero, we should inform the filesystem
164  *              via VNOP_INACTIVE.
165  *
166  * Parameters:  vnode_t		vnode to mark
167  *
168  * Returns:     Nothing
169  *
170  * Notes:       Notably used when we're deleting a file--we need not have a
171  *              usecount, so VNOP_INACTIVE may not get called by anyone.  We
172  *              want it called when we drop our iocount.
173  */
174 void
175 vnode_setneedinactive(vnode_t vp)
176 {
177 	cache_purge(vp);
178 
179 	vnode_lock_spin(vp);
180 	vp->v_lflag |= VL_NEEDINACTIVE;
181 	vnode_unlock(vp);
182 }
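
/*
 * Illustrative sketch (not part of this file): a filesystem's remove path
 * might mark the target vnode so that VNOP_INACTIVE still runs once the
 * caller's iocount is dropped, even when no usecount is held.  The
 * example_fs_* names below are hypothetical.
 *
 *	static int
 *	example_fs_vnop_remove(struct vnop_remove_args *ap)
 *	{
 *		int error = example_fs_unlink(ap->a_dvp, ap->a_vp, ap->a_cnp);
 *
 *		if (error == 0) {
 *			vnode_setneedinactive(ap->a_vp);
 *		}
 *		return error;
 *	}
 */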
183 
184 
185 /* ====================================================================== */
186 /* ************  EXTERNAL KERNEL APIS  ********************************** */
187 /* ====================================================================== */
188 
189 /*
190  * implementations of exported VFS operations
191  */
192 int
193 VFS_MOUNT(mount_t mp, vnode_t devvp, user_addr_t data, vfs_context_t ctx)
194 {
195 	int error;
196 
197 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_mount == 0)) {
198 		return ENOTSUP;
199 	}
200 
201 	if (vfs_context_is64bit(ctx)) {
202 		if (vfs_64bitready(mp)) {
203 			error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
204 		} else {
205 			error = ENOTSUP;
206 		}
207 	} else {
208 		error = (*mp->mnt_op->vfs_mount)(mp, devvp, data, ctx);
209 	}
210 
211 	return error;
212 }
213 
214 int
215 VFS_START(mount_t mp, int flags, vfs_context_t ctx)
216 {
217 	int error;
218 
219 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_start == 0)) {
220 		return ENOTSUP;
221 	}
222 
223 	error = (*mp->mnt_op->vfs_start)(mp, flags, ctx);
224 
225 	return error;
226 }
227 
228 int
229 VFS_UNMOUNT(mount_t mp, int flags, vfs_context_t ctx)
230 {
231 	int error;
232 
233 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_unmount == 0)) {
234 		return ENOTSUP;
235 	}
236 
237 	error = (*mp->mnt_op->vfs_unmount)(mp, flags, ctx);
238 
239 	return error;
240 }
241 
242 /*
243  * Returns:	0			Success
244  *		ENOTSUP			Not supported
245  *		<vfs_root>:ENOENT
246  *		<vfs_root>:???
247  *
248  * Note:	The return codes from the underlying VFS's root routine can't
249  *		be fully enumerated here, since third party VFS authors may not
250  *		limit their error returns to the ones documented here, even
251  *		though this may result in some programs functioning incorrectly.
252  *
253  *		The return codes documented above are those which may currently
254  *		be returned by HFS from hfs_vfs_root, which is a simple wrapper
255  *		for a call to hfs_vget on the volume mount point, not including
256  *		additional error codes which may be propagated from underlying
257  *		routines called by hfs_vget.
258  */
259 int
260 VFS_ROOT(mount_t mp, struct vnode  ** vpp, vfs_context_t ctx)
261 {
262 	int error;
263 
264 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_root == 0)) {
265 		return ENOTSUP;
266 	}
267 
268 	if (ctx == NULL) {
269 		ctx = vfs_context_current();
270 	}
271 
272 	error = (*mp->mnt_op->vfs_root)(mp, vpp, ctx);
273 
274 	return error;
275 }
276 
277 int
278 VFS_QUOTACTL(mount_t mp, int cmd, uid_t uid, caddr_t datap, vfs_context_t ctx)
279 {
280 	int error;
281 
282 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_quotactl == 0)) {
283 		return ENOTSUP;
284 	}
285 
286 	error = (*mp->mnt_op->vfs_quotactl)(mp, cmd, uid, datap, ctx);
287 
288 	return error;
289 }
290 
291 int
292 VFS_GETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
293 {
294 	int error;
295 
296 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_getattr == 0)) {
297 		return ENOTSUP;
298 	}
299 
300 	if (ctx == NULL) {
301 		ctx = vfs_context_current();
302 	}
303 
304 	error = (*mp->mnt_op->vfs_getattr)(mp, vfa, ctx);
305 
306 	return error;
307 }
308 
309 int
310 VFS_SETATTR(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
311 {
312 	int error;
313 
314 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_setattr == 0)) {
315 		return ENOTSUP;
316 	}
317 
318 	if (ctx == NULL) {
319 		ctx = vfs_context_current();
320 	}
321 
322 	error = (*mp->mnt_op->vfs_setattr)(mp, vfa, ctx);
323 
324 	return error;
325 }
326 
327 int
328 VFS_SYNC(mount_t mp, int flags, vfs_context_t ctx)
329 {
330 	int error;
331 
332 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_sync == 0)) {
333 		return ENOTSUP;
334 	}
335 
336 	if (ctx == NULL) {
337 		ctx = vfs_context_current();
338 	}
339 
340 	error = (*mp->mnt_op->vfs_sync)(mp, flags, ctx);
341 
342 	return error;
343 }
344 
345 int
346 VFS_VGET(mount_t mp, ino64_t ino, struct vnode **vpp, vfs_context_t ctx)
347 {
348 	int error;
349 
350 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget == 0)) {
351 		return ENOTSUP;
352 	}
353 
354 	if (ctx == NULL) {
355 		ctx = vfs_context_current();
356 	}
357 
358 	error = (*mp->mnt_op->vfs_vget)(mp, ino, vpp, ctx);
359 
360 	return error;
361 }
362 
363 int
364 VFS_FHTOVP(mount_t mp, int fhlen, unsigned char *fhp, vnode_t *vpp, vfs_context_t ctx)
365 {
366 	int error;
367 
368 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_fhtovp == 0)) {
369 		return ENOTSUP;
370 	}
371 
372 	if (ctx == NULL) {
373 		ctx = vfs_context_current();
374 	}
375 
376 	error = (*mp->mnt_op->vfs_fhtovp)(mp, fhlen, fhp, vpp, ctx);
377 
378 	return error;
379 }
380 
381 int
382 VFS_VPTOFH(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t ctx)
383 {
384 	int error;
385 
386 	if ((vp->v_mount == dead_mountp) || (vp->v_mount->mnt_op->vfs_vptofh == 0)) {
387 		return ENOTSUP;
388 	}
389 
390 	if (ctx == NULL) {
391 		ctx = vfs_context_current();
392 	}
393 
394 	error = (*vp->v_mount->mnt_op->vfs_vptofh)(vp, fhlenp, fhp, ctx);
395 
396 	return error;
397 }
398 
399 int
400 VFS_IOCTL(struct mount *mp, u_long command, caddr_t data,
401     int flags, vfs_context_t context)
402 {
403 	if (mp == dead_mountp || !mp->mnt_op->vfs_ioctl) {
404 		return ENOTSUP;
405 	}
406 
407 	return mp->mnt_op->vfs_ioctl(mp, command, data, flags,
408 	           context ?: vfs_context_current());
409 }
410 
411 int
412 VFS_VGET_SNAPDIR(mount_t mp, vnode_t *vpp, vfs_context_t ctx)
413 {
414 	int error;
415 
416 	if ((mp == dead_mountp) || (mp->mnt_op->vfs_vget_snapdir == 0)) {
417 		return ENOTSUP;
418 	}
419 
420 	if (ctx == NULL) {
421 		ctx = vfs_context_current();
422 	}
423 
424 	error = (*mp->mnt_op->vfs_vget_snapdir)(mp, vpp, ctx);
425 
426 	return error;
427 }
428 
429 /* returns the cached throttle mask for the mount_t */
430 uint64_t
431 vfs_throttle_mask(mount_t mp)
432 {
433 	return mp->mnt_throttle_mask;
434 }
435 
436 /* returns a  copy of vfs type name for the mount_t */
437 void
438 vfs_name(mount_t mp, char *buffer)
439 {
440 	strncpy(buffer, mp->mnt_vtable->vfc_name, MFSNAMELEN);
441 }
442 
443 /* returns  vfs type number for the mount_t */
444 int
445 vfs_typenum(mount_t mp)
446 {
447 	return mp->mnt_vtable->vfc_typenum;
448 }
449 
450 /* Safe to cast to "struct label*"; returns "void*" to limit dependence of mount.h on security headers.  */
451 void*
452 vfs_mntlabel(mount_t mp)
453 {
454 	return (void*)mac_mount_label(mp);
455 }
456 
457 uint64_t
458 vfs_mount_id(mount_t mp)
459 {
460 	return mp->mnt_mount_id;
461 }
462 
463 /* returns command modifier flags of mount_t ie. MNT_CMDFLAGS */
464 uint64_t
465 vfs_flags(mount_t mp)
466 {
467 	return (uint64_t)(mp->mnt_flag & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
468 }
469 
470 /* set any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
471 void
472 vfs_setflags(mount_t mp, uint64_t flags)
473 {
474 	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
475 
476 	mount_lock(mp);
477 	mp->mnt_flag |= lflags;
478 	mount_unlock(mp);
479 }
480 
481 /* clear any of the command modifier flags(MNT_CMDFLAGS) in mount_t */
482 void
483 vfs_clearflags(mount_t mp, uint64_t flags)
484 {
485 	uint32_t lflags = (uint32_t)(flags & (MNT_CMDFLAGS | MNT_VISFLAGMASK));
486 
487 	mount_lock(mp);
488 	mp->mnt_flag &= ~lflags;
489 	mount_unlock(mp);
490 }
491 
492 /* Is the mount_t ronly and upgrade read/write requested? */
493 int
494 vfs_iswriteupgrade(mount_t mp) /* ronly &&  MNTK_WANTRDWR */
495 {
496 	return (mp->mnt_flag & MNT_RDONLY) && (mp->mnt_kern_flag & MNTK_WANTRDWR);
497 }
498 
499 
500 /* Is the mount_t mounted ronly */
501 int
502 vfs_isrdonly(mount_t mp)
503 {
504 	return mp->mnt_flag & MNT_RDONLY;
505 }
506 
507 /* Is the mount_t mounted for filesystem synchronous writes? */
508 int
509 vfs_issynchronous(mount_t mp)
510 {
511 	return mp->mnt_flag & MNT_SYNCHRONOUS;
512 }
513 
514 /* Is the mount_t mounted read/write? */
515 int
516 vfs_isrdwr(mount_t mp)
517 {
518 	return (mp->mnt_flag & MNT_RDONLY) == 0;
519 }
520 
521 
522 /* Is mount_t marked for update (ie MNT_UPDATE) */
523 int
524 vfs_isupdate(mount_t mp)
525 {
526 	return mp->mnt_flag & MNT_UPDATE;
527 }
528 
529 
530 /* Is mount_t marked for reload (ie MNT_RELOAD) */
531 int
532 vfs_isreload(mount_t mp)
533 {
534 	return (mp->mnt_flag & MNT_UPDATE) && (mp->mnt_flag & MNT_RELOAD);
535 }
536 
537 /* Is mount_t marked for forced unmount (ie MNT_FORCE or MNTK_FRCUNMOUNT) */
538 int
539 vfs_isforce(mount_t mp)
540 {
541 	if (mp->mnt_lflag & MNT_LFORCE) {
542 		return 1;
543 	} else {
544 		return 0;
545 	}
546 }
547 
548 int
549 vfs_isunmount(mount_t mp)
550 {
551 	if ((mp->mnt_lflag & MNT_LUNMOUNT)) {
552 		return 1;
553 	} else {
554 		return 0;
555 	}
556 }
557 
558 int
559 vfs_64bitready(mount_t mp)
560 {
561 	if ((mp->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
562 		return 1;
563 	} else {
564 		return 0;
565 	}
566 }
567 
568 
569 int
570 vfs_authcache_ttl(mount_t mp)
571 {
572 	if ((mp->mnt_kern_flag & (MNTK_AUTH_OPAQUE | MNTK_AUTH_CACHE_TTL))) {
573 		return mp->mnt_authcache_ttl;
574 	} else {
575 		return CACHED_RIGHT_INFINITE_TTL;
576 	}
577 }
578 
579 void
580 vfs_setauthcache_ttl(mount_t mp, int ttl)
581 {
582 	mount_lock(mp);
583 	mp->mnt_kern_flag |= MNTK_AUTH_CACHE_TTL;
584 	mp->mnt_authcache_ttl = ttl;
585 	mount_unlock(mp);
586 }
587 
588 void
589 vfs_clearauthcache_ttl(mount_t mp)
590 {
591 	mount_lock(mp);
592 	mp->mnt_kern_flag &= ~MNTK_AUTH_CACHE_TTL;
593 	/*
594 	 * back to the default TTL value in case
595 	 * MNTK_AUTH_OPAQUE is set on this mount
596 	 */
597 	mp->mnt_authcache_ttl = CACHED_LOOKUP_RIGHT_TTL;
598 	mount_unlock(mp);
599 }
600 
601 int
602 vfs_authopaque(mount_t mp)
603 {
604 	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE)) {
605 		return 1;
606 	} else {
607 		return 0;
608 	}
609 }
610 
611 int
612 vfs_authopaqueaccess(mount_t mp)
613 {
614 	if ((mp->mnt_kern_flag & MNTK_AUTH_OPAQUE_ACCESS)) {
615 		return 1;
616 	} else {
617 		return 0;
618 	}
619 }
620 
621 void
622 vfs_setauthopaque(mount_t mp)
623 {
624 	mount_lock(mp);
625 	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE;
626 	mount_unlock(mp);
627 }
628 
629 void
630 vfs_setauthopaqueaccess(mount_t mp)
631 {
632 	mount_lock(mp);
633 	mp->mnt_kern_flag |= MNTK_AUTH_OPAQUE_ACCESS;
634 	mount_unlock(mp);
635 }
636 
637 void
638 vfs_clearauthopaque(mount_t mp)
639 {
640 	mount_lock(mp);
641 	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE;
642 	mount_unlock(mp);
643 }
644 
645 void
646 vfs_clearauthopaqueaccess(mount_t mp)
647 {
648 	mount_lock(mp);
649 	mp->mnt_kern_flag &= ~MNTK_AUTH_OPAQUE_ACCESS;
650 	mount_unlock(mp);
651 }
652 
653 void
654 vfs_setextendedsecurity(mount_t mp)
655 {
656 	mount_lock(mp);
657 	mp->mnt_kern_flag |= MNTK_EXTENDED_SECURITY;
658 	mount_unlock(mp);
659 }
660 
661 void
662 vfs_setmntsystem(mount_t mp)
663 {
664 	mount_lock(mp);
665 	mp->mnt_kern_flag |= MNTK_SYSTEM;
666 	mount_unlock(mp);
667 }
668 
669 void
670 vfs_setmntsystemdata(mount_t mp)
671 {
672 	mount_lock(mp);
673 	mp->mnt_kern_flag |= MNTK_SYSTEMDATA;
674 	mount_unlock(mp);
675 }
676 
677 void
678 vfs_setmntswap(mount_t mp)
679 {
680 	mount_lock(mp);
681 	mp->mnt_kern_flag |= (MNTK_SYSTEM | MNTK_SWAP_MOUNT);
682 	mount_unlock(mp);
683 }
684 
685 void
686 vfs_clearextendedsecurity(mount_t mp)
687 {
688 	mount_lock(mp);
689 	mp->mnt_kern_flag &= ~MNTK_EXTENDED_SECURITY;
690 	mount_unlock(mp);
691 }
692 
693 void
694 vfs_setnoswap(mount_t mp)
695 {
696 	mount_lock(mp);
697 	mp->mnt_kern_flag |= MNTK_NOSWAP;
698 	mount_unlock(mp);
699 }
700 
701 void
702 vfs_clearnoswap(mount_t mp)
703 {
704 	mount_lock(mp);
705 	mp->mnt_kern_flag &= ~MNTK_NOSWAP;
706 	mount_unlock(mp);
707 }
708 
709 int
710 vfs_extendedsecurity(mount_t mp)
711 {
712 	return mp->mnt_kern_flag & MNTK_EXTENDED_SECURITY;
713 }
714 
715 /* returns the max size of short symlink in this mount_t */
716 uint32_t
717 vfs_maxsymlen(mount_t mp)
718 {
719 	return mp->mnt_maxsymlinklen;
720 }
721 
722 /* set  max size of short symlink on mount_t */
723 void
724 vfs_setmaxsymlen(mount_t mp, uint32_t symlen)
725 {
726 	mp->mnt_maxsymlinklen = symlen;
727 }
728 
729 boolean_t
730 vfs_is_basesystem(mount_t mp)
731 {
732 	return ((mp->mnt_supl_kern_flag & MNTK_SUPL_BASESYSTEM) == 0) ? false : true;
733 }
734 
735 /* return a pointer to the RO vfs_statfs associated with mount_t */
736 struct vfsstatfs *
737 vfs_statfs(mount_t mp)
738 {
739 	return &mp->mnt_vfsstat;
740 }
741 
742 int
743 vfs_getattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
744 {
745 	int             error;
746 
747 	if ((error = VFS_GETATTR(mp, vfa, ctx)) != 0) {
748 		return error;
749 	}
750 
751 	/*
752 	 * If we have a filesystem create time, use it to default some others.
753 	 */
754 	if (VFSATTR_IS_SUPPORTED(vfa, f_create_time)) {
755 		if (VFSATTR_IS_ACTIVE(vfa, f_modify_time) && !VFSATTR_IS_SUPPORTED(vfa, f_modify_time)) {
756 			VFSATTR_RETURN(vfa, f_modify_time, vfa->f_create_time);
757 		}
758 	}
759 
760 	return 0;
761 }
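
/*
 * Illustrative sketch (not part of this file): a typical caller initializes
 * the vfs_attr, marks the attributes it wants, and then checks which of them
 * the filesystem reported as supported.
 *
 *	struct vfs_attr va;
 *	int error;
 *
 *	VFSATTR_INIT(&va);
 *	VFSATTR_WANTED(&va, f_capabilities);
 *	VFSATTR_WANTED(&va, f_modify_time);
 *	error = vfs_getattr(mp, &va, vfs_context_current());
 *	if (error == 0 && VFSATTR_IS_SUPPORTED(&va, f_modify_time)) {
 *		(use va.f_modify_time)
 *	}
 */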
762 
763 int
764 vfs_setattr(mount_t mp, struct vfs_attr *vfa, vfs_context_t ctx)
765 {
766 	int error;
767 
768 	/*
769 	 * with a read-only system volume, we need to allow rename of the root volume
770 	 * even if it's read-only.  Don't return EROFS here if setattr changes only
771 	 * the volume name
772 	 */
773 	if (vfs_isrdonly(mp) &&
774 	    !((strcmp(mp->mnt_vfsstat.f_fstypename, "apfs") == 0) && (vfa->f_active == VFSATTR_f_vol_name))) {
775 		return EROFS;
776 	}
777 
778 	error = VFS_SETATTR(mp, vfa, ctx);
779 
780 	/*
781 	 * If we had alternate ways of setting vfs attributes, we'd
782 	 * fall back here.
783 	 */
784 
785 	return error;
786 }
787 
788 /* return the private data handle stored in mount_t */
789 void *
790 vfs_fsprivate(mount_t mp)
791 {
792 	return mp->mnt_data;
793 }
794 
795 /* set the private data handle in mount_t */
796 void
797 vfs_setfsprivate(mount_t mp, void *mntdata)
798 {
799 	mount_lock(mp);
800 	mp->mnt_data = mntdata;
801 	mount_unlock(mp);
802 }
803 
804 /* query whether the mount point supports native EAs */
805 int
806 vfs_nativexattrs(mount_t mp)
807 {
808 	return mp->mnt_kern_flag & MNTK_EXTENDED_ATTRS;
809 }
810 
811 /*
812  * return the block size of the underlying
813  * device associated with mount_t
814  */
815 int
816 vfs_devblocksize(mount_t mp)
817 {
818 	return mp->mnt_devblocksize;
819 }
820 
821 /*
822  * Returns vnode with an iocount that must be released with vnode_put()
823  */
824 vnode_t
825 vfs_vnodecovered(mount_t mp)
826 {
827 	vnode_t vp = mp->mnt_vnodecovered;
828 	if ((vp == NULL) || (vnode_getwithref(vp) != 0)) {
829 		return NULL;
830 	} else {
831 		return vp;
832 	}
833 }
834 
835 int
836 vfs_setdevvp(mount_t mp, vnode_t devvp)
837 {
838 	if (mp == NULL) {
839 		return 0;
840 	}
841 
842 	if (devvp) {
843 		if (devvp->v_type != VBLK) {
844 			return EINVAL;
845 		}
846 
847 		if (major(devvp->v_rdev) >= nblkdev) {
848 			return ENXIO;
849 		}
850 	}
851 
852 	mp->mnt_devvp = devvp;
853 
854 	return 0;
855 }
856 
857 /*
858  * Returns device vnode backing a mountpoint with an iocount (if valid vnode exists).
859  * The iocount must be released with vnode_put().  Note that this KPI is subtle
860  * with respect to the validity of using this device vnode for anything substantial
861  * (which is discouraged).  If commands are sent to the device driver without
862  * taking proper steps to ensure that the device is still open, chaos may ensue.
863  * Similarly, this routine should only be called if there is some guarantee that
864  * the mount itself is still valid.
865  */
866 vnode_t
867 vfs_devvp(mount_t mp)
868 {
869 	vnode_t vp = mp->mnt_devvp;
870 
871 	if ((vp != NULLVP) && (vnode_get(vp) == 0)) {
872 		return vp;
873 	}
874 
875 	return NULLVP;
876 }
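
/*
 * Illustrative sketch (not part of this file): a caller that does use the
 * device vnode should drop the iocount promptly with vnode_put(), and must
 * independently guarantee that the mount (and the underlying device open)
 * stays valid for the duration.
 *
 *	vnode_t devvp = vfs_devvp(mp);
 *
 *	if (devvp != NULLVP) {
 *		(issue the request against devvp)
 *		vnode_put(devvp);
 *	}
 */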
877 
878 /*
879  * return the io attributes associated with mount_t
880  */
881 void
882 vfs_ioattr(mount_t mp, struct vfsioattr *ioattrp)
883 {
884 	ioattrp->io_reserved[0] = NULL;
885 	ioattrp->io_reserved[1] = NULL;
886 	if (mp == NULL) {
887 		ioattrp->io_maxreadcnt  = MAXPHYS;
888 		ioattrp->io_maxwritecnt = MAXPHYS;
889 		ioattrp->io_segreadcnt  = 32;
890 		ioattrp->io_segwritecnt = 32;
891 		ioattrp->io_maxsegreadsize  = MAXPHYS;
892 		ioattrp->io_maxsegwritesize = MAXPHYS;
893 		ioattrp->io_devblocksize = DEV_BSIZE;
894 		ioattrp->io_flags = 0;
895 		ioattrp->io_max_swappin_available = 0;
896 	} else {
897 		ioattrp->io_maxreadcnt  = mp->mnt_maxreadcnt;
898 		ioattrp->io_maxwritecnt = mp->mnt_maxwritecnt;
899 		ioattrp->io_segreadcnt  = mp->mnt_segreadcnt;
900 		ioattrp->io_segwritecnt = mp->mnt_segwritecnt;
901 		ioattrp->io_maxsegreadsize  = mp->mnt_maxsegreadsize;
902 		ioattrp->io_maxsegwritesize = mp->mnt_maxsegwritesize;
903 		ioattrp->io_devblocksize = mp->mnt_devblocksize;
904 		ioattrp->io_flags = mp->mnt_ioflags;
905 		ioattrp->io_max_swappin_available = mp->mnt_max_swappin_available;
906 	}
907 }
908 
909 
910 /*
911  * set the IO attributes associated with mount_t
912  */
913 void
914 vfs_setioattr(mount_t mp, struct vfsioattr * ioattrp)
915 {
916 	if (mp == NULL) {
917 		return;
918 	}
919 	mp->mnt_maxreadcnt  = ioattrp->io_maxreadcnt;
920 	mp->mnt_maxwritecnt = ioattrp->io_maxwritecnt;
921 	mp->mnt_segreadcnt  = ioattrp->io_segreadcnt;
922 	mp->mnt_segwritecnt = ioattrp->io_segwritecnt;
923 	mp->mnt_maxsegreadsize = ioattrp->io_maxsegreadsize;
924 	mp->mnt_maxsegwritesize = ioattrp->io_maxsegwritesize;
925 	mp->mnt_devblocksize = ioattrp->io_devblocksize;
926 	mp->mnt_ioflags = ioattrp->io_flags;
927 	mp->mnt_max_swappin_available = ioattrp->io_max_swappin_available;
928 }
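
/*
 * Illustrative sketch (not part of this file): the two routines above are
 * normally used as a read-modify-write pair so that unrelated fields keep
 * their current values.
 *
 *	struct vfsioattr ioattr;
 *
 *	vfs_ioattr(mp, &ioattr);
 *	ioattr.io_maxreadcnt = ioattr.io_maxwritecnt = 512 * 1024;
 *	vfs_setioattr(mp, &ioattr);
 */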
929 
930 /*
931  * Add a new filesystem into the kernel, as described by the passed-in
932  * vfs_fsentry structure. It fills in the vnode
933  * dispatch vector that is to be passed in when vnodes are created.
934  * It returns a handle which is to be used when the FS is to be removed.
935  */
936 typedef int (*PFI)(void *);
937 extern int vfs_opv_numops;
938 errno_t
939 vfs_fsadd(struct vfs_fsentry *vfe, vfstable_t *handle)
940 {
941 	struct vfstable *newvfstbl = NULL;
942 	int     i, j;
943 	int(***opv_desc_vector_p)(void *);
944 	int(**opv_desc_vector)(void *);
945 	const struct vnodeopv_entry_desc        *opve_descp;
946 	int desccount;
947 	int descsize;
948 	PFI *descptr;
949 
950 	/*
951 	 * This routine is responsible for all the initialization that would
952 	 * ordinarily be done as part of the system startup;
953 	 */
954 
955 	if (vfe == (struct vfs_fsentry *)0) {
956 		return EINVAL;
957 	}
958 
959 	desccount = vfe->vfe_vopcnt;
960 	if ((desccount <= 0) || ((desccount > 8)) || (vfe->vfe_vfsops == (struct vfsops *)NULL)
961 	    || (vfe->vfe_opvdescs == (struct vnodeopv_desc **)NULL)) {
962 		return EINVAL;
963 	}
964 
965 	/* Non-threadsafe filesystems are not supported */
966 	if ((vfe->vfe_flags &  (VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK)) == 0) {
967 		return EINVAL;
968 	}
969 
970 	newvfstbl = kalloc_type(struct vfstable, Z_WAITOK | Z_ZERO);
971 	newvfstbl->vfc_vfsops = vfe->vfe_vfsops;
972 	strncpy(&newvfstbl->vfc_name[0], vfe->vfe_fsname, MFSNAMELEN);
973 	if ((vfe->vfe_flags & VFS_TBLNOTYPENUM)) {
974 		int tmp;
975 		int found = 0;
976 		lck_mtx_lock(&vfs_typenum_mtx);
977 		for (tmp = fstypenumstart; tmp < OID_AUTO_START; tmp++) {
978 			if (isclr(vfs_typenum_arr, tmp)) {
979 				newvfstbl->vfc_typenum = tmp;
980 				setbit(vfs_typenum_arr, tmp);
981 				found = 1;
982 				break;
983 			}
984 		}
985 		if (!found) {
986 			lck_mtx_unlock(&vfs_typenum_mtx);
987 			return EINVAL;
988 		}
989 		if (maxvfstypenum < OID_AUTO_START) {
990 			/* getvfsbyname checks up to but not including maxvfstypenum */
991 			maxvfstypenum = newvfstbl->vfc_typenum + 1;
992 		}
993 		lck_mtx_unlock(&vfs_typenum_mtx);
994 	} else {
995 		newvfstbl->vfc_typenum = vfe->vfe_fstypenum;
996 		lck_mtx_lock(&vfs_typenum_mtx);
997 		setbit(vfs_typenum_arr, newvfstbl->vfc_typenum);
998 		if (newvfstbl->vfc_typenum >= maxvfstypenum) {
999 			maxvfstypenum = newvfstbl->vfc_typenum + 1;
1000 		}
1001 		lck_mtx_unlock(&vfs_typenum_mtx);
1002 	}
1003 
1004 
1005 	newvfstbl->vfc_refcount = 0;
1006 	newvfstbl->vfc_flags = 0;
1007 	newvfstbl->vfc_mountroot = NULL;
1008 	newvfstbl->vfc_next = NULL;
1009 	newvfstbl->vfc_vfsflags = 0;
1010 	if (vfe->vfe_flags &  VFS_TBL64BITREADY) {
1011 		newvfstbl->vfc_vfsflags |= VFC_VFS64BITREADY;
1012 	}
1013 	if (vfe->vfe_flags &  VFS_TBLVNOP_PAGEINV2) {
1014 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEINV2;
1015 	}
1016 	if (vfe->vfe_flags &  VFS_TBLVNOP_PAGEOUTV2) {
1017 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_PAGEOUTV2;
1018 	}
1019 	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) == VFS_TBLLOCALVOL) {
1020 		newvfstbl->vfc_flags |= MNT_LOCAL;
1021 	}
1022 	if ((vfe->vfe_flags & VFS_TBLLOCALVOL) && (vfe->vfe_flags & VFS_TBLGENERICMNTARGS) == 0) {
1023 		newvfstbl->vfc_vfsflags |= VFC_VFSLOCALARGS;
1024 	} else {
1025 		newvfstbl->vfc_vfsflags |= VFC_VFSGENERICARGS;
1026 	}
1027 
1028 	if (vfe->vfe_flags &  VFS_TBLNATIVEXATTR) {
1029 		newvfstbl->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1030 	}
1031 	if (vfe->vfe_flags &  VFS_TBLUNMOUNT_PREFLIGHT) {
1032 		newvfstbl->vfc_vfsflags |= VFC_VFSPREFLIGHT;
1033 	}
1034 	if (vfe->vfe_flags &  VFS_TBLREADDIR_EXTENDED) {
1035 		newvfstbl->vfc_vfsflags |= VFC_VFSREADDIR_EXTENDED;
1036 	}
1037 	if (vfe->vfe_flags & VFS_TBLNOMACLABEL) {
1038 		newvfstbl->vfc_vfsflags |= VFC_VFSNOMACLABEL;
1039 	}
1040 	if (vfe->vfe_flags & VFS_TBLVNOP_NOUPDATEID_RENAME) {
1041 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_NOUPDATEID_RENAME;
1042 	}
1043 	if (vfe->vfe_flags & VFS_TBLVNOP_SECLUDE_RENAME) {
1044 		newvfstbl->vfc_vfsflags |= VFC_VFSVNOP_SECLUDE_RENAME;
1045 	}
1046 	if (vfe->vfe_flags & VFS_TBLCANMOUNTROOT) {
1047 		newvfstbl->vfc_vfsflags |= VFC_VFSCANMOUNTROOT;
1048 	}
1049 
1050 	/*
1051 	 * Allocate and init the vectors.
1052 	 * Also handle backwards compatibility.
1053 	 *
1054 	 * We allocate one large block to hold all <desccount>
1055 	 * vnode operation vectors stored contiguously.
1056 	 */
1057 	/* XXX - shouldn't be M_TEMP */
1058 
1059 	descsize = desccount * vfs_opv_numops;
1060 	descptr = kalloc_type(PFI, descsize, Z_WAITOK | Z_ZERO);
1061 
1062 	newvfstbl->vfc_descptr = descptr;
1063 	newvfstbl->vfc_descsize = descsize;
1064 
1065 	newvfstbl->vfc_sysctl = NULL;
1066 
1067 	for (i = 0; i < desccount; i++) {
1068 		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1069 		/*
1070 		 * Fill in the caller's pointer to the start of the i'th vector.
1071 		 * They'll need to supply it when calling vnode_create.
1072 		 */
1073 		opv_desc_vector = descptr + i * vfs_opv_numops;
1074 		*opv_desc_vector_p = opv_desc_vector;
1075 
1076 		for (j = 0; vfe->vfe_opvdescs[i]->opv_desc_ops[j].opve_op; j++) {
1077 			opve_descp = &(vfe->vfe_opvdescs[i]->opv_desc_ops[j]);
1078 
1079 			/* Silently skip known-disabled operations */
1080 			if (opve_descp->opve_op->vdesc_flags & VDESC_DISABLED) {
1081 				printf("vfs_fsadd: Ignoring reference in %p to disabled operation %s.\n",
1082 				    vfe->vfe_opvdescs[i], opve_descp->opve_op->vdesc_name);
1083 				continue;
1084 			}
1085 
1086 			/*
1087 			 * Sanity check:  is this operation listed
1088 			 * in the list of operations?  We check this
1089 			 * by seeing if its offset is zero.  Since
1090 			 * the default routine should always be listed
1091 			 * first, it should be the only one with a zero
1092 			 * offset.  Any other operation with a zero
1093 			 * offset is probably not listed in
1094 			 * vfs_op_descs, and so is probably an error.
1095 			 *
1096 			 * A panic here means the layer programmer
1097 			 * has committed the all-too common bug
1098 			 * of adding a new operation to the layer's
1099 			 * list of vnode operations but
1100 			 * not adding the operation to the system-wide
1101 			 * list of supported operations.
1102 			 */
1103 			if (opve_descp->opve_op->vdesc_offset == 0 &&
1104 			    opve_descp->opve_op != VDESC(vnop_default)) {
1105 				printf("vfs_fsadd: operation %s not listed in %s.\n",
1106 				    opve_descp->opve_op->vdesc_name,
1107 				    "vfs_op_descs");
1108 				panic("vfs_fsadd: bad operation");
1109 			}
1110 			/*
1111 			 * Fill in this entry.
1112 			 */
1113 			opv_desc_vector[opve_descp->opve_op->vdesc_offset] =
1114 			    opve_descp->opve_impl;
1115 		}
1116 
1117 		/*
1118 		 * Finally, go back and replace unfilled routines
1119 		 * with their default.  (Sigh, an O(n^3) algorithm.  I
1120 		 * could make it better, but that'd be work, and n is small.)
1121 		 */
1122 		opv_desc_vector_p = vfe->vfe_opvdescs[i]->opv_desc_vector_p;
1123 
1124 		/*
1125 		 * Force every operations vector to have a default routine.
1126 		 */
1127 		opv_desc_vector = *opv_desc_vector_p;
1128 		if (opv_desc_vector[VOFFSET(vnop_default)] == NULL) {
1129 			panic("vfs_fsadd: operation vector without default routine.");
1130 		}
1131 		for (j = 0; j < vfs_opv_numops; j++) {
1132 			if (opv_desc_vector[j] == NULL) {
1133 				opv_desc_vector[j] =
1134 				    opv_desc_vector[VOFFSET(vnop_default)];
1135 			}
1136 		}
1137 	} /* end of each vnodeopv_desc parsing */
1138 
1139 	*handle = vfstable_add(newvfstbl);
1140 
1141 	if (newvfstbl->vfc_vfsops->vfs_init) {
1142 		struct vfsconf vfsc;
1143 		bzero(&vfsc, sizeof(struct vfsconf));
1144 		vfsc.vfc_reserved1 = 0;
1145 		bcopy((*handle)->vfc_name, vfsc.vfc_name, sizeof(vfsc.vfc_name));
1146 		vfsc.vfc_typenum = (*handle)->vfc_typenum;
1147 		vfsc.vfc_refcount = (*handle)->vfc_refcount;
1148 		vfsc.vfc_flags = (*handle)->vfc_flags;
1149 		vfsc.vfc_reserved2 = 0;
1150 		vfsc.vfc_reserved3 = 0;
1151 
1152 		(*newvfstbl->vfc_vfsops->vfs_init)(&vfsc);
1153 	}
1154 
1155 	kfree_type(struct vfstable, newvfstbl);
1156 
1157 	return 0;
1158 }
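
/*
 * Illustrative sketch (not part of this file): a loadable filesystem
 * typically registers itself from its kext start routine and keeps the
 * returned handle for a later vfs_fsremove().  The example_fs_* names
 * below are hypothetical.
 *
 *	static vfstable_t example_fs_handle;
 *
 *	kern_return_t
 *	example_fs_start(kmod_info_t *ki, void *data)
 *	{
 *		static struct vnodeopv_desc *example_fs_opvdescs[] = {
 *			&example_fs_vnodeop_opv_desc,
 *		};
 *		struct vfs_fsentry vfe = {
 *			.vfe_vfsops   = &example_fs_vfsops,
 *			.vfe_vopcnt   = 1,
 *			.vfe_opvdescs = example_fs_opvdescs,
 *			.vfe_fsname   = "examplefs",
 *			.vfe_flags    = VFS_TBLTHREADSAFE | VFS_TBLFSNODELOCK |
 *			    VFS_TBL64BITREADY | VFS_TBLNOTYPENUM,
 *		};
 *
 *		return vfs_fsadd(&vfe, &example_fs_handle) ? KERN_FAILURE : KERN_SUCCESS;
 *	}
 */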
1159 
1160 /*
1161  * Removes the filesystem from kernel.
1162  * The argument passed in is the handle that was given when
1163  * file system was added
1164  */
1165 errno_t
1166 vfs_fsremove(vfstable_t handle)
1167 {
1168 	struct vfstable * vfstbl =  (struct vfstable *)handle;
1169 	void *old_desc = NULL;
1170 	size_t descsize = 0;
1171 	errno_t err;
1172 
1173 	/* Preflight check for any mounts */
1174 	mount_list_lock();
1175 	if (vfstbl->vfc_refcount != 0) {
1176 		mount_list_unlock();
1177 		return EBUSY;
1178 	}
1179 
1180 	/* Free the spot in vfs_typenum_arr */
1181 	lck_mtx_lock(&vfs_typenum_mtx);
1182 	clrbit(vfs_typenum_arr, handle->vfc_typenum);
1183 	if (maxvfstypenum == handle->vfc_typenum) {
1184 		maxvfstypenum--;
1185 	}
1186 	lck_mtx_unlock(&vfs_typenum_mtx);
1187 
1188 	/*
1189 	 * save the old descriptor; the free cannot occur unconditionally,
1190 	 * since vfstable_del() may fail.
1191 	 */
1192 	if (vfstbl->vfc_descptr && vfstbl->vfc_descsize) {
1193 		old_desc = vfstbl->vfc_descptr;
1194 		descsize = vfstbl->vfc_descsize;
1195 	}
1196 	err = vfstable_del(vfstbl);
1197 
1198 	mount_list_unlock();
1199 
1200 	/* free the descriptor if the delete was successful */
1201 	if (err == 0) {
1202 		kfree_type(PFI, descsize, old_desc);
1203 	}
1204 
1205 	return err;
1206 }
1207 
1208 void
1209 vfs_setowner(mount_t mp, uid_t uid, gid_t gid)
1210 {
1211 	mp->mnt_fsowner = uid;
1212 	mp->mnt_fsgroup = gid;
1213 }
1214 
1215 /*
1216  * Callers should be careful how they use this; accessing
1217  * mnt_last_write_completed_timestamp is not thread-safe.  Writing to
1218  * it isn't either.  Point is: be prepared to deal with strange values
1219  * being returned.
1220  */
1221 uint64_t
1222 vfs_idle_time(mount_t mp)
1223 {
1224 	if (mp->mnt_pending_write_size) {
1225 		return 0;
1226 	}
1227 
1228 	struct timeval now;
1229 
1230 	microuptime(&now);
1231 
1232 	return (now.tv_sec
1233 	       - mp->mnt_last_write_completed_timestamp.tv_sec) * 1000000
1234 	       + now.tv_usec - mp->mnt_last_write_completed_timestamp.tv_usec;
1235 }
1236 
1237 /*
1238  * vfs_context_create_with_proc() takes a reference on an arbitrary
1239  * thread in the process.  To distinguish this reference-counted thread
1240  * from the usual non-reference-counted thread, we set the least significant
1241  * bit of vc_thread.
1242  */
1243 #define VFS_CONTEXT_THREAD_IS_REFERENCED(ctx) \
1244 	(!!(((uintptr_t)(ctx)->vc_thread) & 1UL))
1245 
1246 #define VFS_CONTEXT_SET_REFERENCED_THREAD(ctx, thr) \
1247 	(ctx)->vc_thread = (thread_t)(((uintptr_t)(thr)) | 1UL)
1248 
1249 #define VFS_CONTEXT_GET_THREAD(ctx) \
1250 	((thread_t)(((uintptr_t)(ctx)->vc_thread) & ~1UL))
1251 
1252 int
1253 vfs_context_pid(vfs_context_t ctx)
1254 {
1255 	return proc_pid(vfs_context_proc(ctx));
1256 }
1257 
1258 int
1259 vfs_context_copy_audit_token(vfs_context_t ctx, audit_token_t *token)
1260 {
1261 	kern_return_t           err;
1262 	task_t                  task;
1263 	mach_msg_type_number_t  info_size = TASK_AUDIT_TOKEN_COUNT;
1264 
1265 	task = vfs_context_task(ctx);
1266 
1267 	if (task == NULL) {
1268 		// Not sure how this would happen; we are supposed to be
1269 		// in the middle of using the context. Regardless, don't
1270 		// wander off a NULL pointer.
1271 		return ESRCH;
1272 	}
1273 
1274 	err = task_info(task, TASK_AUDIT_TOKEN, (integer_t *)token, &info_size);
1275 	return (err) ? ESRCH : 0;
1276 }
1277 
1278 int
1279 vfs_context_suser(vfs_context_t ctx)
1280 {
1281 	return suser(ctx->vc_ucred, NULL);
1282 }
1283 
1284 /*
1285  * Return bit field of signals posted to all threads in the context's process.
1286  *
1287  * XXX Signals should be tied to threads, not processes, for most uses of this
1288  * XXX call.
1289  */
1290 int
1291 vfs_context_issignal(vfs_context_t ctx, sigset_t mask)
1292 {
1293 	proc_t p = vfs_context_proc(ctx);
1294 	if (p) {
1295 		return proc_pendingsignals(p, mask);
1296 	}
1297 	return 0;
1298 }
1299 
1300 int
1301 vfs_context_is64bit(vfs_context_t ctx)
1302 {
1303 	uthread_t uth;
1304 	thread_t t;
1305 
1306 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1307 		uth = get_bsdthread_info(t);
1308 	} else {
1309 		uth = current_uthread();
1310 	}
1311 	return uthread_is64bit(uth);
1312 }
1313 
1314 boolean_t
1315 vfs_context_can_resolve_triggers(vfs_context_t ctx)
1316 {
1317 	proc_t proc = vfs_context_proc(ctx);
1318 
1319 	if (proc) {
1320 		if (proc->p_vfs_iopolicy &
1321 		    P_VFS_IOPOLICY_TRIGGER_RESOLVE_DISABLE) {
1322 			return false;
1323 		}
1324 		return true;
1325 	}
1326 	return false;
1327 }
1328 
1329 boolean_t
1330 vfs_context_can_break_leases(vfs_context_t ctx)
1331 {
1332 	proc_t proc = vfs_context_proc(ctx);
1333 
1334 	if (proc) {
1335 		/*
1336 		 * We do not have a separate I/O policy for this,
1337 		 * because the scenarios where we would not want
1338 		 * local file lease breaks are currently exactly
1339 		 * the same as where we would not want dataless
1340 		 * file materialization (mainly, system daemons
1341 		 * passively snooping file activity).
1342 		 */
1343 		if (proc->p_vfs_iopolicy &
1344 		    P_VFS_IOPOLICY_MATERIALIZE_DATALESS_FILES) {
1345 			return true;
1346 		}
1347 		return false;
1348 	}
1349 	return true;
1350 }
1351 
1352 bool
1353 vfs_context_allow_fs_blksize_nocache_write(vfs_context_t ctx)
1354 {
1355 	uthread_t uth;
1356 	thread_t t;
1357 	proc_t p;
1358 
1359 	if ((ctx == NULL) || (t = VFS_CONTEXT_GET_THREAD(ctx)) == NULL) {
1360 		return false;
1361 	}
1362 
1363 	uth = get_bsdthread_info(t);
1364 	if (uth && (uth->uu_flag & UT_FS_BLKSIZE_NOCACHE_WRITES)) {
1365 		return true;
1366 	}
1367 
1368 	p = (proc_t)get_bsdthreadtask_info(t);
1369 	if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_NOCACHE_WRITE_FS_BLKSIZE)) {
1370 		return true;
1371 	}
1372 
1373 	return false;
1374 }
1375 
1376 boolean_t
1377 vfs_context_skip_mtime_update(vfs_context_t ctx)
1378 {
1379 	proc_t p = vfs_context_proc(ctx);
1380 	thread_t t = vfs_context_thread(ctx);
1381 	uthread_t ut = t ? get_bsdthread_info(t) : NULL;
1382 
1383 	if (ut && (os_atomic_load(&ut->uu_flag, relaxed) & UT_SKIP_MTIME_UPDATE)) {
1384 		return true;
1385 	}
1386 
1387 	if (p && (os_atomic_load(&p->p_vfs_iopolicy, relaxed) & P_VFS_IOPOLICY_SKIP_MTIME_UPDATE)) {
1388 		return true;
1389 	}
1390 
1391 	return false;
1392 }
1393 
1394 /*
1395  * vfs_context_proc
1396  *
1397  * Description:	Given a vfs_context_t, return the proc_t associated with it.
1398  *
1399  * Parameters:	vfs_context_t			The context to use
1400  *
1401  * Returns:	proc_t				The process for this context
1402  *
1403  * Notes:	This function will return the current_proc() if any of the
1404  *		following conditions are true:
1405  *
1406  *		o	The supplied context pointer is NULL
1407  *		o	There is no Mach thread associated with the context
1408  *		o	There is no Mach task associated with the Mach thread
1409  *		o	There is no proc_t associated with the Mach task
1410  *		o	The proc_t has no per process open file table
1411  *
1412  *		This causes this function to return a value matching as
1413  *		closely as possible the previous behaviour.
1414  */
1415 proc_t
1416 vfs_context_proc(vfs_context_t ctx)
1417 {
1418 	proc_t  proc = NULL;
1419 	thread_t t;
1420 
1421 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1422 		proc = (proc_t)get_bsdthreadtask_info(t);
1423 	}
1424 
1425 	return proc == NULL ? current_proc() : proc;
1426 }
1427 
1428 /*
1429  * vfs_context_get_special_port
1430  *
1431  * Description: Return the requested special port from the task associated
1432  *              with the given context.
1433  *
1434  * Parameters:	vfs_context_t			The context to use
1435  *              int				Index of special port
1436  *              ipc_port_t *			Pointer to returned port
1437  *
1438  * Returns:	kern_return_t			see task_get_special_port()
1439  */
1440 kern_return_t
1441 vfs_context_get_special_port(vfs_context_t ctx, int which, ipc_port_t *portp)
1442 {
1443 	return task_get_special_port(vfs_context_task(ctx), which, portp);
1444 }
1445 
1446 /*
1447  * vfs_context_set_special_port
1448  *
1449  * Description: Set the requested special port in the task associated
1450  *              with the given context.
1451  *
1452  * Parameters:	vfs_context_t			The context to use
1453  *              int				Index of special port
1454  *              ipc_port_t			New special port
1455  *
1456  * Returns:	kern_return_t			see task_set_special_port_internal()
1457  */
1458 kern_return_t
1459 vfs_context_set_special_port(vfs_context_t ctx, int which, ipc_port_t port)
1460 {
1461 	return task_set_special_port_internal(vfs_context_task(ctx),
1462 	           which, port);
1463 }
1464 
1465 /*
1466  * vfs_context_thread
1467  *
1468  * Description:	Return the Mach thread associated with a vfs_context_t
1469  *
1470  * Parameters:	vfs_context_t			The context to use
1471  *
1472  * Returns:	thread_t			The thread for this context, or
1473  *						NULL, if there is not one.
1474  *
1475  * Notes:	NULL thread_t's are legal, but discouraged.  They occur only
1476  *		as a result of a static vfs_context_t declaration in a function
1477  *		and will result in this function returning NULL.
1478  *
1479  *		This is intentional; this function should NOT return the
1480  *		current_thread() in this case.
1481  */
1482 thread_t
1483 vfs_context_thread(vfs_context_t ctx)
1484 {
1485 	return VFS_CONTEXT_GET_THREAD(ctx);
1486 }
1487 
1488 /*
1489  * vfs_context_task
1490  *
1491  * Description:	Return the Mach task associated with a vfs_context_t
1492  *
1493  * Parameters:	vfs_context_t			The context to use
1494  *
1495  * Returns:	task_t				The task for this context, or
1496  *						NULL, if there is not one.
1497  *
1498  * Notes:	NULL task_t's are legal, but discouraged.  They occur only
1499  *		as a result of a static vfs_context_t declaration in a function
1500  *		and will result in this function returning NULL.
1501  *
1502  *		This is intentional; this function should NOT return the
1503  *		task associated with current_thread() in this case.
1504  */
1505 task_t
1506 vfs_context_task(vfs_context_t ctx)
1507 {
1508 	task_t                  task = NULL;
1509 	thread_t                t;
1510 
1511 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1512 		task = get_threadtask(t);
1513 	}
1514 
1515 	return task;
1516 }
1517 
1518 /*
1519  * vfs_context_cwd
1520  *
1521  * Description:	Returns a reference on the vnode for the current working
1522  *		directory for the supplied context
1523  *
1524  * Parameters:	vfs_context_t			The context to use
1525  *
1526  * Returns:	vnode_t				The current working directory
1527  *						for this context
1528  *
1529  * Notes:	The function first attempts to obtain the current directory
1530  *		from the thread, and if it is not present there, falls back
1531  *		to obtaining it from the process instead.  If it can't be
1532  *		obtained from either place, we return NULLVP.
1533  */
1534 vnode_t
1535 vfs_context_cwd(vfs_context_t ctx)
1536 {
1537 	vnode_t cwd = NULLVP;
1538 	thread_t t;
1539 
1540 	if (ctx != NULL && (t = VFS_CONTEXT_GET_THREAD(ctx)) != NULL) {
1541 		uthread_t uth = get_bsdthread_info(t);
1542 		proc_t proc;
1543 
1544 		/*
1545 		 * Get the cwd from the thread; if there isn't one, get it
1546 		 * from the process, instead.
1547 		 */
1548 		if ((cwd = uth->uu_cdir) == NULLVP &&
1549 		    (proc = (proc_t)get_bsdthreadtask_info(t)) != NULL) {
1550 			cwd = proc->p_fd.fd_cdir;
1551 		}
1552 	}
1553 
1554 	return cwd;
1555 }
1556 
1557 /*
1558  * vfs_context_create
1559  *
1560  * Description: Allocate and initialize a new context.
1561  *
1562  * Parameters:  vfs_context_t:                  Context to copy, or NULL for new
1563  *
1564  * Returns:     Pointer to new context
1565  *
1566  * Notes:       Copy cred and thread from argument, if available; else
1567  *              initialize with current thread and new cred.  Returns
1568  *              with a reference held on the credential.
1569  */
1570 vfs_context_t
1571 vfs_context_create(vfs_context_t ctx)
1572 {
1573 	vfs_context_t newcontext;
1574 
1575 	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1576 
1577 	if (ctx == NULL) {
1578 		ctx = vfs_context_current();
1579 	}
1580 	*newcontext = *ctx;
1581 	if (IS_VALID_CRED(ctx->vc_ucred)) {
1582 		kauth_cred_ref(ctx->vc_ucred);
1583 	}
1584 
1585 	return newcontext;
1586 }
1587 
1588 /*
1589  * vfs_context_create_with_proc
1590  *
1591  * Description: Create a new context with credentials taken from
1592  *              the specified proc.
1593  *
1594  * Parameters:  proc_t: The process whose credentials to use.
1595  *
1596  * Returns:     Pointer to new context.
1597  *
1598  * Notes:       The context will also take a reference on an arbitrary
1599  *              thread in the process as well as the process's credentials.
1600  */
1601 vfs_context_t
1602 vfs_context_create_with_proc(proc_t p)
1603 {
1604 	vfs_context_t newcontext;
1605 	thread_t thread;
1606 	kauth_cred_t cred;
1607 
1608 	if (p == current_proc()) {
1609 		return vfs_context_create(NULL);
1610 	}
1611 
1612 	newcontext = zalloc_flags(KT_VFS_CONTEXT, Z_WAITOK | Z_ZERO | Z_NOFAIL);
1613 
1614 	proc_lock(p);
1615 	thread = proc_thread(p);        /* XXX */
1616 	if (thread != NULL) {
1617 		thread_reference(thread);
1618 	}
1619 	proc_unlock(p);
1620 
1621 	cred = kauth_cred_proc_ref(p);
1622 
1623 	if (thread != NULL) {
1624 		VFS_CONTEXT_SET_REFERENCED_THREAD(newcontext, thread);
1625 	}
1626 	newcontext->vc_ucred = cred;
1627 
1628 	return newcontext;
1629 }
1630 
1631 vfs_context_t
1632 vfs_context_current(void)
1633 {
1634 	static_assert(offsetof(struct thread_ro, tro_owner) ==
1635 	    offsetof(struct vfs_context, vc_thread));
1636 	static_assert(offsetof(struct thread_ro, tro_cred) ==
1637 	    offsetof(struct vfs_context, vc_ucred));
1638 
1639 	return (vfs_context_t)current_thread_ro();
1640 }
1641 
1642 vfs_context_t
1643 vfs_context_kernel(void)
1644 {
1645 	return &vfs_context0;
1646 }
1647 
1648 int
1649 vfs_context_rele(vfs_context_t ctx)
1650 {
1651 	if (ctx) {
1652 		if (IS_VALID_CRED(ctx->vc_ucred)) {
1653 			kauth_cred_unref(&ctx->vc_ucred);
1654 		}
1655 		if (VFS_CONTEXT_THREAD_IS_REFERENCED(ctx)) {
1656 			assert(VFS_CONTEXT_GET_THREAD(ctx) != NULL);
1657 			thread_deallocate(VFS_CONTEXT_GET_THREAD(ctx));
1658 		}
1659 		zfree(KT_VFS_CONTEXT, ctx);
1660 	}
1661 	return 0;
1662 }
1663 
1664 
1665 kauth_cred_t
1666 vfs_context_ucred(vfs_context_t ctx)
1667 {
1668 	return ctx->vc_ucred;
1669 }
1670 
1671 /*
1672  * Return true if the context is owned by the superuser.
1673  */
1674 int
1675 vfs_context_issuser(vfs_context_t ctx)
1676 {
1677 	return kauth_cred_issuser(vfs_context_ucred(ctx));
1678 }
1679 
1680 int
1681 vfs_context_iskernel(vfs_context_t ctx)
1682 {
1683 	return ctx == &vfs_context0;
1684 }
1685 
1686 /*
1687  * Given a context, for all fields of vfs_context_t which
1688  * are not held with a reference, set those fields to the
1689  * values for the current execution context.
1690  *
1691  * Returns: 0 for success, nonzero for failure
1692  *
1693  * The intended use is:
1694  * 1. vfs_context_create()	gets the caller a context
1695  * 2. vfs_context_bind()        sets the unrefcounted data
1696  * 3. vfs_context_rele()        releases the context
1697  *
1698  */
1699 int
1700 vfs_context_bind(vfs_context_t ctx)
1701 {
1702 	assert(!VFS_CONTEXT_THREAD_IS_REFERENCED(ctx));
1703 	ctx->vc_thread = current_thread();
1704 	return 0;
1705 }
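
/*
 * Illustrative sketch (not part of this file) of the intended use described
 * above:
 *
 *	vfs_context_t ctx = vfs_context_create(NULL);
 *
 *	vfs_context_bind(ctx);
 *	(issue VFS/VNOP calls passing ctx)
 *	vfs_context_rele(ctx);
 */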
1706 
1707 int
1708 vfs_set_thread_fs_private(uint8_t tag, uint64_t fs_private)
1709 {
1710 	struct uthread *ut;
1711 
1712 	if (tag != FS_PRIVATE_TAG_APFS) {
1713 		return ENOTSUP;
1714 	}
1715 
1716 	ut = current_uthread();
1717 	ut->t_fs_private = fs_private;
1718 
1719 	return 0;
1720 }
1721 
1722 int
1723 vfs_get_thread_fs_private(uint8_t tag, uint64_t *fs_private)
1724 {
1725 	struct uthread *ut;
1726 
1727 	if (tag != FS_PRIVATE_TAG_APFS) {
1728 		return ENOTSUP;
1729 	}
1730 
1731 	ut = current_uthread();
1732 	*fs_private = ut->t_fs_private;
1733 
1734 	return 0;
1735 }
1736 
1737 int
1738 vfs_isswapmount(mount_t mnt)
1739 {
1740 	return mnt && ISSET(mnt->mnt_kern_flag, MNTK_SWAP_MOUNT) ? 1 : 0;
1741 }
1742 
1743 /* XXXXXXXXXXXXXX VNODE KAPIS XXXXXXXXXXXXXXXXXXXXXXXXX */
1744 
1745 
1746 /*
1747  * Convert between vnode types and inode formats (since POSIX.1
1748  * defines mode word of stat structure in terms of inode formats).
1749  */
1750 enum vtype
1751 vnode_iftovt(int mode)
1752 {
1753 	return iftovt_tab[((mode) & S_IFMT) >> 12];
1754 }
1755 
1756 int
1757 vnode_vttoif(enum vtype indx)
1758 {
1759 	return vttoif_tab[(int)(indx)];
1760 }
1761 
1762 int
1763 vnode_makeimode(int indx, int mode)
1764 {
1765 	return (int)(VTTOIF(indx) | (mode));
1766 }
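
/*
 * Illustrative examples (not part of this file):
 *
 *	vnode_iftovt(S_IFDIR | 0755) == VDIR
 *	vnode_vttoif(VREG)           == S_IFREG
 *	vnode_makeimode(VLNK, 0777)  == (S_IFLNK | 0777)
 */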
1767 
1768 
1769 /*
1770  * vnode manipulation functions.
1771  */
1772 
1773 /* returns the system root vnode with an iocount; it should be released using vnode_put() */
1774 vnode_t
1775 vfs_rootvnode(void)
1776 {
1777 	vnode_t vp = NULLVP;
1778 
1779 	if (rootvnode) {
1780 		lck_rw_lock_shared(&rootvnode_rw_lock);
1781 		vp = rootvnode;
1782 		if (vp && (vnode_get(vp) != 0)) {
1783 			vp = NULLVP;
1784 		}
1785 		lck_rw_unlock_shared(&rootvnode_rw_lock);
1786 	}
1787 
1788 	return vp;
1789 }
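
/*
 * Illustrative sketch (not part of this file):
 *
 *	vnode_t rvp = vfs_rootvnode();
 *
 *	if (rvp != NULLVP) {
 *		(use rvp)
 *		vnode_put(rvp);
 *	}
 */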
1790 
1791 uint32_t
1792 vnode_vid(vnode_t vp)
1793 {
1794 	return (uint32_t)(vp->v_id);
1795 }
1796 
1797 mount_t
1798 vnode_mount(vnode_t vp)
1799 {
1800 	return vp->v_mount;
1801 }
1802 
1803 #if CONFIG_IOSCHED
1804 vnode_t
1805 vnode_mountdevvp(vnode_t vp)
1806 {
1807 	if (vp->v_mount) {
1808 		return vp->v_mount->mnt_devvp;
1809 	} else {
1810 		return (vnode_t)0;
1811 	}
1812 }
1813 #endif
1814 
1815 boolean_t
1816 vnode_isonexternalstorage(vnode_t vp)
1817 {
1818 	if (vp) {
1819 		if (vp->v_mount) {
1820 			if (vp->v_mount->mnt_ioflags & MNT_IOFLAGS_PERIPHERAL_DRIVE) {
1821 				return TRUE;
1822 			}
1823 		}
1824 	}
1825 	return FALSE;
1826 }
1827 
1828 boolean_t
1829 vnode_isonssd(vnode_t vp)
1830 {
1831 	if (vp) {
1832 		if (vp->v_mount) {
1833 			if (vp->v_mount->mnt_kern_flag & MNTK_SSD) {
1834 				return TRUE;
1835 			}
1836 		}
1837 	}
1838 	return FALSE;
1839 }
1840 
1841 mount_t
1842 vnode_mountedhere(vnode_t vp)
1843 {
1844 	mount_t mp;
1845 
1846 	if ((vp->v_type == VDIR) && ((mp = vp->v_mountedhere) != NULL) &&
1847 	    (mp->mnt_vnodecovered == vp)) {
1848 		return mp;
1849 	} else {
1850 		return (mount_t)NULL;
1851 	}
1852 }
1853 
1854 /* returns vnode type of vnode_t */
1855 enum vtype
1856 vnode_vtype(vnode_t vp)
1857 {
1858 	return vp->v_type;
1859 }
1860 
1861 /* returns FS specific node saved in vnode */
1862 void *
1863 vnode_fsnode(vnode_t vp)
1864 {
1865 	return vp->v_data;
1866 }
1867 
1868 void
1869 vnode_clearfsnode(vnode_t vp)
1870 {
1871 	vp->v_data = NULL;
1872 }
1873 
1874 dev_t
1875 vnode_specrdev(vnode_t vp)
1876 {
1877 	return vp->v_rdev;
1878 }
1879 
1880 
1881 /* Accessor functions */
1882 /* is vnode_t a root vnode */
1883 int
1884 vnode_isvroot(vnode_t vp)
1885 {
1886 	return (vp->v_flag & VROOT)? 1 : 0;
1887 }
1888 
1889 /* is vnode_t a system vnode */
1890 int
1891 vnode_issystem(vnode_t vp)
1892 {
1893 	return (vp->v_flag & VSYSTEM)? 1 : 0;
1894 }
1895 
1896 /* is vnode_t a swap file vnode */
1897 int
1898 vnode_isswap(vnode_t vp)
1899 {
1900 	return (vp->v_flag & VSWAP)? 1 : 0;
1901 }
1902 
1903 /* is vnode_t a tty */
1904 int
1905 vnode_istty(vnode_t vp)
1906 {
1907 	return (vp->v_flag & VISTTY) ? 1 : 0;
1908 }
1909 
1910 /* is a mount operation in progress on this vnode_t */
1911 int
1912 vnode_ismount(vnode_t vp)
1913 {
1914 	return (vp->v_flag & VMOUNT)? 1 : 0;
1915 }
1916 
1917 /* is this vnode being recycled now */
1918 int
1919 vnode_isrecycled(vnode_t vp)
1920 {
1921 	int ret;
1922 
1923 	vnode_lock_spin(vp);
1924 	ret =  (vp->v_lflag & (VL_TERMINATE | VL_DEAD))? 1 : 0;
1925 	vnode_unlock(vp);
1926 	return ret;
1927 }
1928 
1929 /* is this vnode marked for termination */
1930 int
1931 vnode_willberecycled(vnode_t vp)
1932 {
1933 	return (vp->v_lflag & VL_MARKTERM) ? 1 : 0;
1934 }
1935 
1936 
1937 /* vnode was created by a background task requesting rapid aging
1938  *  and has not since been referenced by a normal task */
1939 int
1940 vnode_israge(vnode_t vp)
1941 {
1942 	return (vp->v_flag & VRAGE)? 1 : 0;
1943 }
1944 
1945 int
1946 vnode_needssnapshots(__unused vnode_t vp)
1947 {
1948 	return 0;
1949 }
1950 
1951 
1952 /* Check the process/thread to see if we should skip atime updates */
1953 int
1954 vfs_ctx_skipatime(vfs_context_t ctx)
1955 {
1956 	struct uthread *ut;
1957 	proc_t proc;
1958 	thread_t thr;
1959 
1960 	proc = vfs_context_proc(ctx);
1961 	thr = vfs_context_thread(ctx);
1962 
1963 	/* Validate pointers in case we were invoked via a kernel context */
1964 	if (thr && proc) {
1965 		ut = get_bsdthread_info(thr);
1966 
1967 		if (proc->p_lflag & P_LRAGE_VNODES) {
1968 			return 1;
1969 		}
1970 
1971 		if (ut) {
1972 			if (ut->uu_flag & (UT_RAGE_VNODES | UT_ATIME_UPDATE)) {
1973 				return 1;
1974 			}
1975 		}
1976 
1977 		if (proc->p_vfs_iopolicy & P_VFS_IOPOLICY_ATIME_UPDATES) {
1978 			return 1;
1979 		}
1980 	}
1981 	return 0;
1982 }
1983 
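/*
 * Illustrative sketch (not part of the original source): a filesystem read
 * path can consult vfs_ctx_skipatime() before scheduling an access-time
 * update.  The helper name example_mark_access is hypothetical.
 */
#if 0
static void
example_mark_access(vnode_t vp, vfs_context_t ctx)
{
	if (vfs_ctx_skipatime(ctx)) {
		return;                 /* rapid-aging / atime-off policy in effect */
	}
	/* otherwise, record that vp's access time needs updating */
	(void)vp;
}
#endif
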
1984 /* is vnode_t marked to not keep data cached once it's been consumed */
1985 int
1986 vnode_isnocache(vnode_t vp)
1987 {
1988 	return (vp->v_flag & VNOCACHE_DATA)? 1 : 0;
1989 }
1990 
1991 /*
1992  * has sequential readahead been disabled on this vnode
1993  */
1994 int
1995 vnode_isnoreadahead(vnode_t vp)
1996 {
1997 	return (vp->v_flag & VRAOFF)? 1 : 0;
1998 }
1999 
2000 int
2001 vnode_is_openevt(vnode_t vp)
2002 {
2003 	return (vp->v_flag & VOPENEVT)? 1 : 0;
2004 }
2005 
2006 /* is vnode_t a standard one? */
2007 int
2008 vnode_isstandard(vnode_t vp)
2009 {
2010 	return (vp->v_flag & VSTANDARD)? 1 : 0;
2011 }
2012 
2013 /* don't vflush() if SKIPSYSTEM */
2014 int
2015 vnode_isnoflush(vnode_t vp)
2016 {
2017 	return (vp->v_flag & VNOFLUSH)? 1 : 0;
2018 }
2019 
2020 /* is vnode_t a regular file */
2021 int
2022 vnode_isreg(vnode_t vp)
2023 {
2024 	return (vp->v_type == VREG)? 1 : 0;
2025 }
2026 
2027 /* is vnode_t a directory? */
2028 int
2029 vnode_isdir(vnode_t vp)
2030 {
2031 	return (vp->v_type == VDIR)? 1 : 0;
2032 }
2033 
2034 /* is vnode_t a symbolic link ? */
2035 int
2036 vnode_islnk(vnode_t vp)
2037 {
2038 	return (vp->v_type == VLNK)? 1 : 0;
2039 }
2040 
2041 int
2042 vnode_lookup_continue_needed(vnode_t vp, struct componentname *cnp)
2043 {
2044 	struct nameidata *ndp = cnp->cn_ndp;
2045 
2046 	if (ndp == NULL) {
2047 		panic("vnode_lookup_continue_needed(): cnp->cn_ndp is NULL");
2048 	}
2049 
2050 	if (vnode_isdir(vp)) {
2051 		if (vp->v_mountedhere != NULL) {
2052 			goto yes;
2053 		}
2054 
2055 #if CONFIG_TRIGGERS
2056 		if (vp->v_resolve) {
2057 			goto yes;
2058 		}
2059 #endif /* CONFIG_TRIGGERS */
2060 	}
2061 
2062 
2063 	if (vnode_islnk(vp)) {
2064 		/* From lookup():  || *ndp->ni_next == '/') No need for this, we know we're NULL-terminated here */
2065 		if (cnp->cn_flags & FOLLOW) {
2066 			goto yes;
2067 		}
2068 		if (ndp->ni_flag & NAMEI_TRAILINGSLASH) {
2069 			goto yes;
2070 		}
2071 	}
2072 
2073 	return 0;
2074 
2075 yes:
2076 	ndp->ni_flag |= NAMEI_CONTLOOKUP;
2077 	return EKEEPLOOKING;
2078 }
2079 
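/*
 * Illustrative sketch (not part of the original source): a filesystem that
 * implements compound VNOPs can ask VFS whether the lookup must continue
 * (covered mount point, trigger, or followed symlink) and propagate
 * EKEEPLOOKING back up.  The surrounding handler shape is an assumption.
 */
#if 0
	error = vnode_lookup_continue_needed(vp, cnp);
	if (error) {
		/* EKEEPLOOKING: VFS resumes the lookup on our behalf */
		return error;
	}
#endif
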
2080 /* is vnode_t a fifo ? */
2081 int
2082 vnode_isfifo(vnode_t vp)
2083 {
2084 	return (vp->v_type == VFIFO)? 1 : 0;
2085 }
2086 
2087 /* is vnode_t a block device? */
2088 int
2089 vnode_isblk(vnode_t vp)
2090 {
2091 	return (vp->v_type == VBLK)? 1 : 0;
2092 }
2093 
2094 int
2095 vnode_isspec(vnode_t vp)
2096 {
2097 	return ((vp->v_type == VCHR) || (vp->v_type == VBLK)) ? 1 : 0;
2098 }
2099 
2100 /* is vnode_t a char device? */
2101 int
2102 vnode_ischr(vnode_t vp)
2103 {
2104 	return (vp->v_type == VCHR)? 1 : 0;
2105 }
2106 
2107 /* is vnode_t a socket? */
2108 int
2109 vnode_issock(vnode_t vp)
2110 {
2111 	return (vp->v_type == VSOCK)? 1 : 0;
2112 }
2113 
2114 /* is vnode_t a device with multiple active vnodes referring to it? */
2115 int
2116 vnode_isaliased(vnode_t vp)
2117 {
2118 	enum vtype vt = vp->v_type;
2119 	if (!((vt == VCHR) || (vt == VBLK))) {
2120 		return 0;
2121 	} else {
2122 		return vp->v_specflags & SI_ALIASED;
2123 	}
2124 }
2125 
2126 /* is vnode_t a named stream? */
2127 int
2128 vnode_isnamedstream(
2129 #if NAMEDSTREAMS
2130 	vnode_t vp
2131 #else
2132 	__unused vnode_t vp
2133 #endif
2134 	)
2135 {
2136 #if NAMEDSTREAMS
2137 	return (vp->v_flag & VISNAMEDSTREAM) ? 1 : 0;
2138 #else
2139 	return 0;
2140 #endif
2141 }
2142 
2143 int
2144 vnode_isshadow(
2145 #if NAMEDSTREAMS
2146 	vnode_t vp
2147 #else
2148 	__unused vnode_t vp
2149 #endif
2150 	)
2151 {
2152 #if NAMEDSTREAMS
2153 	return (vp->v_flag & VISSHADOW) ? 1 : 0;
2154 #else
2155 	return 0;
2156 #endif
2157 }
2158 
2159 /* does vnode have associated named stream vnodes ? */
2160 int
2161 vnode_hasnamedstreams(
2162 #if NAMEDSTREAMS
2163 	vnode_t vp
2164 #else
2165 	__unused vnode_t vp
2166 #endif
2167 	)
2168 {
2169 #if NAMEDSTREAMS
2170 	return (vp->v_lflag & VL_HASSTREAMS) ? 1 : 0;
2171 #else
2172 	return 0;
2173 #endif
2174 }
2175 /* TBD:  set vnode_t to not cache data after it is consumed once; used for quota */
2176 void
2177 vnode_setnocache(vnode_t vp)
2178 {
2179 	vnode_lock_spin(vp);
2180 	vp->v_flag |= VNOCACHE_DATA;
2181 	vnode_unlock(vp);
2182 }
2183 
2184 void
2185 vnode_clearnocache(vnode_t vp)
2186 {
2187 	vnode_lock_spin(vp);
2188 	vp->v_flag &= ~VNOCACHE_DATA;
2189 	vnode_unlock(vp);
2190 }
2191 
2192 void
2193 vnode_set_openevt(vnode_t vp)
2194 {
2195 	vnode_lock_spin(vp);
2196 	vp->v_flag |= VOPENEVT;
2197 	vnode_unlock(vp);
2198 }
2199 
2200 void
2201 vnode_clear_openevt(vnode_t vp)
2202 {
2203 	vnode_lock_spin(vp);
2204 	vp->v_flag &= ~VOPENEVT;
2205 	vnode_unlock(vp);
2206 }
2207 
2208 
2209 void
2210 vnode_setnoreadahead(vnode_t vp)
2211 {
2212 	vnode_lock_spin(vp);
2213 	vp->v_flag |= VRAOFF;
2214 	vnode_unlock(vp);
2215 }
2216 
2217 void
2218 vnode_clearnoreadahead(vnode_t vp)
2219 {
2220 	vnode_lock_spin(vp);
2221 	vp->v_flag &= ~VRAOFF;
2222 	vnode_unlock(vp);
2223 }
2224 
2225 int
2226 vnode_isfastdevicecandidate(vnode_t vp)
2227 {
2228 	return (vp->v_flag & VFASTDEVCANDIDATE)? 1 : 0;
2229 }
2230 
2231 void
2232 vnode_setfastdevicecandidate(vnode_t vp)
2233 {
2234 	vnode_lock_spin(vp);
2235 	vp->v_flag |= VFASTDEVCANDIDATE;
2236 	vnode_unlock(vp);
2237 }
2238 
2239 void
2240 vnode_clearfastdevicecandidate(vnode_t vp)
2241 {
2242 	vnode_lock_spin(vp);
2243 	vp->v_flag &= ~VFASTDEVCANDIDATE;
2244 	vnode_unlock(vp);
2245 }
2246 
2247 int
2248 vnode_isautocandidate(vnode_t vp)
2249 {
2250 	return (vp->v_flag & VAUTOCANDIDATE)? 1 : 0;
2251 }
2252 
2253 void
2254 vnode_setautocandidate(vnode_t vp)
2255 {
2256 	vnode_lock_spin(vp);
2257 	vp->v_flag |= VAUTOCANDIDATE;
2258 	vnode_unlock(vp);
2259 }
2260 
2261 void
2262 vnode_clearautocandidate(vnode_t vp)
2263 {
2264 	vnode_lock_spin(vp);
2265 	vp->v_flag &= ~VAUTOCANDIDATE;
2266 	vnode_unlock(vp);
2267 }
2268 
2269 
2270 
2271 
2272 /* mark vnode_t to skip vflush() if SKIPSYSTEM */
2273 void
2274 vnode_setnoflush(vnode_t vp)
2275 {
2276 	vnode_lock_spin(vp);
2277 	vp->v_flag |= VNOFLUSH;
2278 	vnode_unlock(vp);
2279 }
2280 
2281 void
2282 vnode_clearnoflush(vnode_t vp)
2283 {
2284 	vnode_lock_spin(vp);
2285 	vp->v_flag &= ~VNOFLUSH;
2286 	vnode_unlock(vp);
2287 }
2288 
2289 
2290 /* is vnode_t a block device with a FS mounted on it */
2291 int
2292 vnode_ismountedon(vnode_t vp)
2293 {
2294 	return (vp->v_specflags & SI_MOUNTEDON)? 1 : 0;
2295 }
2296 
2297 void
2298 vnode_setmountedon(vnode_t vp)
2299 {
2300 	vnode_lock_spin(vp);
2301 	vp->v_specflags |= SI_MOUNTEDON;
2302 	vnode_unlock(vp);
2303 }
2304 
2305 void
2306 vnode_clearmountedon(vnode_t vp)
2307 {
2308 	vnode_lock_spin(vp);
2309 	vp->v_specflags &= ~SI_MOUNTEDON;
2310 	vnode_unlock(vp);
2311 }
2312 
2313 
2314 void
2315 vnode_settag(vnode_t vp, int tag)
2316 {
2317 	/*
2318 	 * We only assign enum values to v_tag, but add an assert to make sure we
2319 	 * catch it in dev/debug builds if this ever changes.
2320 	 */
2321 	assert(tag >= SHRT_MIN && tag <= SHRT_MAX);
2322 	vp->v_tag = (uint16_t)tag;
2323 }
2324 
2325 int
2326 vnode_tag(vnode_t vp)
2327 {
2328 	return vp->v_tag;
2329 }
2330 
2331 vnode_t
2332 vnode_parent(vnode_t vp)
2333 {
2334 	return vp->v_parent;
2335 }
2336 
2337 void
2338 vnode_setparent(vnode_t vp, vnode_t dvp)
2339 {
2340 	vp->v_parent = dvp;
2341 }
2342 
2343 void
2344 vnode_setname(vnode_t vp, char * name)
2345 {
2346 	vp->v_name = name;
2347 }
2348 
2349 /* return the FS name that was registered when the FS was added to the kernel */
2350 void
2351 vnode_vfsname(vnode_t vp, char * buf)
2352 {
2353 	strlcpy(buf, vp->v_mount->mnt_vtable->vfc_name, MFSNAMELEN);
2354 }
2355 
2356 /* return the FS type number */
2357 int
2358 vnode_vfstypenum(vnode_t vp)
2359 {
2360 	return vp->v_mount->mnt_vtable->vfc_typenum;
2361 }
2362 
2363 int
2364 vnode_vfs64bitready(vnode_t vp)
2365 {
2366 	/*
2367 	 * Checking for dead_mountp is a bit of a hack for SnowLeopard: <rdar://problem/6269051>
2368 	 */
2369 	if ((vp->v_mount != dead_mountp) && (vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFS64BITREADY)) {
2370 		return 1;
2371 	} else {
2372 		return 0;
2373 	}
2374 }
2375 
2376 
2377 
2378 /* return the visible flags on associated mount point of vnode_t */
2379 uint32_t
2380 vnode_vfsvisflags(vnode_t vp)
2381 {
2382 	return vp->v_mount->mnt_flag & MNT_VISFLAGMASK;
2383 }
2384 
2385 /* return the command modifier flags on associated mount point of vnode_t */
2386 uint32_t
2387 vnode_vfscmdflags(vnode_t vp)
2388 {
2389 	return vp->v_mount->mnt_flag & MNT_CMDFLAGS;
2390 }
2391 
2392 /* return the max length of a short symlink on vnode_t's mount point */
2393 uint32_t
2394 vnode_vfsmaxsymlen(vnode_t vp)
2395 {
2396 	return vp->v_mount->mnt_maxsymlinklen;
2397 }
2398 
2399 /* return a pointer to the RO vfs_statfs associated with vnode_t's mount point */
2400 struct vfsstatfs *
2401 vnode_vfsstatfs(vnode_t vp)
2402 {
2403 	return &vp->v_mount->mnt_vfsstat;
2404 }
2405 
2406 /* return the FS-specific private data handle associated with vnode_t's mount point */
2407 void *
2408 vnode_vfsfsprivate(vnode_t vp)
2409 {
2410 	return vp->v_mount->mnt_data;
2411 }
2412 
2413 /* is vnode_t in a read-only mounted FS */
2414 int
2415 vnode_vfsisrdonly(vnode_t vp)
2416 {
2417 	return (vp->v_mount->mnt_flag & MNT_RDONLY)? 1 : 0;
2418 }
2419 
2420 int
2421 vnode_compound_rename_available(vnode_t vp)
2422 {
2423 	return vnode_compound_op_available(vp, COMPOUND_VNOP_RENAME);
2424 }
2425 int
2426 vnode_compound_rmdir_available(vnode_t vp)
2427 {
2428 	return vnode_compound_op_available(vp, COMPOUND_VNOP_RMDIR);
2429 }
2430 int
2431 vnode_compound_mkdir_available(vnode_t vp)
2432 {
2433 	return vnode_compound_op_available(vp, COMPOUND_VNOP_MKDIR);
2434 }
2435 int
2436 vnode_compound_remove_available(vnode_t vp)
2437 {
2438 	return vnode_compound_op_available(vp, COMPOUND_VNOP_REMOVE);
2439 }
2440 int
2441 vnode_compound_open_available(vnode_t vp)
2442 {
2443 	return vnode_compound_op_available(vp, COMPOUND_VNOP_OPEN);
2444 }
2445 
2446 int
2447 vnode_compound_op_available(vnode_t vp, compound_vnop_id_t opid)
2448 {
2449 	return (vp->v_mount->mnt_compound_ops & opid) != 0;
2450 }
2451 
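/*
 * Illustrative sketch (not part of the original source): callers can probe
 * for a compound operation before choosing between the combined VNOP and
 * the classic lookup-then-operate sequence.  dvp is an assumed directory
 * vnode held by the caller.
 */
#if 0
	if (vnode_compound_open_available(dvp)) {
		/* use VNOP_COMPOUND_OPEN against dvp */
	} else {
		/* fall back to VNOP_LOOKUP followed by VNOP_OPEN */
	}
#endif
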
2452 /*
2453  * Returns vnode ref to current working directory; if a per-thread current
2454  * working directory is in effect, return that instead of the per process one.
2455  *
2456  * XXX Published, but not used.
2457  */
2458 vnode_t
2459 current_workingdir(void)
2460 {
2461 	return vfs_context_cwd(vfs_context_current());
2462 }
2463 
2464 /*
2465  * Get a filesec and optional acl contents from an extended attribute.
2466  * The function will attempt to retrieve ACL, UUID, and GUID information using a
2467  * read of a named extended attribute (KAUTH_FILESEC_XATTR).
2468  *
2469  * Parameters:	vp			The vnode on which to operate.
2470  *		fsecp			The filesec (and ACL, if any) being
2471  *					retrieved.
2472  *		ctx			The vnode context in which the
2473  *					operation is to be attempted.
2474  *
2475  * Returns:	0			Success
2476  *		!0			errno value
2477  *
2478  * Notes:	The kauth_filesec_t in '*fsecp', if retrieved, will be in
2479  *		host byte order, as will be the ACL contents, if any.
2480  *		Internally, we will canonicalize these values from network (PPC)
2481  *		byte order after we retrieve them so that the on-disk contents
2482  *		of the extended attribute are identical for both PPC and Intel
2483  *		(if we were not being required to provide this service via
2484  *		fallback, this would be the job of the filesystem
2485  *		'VNOP_GETATTR' call).
2486  *
2487  *		We use ntohl() because it has a transitive property on Intel
2488  *		machines and no effect on PPC machines.  This guarantees us
2489  *
2490  * XXX:		Deleting rather than ignoring a corrupt security structure is
2491  *		probably the only way to reset it without assistance from a
2492  *		file system integrity checking tool.  Right now we ignore it.
2493  *
2494  * XXX:		We should enumerate the possible errno values here, and where
2495  *		in the code they originated.
2496  */
2497 static int
2498 vnode_get_filesec(vnode_t vp, kauth_filesec_t *fsecp, vfs_context_t ctx)
2499 {
2500 	kauth_filesec_t fsec;
2501 	uio_t   fsec_uio;
2502 	size_t  fsec_size;
2503 	size_t  xsize, rsize;
2504 	int     error;
2505 	uint32_t        host_fsec_magic;
2506 	uint32_t        host_acl_entrycount;
2507 
2508 	fsec = NULL;
2509 	fsec_uio = NULL;
2510 
2511 	/* find out how big the EA is */
2512 	error = vn_getxattr(vp, KAUTH_FILESEC_XATTR, NULL, &xsize, XATTR_NOSECURITY, ctx);
2513 	if (error != 0) {
2514 		/* no EA, no filesec */
2515 		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2516 			error = 0;
2517 		}
2518 		/* either way, we are done */
2519 		goto out;
2520 	}
2521 
2522 	/*
2523 	 * To be valid, a kauth_filesec_t must be large enough to hold a zero
2524 	 * ACE entry ACL, and if it's larger than that, it must have the right
2525 	 * number of bytes such that it contains an atomic number of ACEs,
2526 	 * rather than partial entries.  Otherwise, we ignore it.
2527 	 */
2528 	if (!KAUTH_FILESEC_VALID(xsize)) {
2529 		KAUTH_DEBUG("    ERROR - Bogus kauth_fiilesec_t: %ld bytes", xsize);
2530 		error = 0;
2531 		goto out;
2532 	}
2533 
2534 	/* how many entries would fit? */
2535 	fsec_size = KAUTH_FILESEC_COUNT(xsize);
2536 	if (fsec_size > KAUTH_ACL_MAX_ENTRIES) {
2537 		KAUTH_DEBUG("    ERROR - Bogus (too large) kauth_fiilesec_t: %ld bytes", xsize);
2538 		error = 0;
2539 		goto out;
2540 	}
2541 
2542 	/* get buffer and uio */
2543 	if (((fsec = kauth_filesec_alloc((int)fsec_size)) == NULL) ||
2544 	    ((fsec_uio = uio_create(1, 0, UIO_SYSSPACE, UIO_READ)) == NULL) ||
2545 	    uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), xsize)) {
2546 		KAUTH_DEBUG("    ERROR - could not allocate iov to read ACL");
2547 		error = ENOMEM;
2548 		goto out;
2549 	}
2550 
2551 	/* read security attribute */
2552 	rsize = xsize;
2553 	if ((error = vn_getxattr(vp,
2554 	    KAUTH_FILESEC_XATTR,
2555 	    fsec_uio,
2556 	    &rsize,
2557 	    XATTR_NOSECURITY,
2558 	    ctx)) != 0) {
2559 		/* no attribute - no security data */
2560 		if ((error == ENOATTR) || (error == ENOENT) || (error == EJUSTRETURN)) {
2561 			error = 0;
2562 		}
2563 		/* either way, we are done */
2564 		goto out;
2565 	}
2566 
2567 	/*
2568 	 * Validate security structure; the validation must take place in host
2569 	 * byte order.  If it's corrupt, we will just ignore it.
2570 	 */
2571 
2572 	/* Validate the size before trying to convert it */
2573 	if (rsize < KAUTH_FILESEC_SIZE(0)) {
2574 		KAUTH_DEBUG("ACL - DATA TOO SMALL (%d)", rsize);
2575 		goto out;
2576 	}
2577 
2578 	/* Validate the magic number before trying to convert it */
2579 	host_fsec_magic = ntohl(KAUTH_FILESEC_MAGIC);
2580 	if (fsec->fsec_magic != host_fsec_magic) {
2581 		KAUTH_DEBUG("ACL - BAD MAGIC %x", host_fsec_magic);
2582 		goto out;
2583 	}
2584 
2585 	/* Validate the entry count before trying to convert it. */
2586 	host_acl_entrycount = ntohl(fsec->fsec_acl.acl_entrycount);
2587 	if (host_acl_entrycount != KAUTH_FILESEC_NOACL) {
2588 		if (host_acl_entrycount > KAUTH_ACL_MAX_ENTRIES) {
2589 			KAUTH_DEBUG("ACL - BAD ENTRYCOUNT %x", host_acl_entrycount);
2590 			goto out;
2591 		}
2592 		if (KAUTH_FILESEC_SIZE(host_acl_entrycount) > rsize) {
2593 			KAUTH_DEBUG("ACL - BUFFER OVERFLOW (%d entries too big for %d)", host_acl_entrycount, rsize);
2594 			goto out;
2595 		}
2596 	}
2597 
2598 	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, NULL);
2599 
2600 	*fsecp = fsec;
2601 	fsec = NULL;
2602 	error = 0;
2603 out:
2604 	if (fsec != NULL) {
2605 		kauth_filesec_free(fsec);
2606 	}
2607 	if (fsec_uio != NULL) {
2608 		uio_free(fsec_uio);
2609 	}
2610 	if (error) {
2611 		*fsecp = NULL;
2612 	}
2613 	return error;
2614 }
2615 
2616 /*
2617  * Set a filesec and optional acl contents into an extended attribute.
2618  * The function will attempt to store ACL, UUID, and GUID information using a
2619  * write to a named extended attribute (KAUTH_FILESEC_XATTR).  The 'acl'
2620  * may or may not point to the `fsec->fsec_acl`, depending on whether the
2621  * original caller supplied an acl.
2622  *
2623  * Parameters:	vp			The vnode on which to operate.
2624  *		fsec			The filesec being set.
2625  *		acl			The acl to be associated with 'fsec'.
2626  *		ctx			The vnode context in which the
2627  *					operation is to be attempted.
2628  *
2629  * Returns:	0			Success
2630  *		!0			errno value
2631  *
2632  * Notes:	Both the fsec and the acl are always valid.
2633  *
2634  *		The kauth_filesec_t in 'fsec', if any, is in host byte order,
2635  *		as are the acl contents, if they are used.  Internally, we will
2636  *		canonicalize these values into network (PPC) byte order before we
2637  *		attempt to write them so that the on-disk contents of the
2638  *		extended attribute are identical for both PPC and Intel (if we
2639  *		were not being required to provide this service via fallback,
2640  *		this would be the job of the filesystem 'VNOP_SETATTR' call).
2641  *		We reverse this process on the way out, so we leave with the
2642  *		same byte order we started with.
2643  *
2644  * XXX:		We should enumerate the possible errno values here, and where
2645  *		in the code they originated.
2646  */
2647 static int
2648 vnode_set_filesec(vnode_t vp, kauth_filesec_t fsec, kauth_acl_t acl, vfs_context_t ctx)
2649 {
2650 	uio_t           fsec_uio;
2651 	int             error;
2652 	uint32_t        saved_acl_copysize;
2653 
2654 	fsec_uio = NULL;
2655 
2656 	if ((fsec_uio = uio_create(2, 0, UIO_SYSSPACE, UIO_WRITE)) == NULL) {
2657 		KAUTH_DEBUG("    ERROR - could not allocate iov to write ACL");
2658 		error = ENOMEM;
2659 		goto out;
2660 	}
2661 	/*
2662 	 * Save the pre-converted ACL copysize, because it gets swapped too
2663 	 * if we are running with the wrong endianness.
2664 	 */
2665 	saved_acl_copysize = KAUTH_ACL_COPYSIZE(acl);
2666 
2667 	kauth_filesec_acl_setendian(KAUTH_ENDIAN_DISK, fsec, acl);
2668 
2669 	uio_addiov(fsec_uio, CAST_USER_ADDR_T(fsec), KAUTH_FILESEC_SIZE(0) - KAUTH_ACL_SIZE(KAUTH_FILESEC_NOACL));
2670 	uio_addiov(fsec_uio, CAST_USER_ADDR_T(acl), saved_acl_copysize);
2671 	error = vn_setxattr(vp,
2672 	    KAUTH_FILESEC_XATTR,
2673 	    fsec_uio,
2674 	    XATTR_NOSECURITY,           /* we have auth'ed already */
2675 	    ctx);
2676 	VFS_DEBUG(ctx, vp, "SETATTR - set ACL returning %d", error);
2677 
2678 	kauth_filesec_acl_setendian(KAUTH_ENDIAN_HOST, fsec, acl);
2679 
2680 out:
2681 	if (fsec_uio != NULL) {
2682 		uio_free(fsec_uio);
2683 	}
2684 	return error;
2685 }
2686 
2687 /*
2688  * Handle uid/gid == 99 and MNT_IGNORE_OWNERSHIP here.
2689  */
2690 void
2691 vnode_attr_handle_uid_and_gid(struct vnode_attr *vap, mount_t mp, vfs_context_t ctx)
2692 {
2693 	uid_t   nuid;
2694 	gid_t   ngid;
2695 	bool is_suser = vfs_context_issuser(ctx) ? true : false;
2696 
2697 	if (VATTR_IS_ACTIVE(vap, va_uid)) {
2698 		if (is_suser && VATTR_IS_SUPPORTED(vap, va_uid)) {
2699 			nuid = vap->va_uid;
2700 		} else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2701 			nuid = mp->mnt_fsowner;
2702 			if (nuid == KAUTH_UID_NONE) {
2703 				nuid = 99;
2704 			}
2705 		} else if (VATTR_IS_SUPPORTED(vap, va_uid)) {
2706 			nuid = vap->va_uid;
2707 		} else {
2708 			/* this will always be something sensible */
2709 			nuid = mp->mnt_fsowner;
2710 		}
2711 		if ((nuid == 99) && !is_suser) {
2712 			nuid = kauth_cred_getuid(vfs_context_ucred(ctx));
2713 		}
2714 		VATTR_RETURN(vap, va_uid, nuid);
2715 	}
2716 	if (VATTR_IS_ACTIVE(vap, va_gid)) {
2717 		if (is_suser && VATTR_IS_SUPPORTED(vap, va_gid)) {
2718 			ngid = vap->va_gid;
2719 		} else if (mp->mnt_flag & MNT_IGNORE_OWNERSHIP) {
2720 			ngid = mp->mnt_fsgroup;
2721 			if (ngid == KAUTH_GID_NONE) {
2722 				ngid = 99;
2723 			}
2724 		} else if (VATTR_IS_SUPPORTED(vap, va_gid)) {
2725 			ngid = vap->va_gid;
2726 		} else {
2727 			/* this will always be something sensible */
2728 			ngid = mp->mnt_fsgroup;
2729 		}
2730 		if ((ngid == 99) && !is_suser) {
2731 			ngid = kauth_cred_getgid(vfs_context_ucred(ctx));
2732 		}
2733 		VATTR_RETURN(vap, va_gid, ngid);
2734 	}
2735 }
2736 
2737 /*
2738  * Returns:	0			Success
2739  *		ENOMEM			Not enough space [only if has filesec]
2740  *		EINVAL			Requested unknown attributes
2741  *		VNOP_GETATTR:		???
2742  *		vnode_get_filesec:	???
2743  *		kauth_cred_guid2uid:	???
2744  *		kauth_cred_guid2gid:	???
2745  *		vfs_update_vfsstat:	???
2746  */
2747 int
2748 vnode_getattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2749 {
2750 	kauth_filesec_t fsec;
2751 	kauth_acl_t facl;
2752 	int     error;
2753 
2754 	/*
2755 	 * Reject attempts to fetch unknown attributes.
2756 	 */
2757 	if (vap->va_active & ~VNODE_ATTR_ALL) {
2758 		return EINVAL;
2759 	}
2760 
2761 	/* don't ask for extended security data if the filesystem doesn't support it */
2762 	if (!vfs_extendedsecurity(vnode_mount(vp))) {
2763 		VATTR_CLEAR_ACTIVE(vap, va_acl);
2764 		VATTR_CLEAR_ACTIVE(vap, va_uuuid);
2765 		VATTR_CLEAR_ACTIVE(vap, va_guuid);
2766 	}
2767 
2768 	/*
2769 	 * If the caller wants size values we might have to synthesise, give the
2770 	 * filesystem the opportunity to supply better intermediate results.
2771 	 */
2772 	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2773 	    VATTR_IS_ACTIVE(vap, va_total_size) ||
2774 	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2775 		VATTR_SET_ACTIVE(vap, va_data_size);
2776 		VATTR_SET_ACTIVE(vap, va_data_alloc);
2777 		VATTR_SET_ACTIVE(vap, va_total_size);
2778 		VATTR_SET_ACTIVE(vap, va_total_alloc);
2779 	}
2780 
2781 	vap->va_vaflags &= ~VA_USEFSID;
2782 
2783 	error = VNOP_GETATTR(vp, vap, ctx);
2784 	if (error) {
2785 		KAUTH_DEBUG("ERROR - returning %d", error);
2786 		goto out;
2787 	}
2788 
2789 	/*
2790 	 * If extended security data was requested but not returned, try the fallback
2791 	 * path.
2792 	 */
2793 	if (VATTR_NOT_RETURNED(vap, va_acl) || VATTR_NOT_RETURNED(vap, va_uuuid) || VATTR_NOT_RETURNED(vap, va_guuid)) {
2794 		fsec = NULL;
2795 
2796 		if (XATTR_VNODE_SUPPORTED(vp)) {
2797 			/* try to get the filesec */
2798 			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
2799 				goto out;
2800 			}
2801 		}
2802 		/* if no filesec, no attributes */
2803 		if (fsec == NULL) {
2804 			VATTR_RETURN(vap, va_acl, NULL);
2805 			VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
2806 			VATTR_RETURN(vap, va_guuid, kauth_null_guid);
2807 		} else {
2808 			/* looks good, try to return what we were asked for */
2809 			VATTR_RETURN(vap, va_uuuid, fsec->fsec_owner);
2810 			VATTR_RETURN(vap, va_guuid, fsec->fsec_group);
2811 
2812 			/* only return the ACL if we were actually asked for it */
2813 			if (VATTR_IS_ACTIVE(vap, va_acl)) {
2814 				if (fsec->fsec_acl.acl_entrycount == KAUTH_FILESEC_NOACL) {
2815 					VATTR_RETURN(vap, va_acl, NULL);
2816 				} else {
2817 					facl = kauth_acl_alloc(fsec->fsec_acl.acl_entrycount);
2818 					if (facl == NULL) {
2819 						kauth_filesec_free(fsec);
2820 						error = ENOMEM;
2821 						goto out;
2822 					}
2823 					__nochk_bcopy(&fsec->fsec_acl, facl, KAUTH_ACL_COPYSIZE(&fsec->fsec_acl));
2824 					VATTR_RETURN(vap, va_acl, facl);
2825 				}
2826 			}
2827 			kauth_filesec_free(fsec);
2828 		}
2829 	}
2830 	/*
2831 	 * If someone gave us an unsolicited filesec, toss it.  We promise that
2832 	 * we're OK with a filesystem giving us anything back, but our callers
2833 	 * only expect what they asked for.
2834 	 */
2835 	if (VATTR_IS_SUPPORTED(vap, va_acl) && !VATTR_IS_ACTIVE(vap, va_acl)) {
2836 		if (vap->va_acl != NULL) {
2837 			kauth_acl_free(vap->va_acl);
2838 		}
2839 		VATTR_CLEAR_SUPPORTED(vap, va_acl);
2840 	}
2841 
2842 #if 0   /* enable when we have a filesystem only supporting UUIDs */
2843 	/*
2844 	 * Handle the case where we need a UID/GID, but only have extended
2845 	 * security information.
2846 	 */
2847 	if (VATTR_NOT_RETURNED(vap, va_uid) &&
2848 	    VATTR_IS_SUPPORTED(vap, va_uuuid) &&
2849 	    !kauth_guid_equal(&vap->va_uuuid, &kauth_null_guid)) {
2850 		if ((error = kauth_cred_guid2uid(&vap->va_uuuid, &nuid)) == 0) {
2851 			VATTR_RETURN(vap, va_uid, nuid);
2852 		}
2853 	}
2854 	if (VATTR_NOT_RETURNED(vap, va_gid) &&
2855 	    VATTR_IS_SUPPORTED(vap, va_guuid) &&
2856 	    !kauth_guid_equal(&vap->va_guuid, &kauth_null_guid)) {
2857 		if ((error = kauth_cred_guid2gid(&vap->va_guuid, &ngid)) == 0) {
2858 			VATTR_RETURN(vap, va_gid, ngid);
2859 		}
2860 	}
2861 #endif
2862 
2863 	vnode_attr_handle_uid_and_gid(vap, vp->v_mount, ctx);
2864 
2865 	/*
2866 	 * Synthesise some values that can be reasonably guessed.
2867 	 */
2868 	if (!VATTR_IS_SUPPORTED(vap, va_iosize)) {
2869 		assert(vp->v_mount->mnt_vfsstat.f_iosize <= UINT32_MAX);
2870 		VATTR_RETURN(vap, va_iosize, (uint32_t)vp->v_mount->mnt_vfsstat.f_iosize);
2871 	}
2872 
2873 	if (!VATTR_IS_SUPPORTED(vap, va_flags)) {
2874 		VATTR_RETURN(vap, va_flags, 0);
2875 	}
2876 
2877 	if (!VATTR_IS_SUPPORTED(vap, va_filerev)) {
2878 		VATTR_RETURN(vap, va_filerev, 0);
2879 	}
2880 
2881 	if (!VATTR_IS_SUPPORTED(vap, va_gen)) {
2882 		VATTR_RETURN(vap, va_gen, 0);
2883 	}
2884 
2885 	/*
2886 	 * Default sizes.  Ordering here is important, as later defaults build on earlier ones.
2887 	 */
2888 	if (!VATTR_IS_SUPPORTED(vap, va_data_size)) {
2889 		VATTR_RETURN(vap, va_data_size, 0);
2890 	}
2891 
2892 	/* do we want any of the possibly-computed values? */
2893 	if (VATTR_IS_ACTIVE(vap, va_data_alloc) ||
2894 	    VATTR_IS_ACTIVE(vap, va_total_size) ||
2895 	    VATTR_IS_ACTIVE(vap, va_total_alloc)) {
2896 		/* make sure f_bsize is valid */
2897 		if (vp->v_mount->mnt_vfsstat.f_bsize == 0) {
2898 			if ((error = vfs_update_vfsstat(vp->v_mount, ctx, VFS_KERNEL_EVENT)) != 0) {
2899 				goto out;
2900 			}
2901 		}
2902 
2903 		/* default va_data_alloc from va_data_size */
2904 		if (!VATTR_IS_SUPPORTED(vap, va_data_alloc)) {
2905 			VATTR_RETURN(vap, va_data_alloc, roundup(vap->va_data_size, vp->v_mount->mnt_vfsstat.f_bsize));
2906 		}
2907 
2908 		/* default va_total_size from va_data_size */
2909 		if (!VATTR_IS_SUPPORTED(vap, va_total_size)) {
2910 			VATTR_RETURN(vap, va_total_size, vap->va_data_size);
2911 		}
2912 
2913 		/* default va_total_alloc from va_total_size which is guaranteed at this point */
2914 		if (!VATTR_IS_SUPPORTED(vap, va_total_alloc)) {
2915 			VATTR_RETURN(vap, va_total_alloc, roundup(vap->va_total_size, vp->v_mount->mnt_vfsstat.f_bsize));
2916 		}
2917 	}
2918 
2919 	/*
2920 	 * If we don't have a change time, pull it from the modtime.
2921 	 */
2922 	if (!VATTR_IS_SUPPORTED(vap, va_change_time) && VATTR_IS_SUPPORTED(vap, va_modify_time)) {
2923 		VATTR_RETURN(vap, va_change_time, vap->va_modify_time);
2924 	}
2925 
2926 	/*
2927 	 * This is really only supported for the creation VNOPs, but since the field is there
2928 	 * we should populate it correctly.
2929 	 */
2930 	VATTR_RETURN(vap, va_type, vp->v_type);
2931 
2932 	/*
2933 	 * The fsid can be obtained from the mountpoint directly.
2934 	 */
2935 	if (VATTR_IS_ACTIVE(vap, va_fsid) &&
2936 	    (!VATTR_IS_SUPPORTED(vap, va_fsid) ||
2937 	    vap->va_vaflags & VA_REALFSID || !(vap->va_vaflags & VA_USEFSID))) {
2938 		VATTR_RETURN(vap, va_fsid, vp->v_mount->mnt_vfsstat.f_fsid.val[0]);
2939 	}
2940 
2941 out:
2942 	vap->va_vaflags &= ~VA_USEFSID;
2943 
2944 	return error;
2945 }
2946 
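/*
 * Illustrative sketch (not part of the original source): the usual caller
 * pattern is to initialize a vnode_attr, mark the attributes wanted, call
 * vnode_getattr(), and then check which attributes were actually returned.
 * The helper name example_file_size is hypothetical.
 */
#if 0
static int
example_file_size(vnode_t vp, vfs_context_t ctx, off_t *sizep)
{
	struct vnode_attr va;
	int error;

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_data_size);

	error = vnode_getattr(vp, &va, ctx);
	if (error == 0 && VATTR_IS_SUPPORTED(&va, va_data_size)) {
		*sizep = (off_t)va.va_data_size;
	}
	return error;
}
#endif
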
2947 /*
2948  * Choose 32 bit or 64 bit fsid
2949  */
2950 uint64_t
2951 vnode_get_va_fsid(struct vnode_attr *vap)
2952 {
2953 	if (VATTR_IS_SUPPORTED(vap, va_fsid64)) {
2954 		return (uint64_t)vap->va_fsid64.val[0] + ((uint64_t)vap->va_fsid64.val[1] << 32);
2955 	}
2956 	return vap->va_fsid;
2957 }
2958 
2959 /*
2960  * Set the attributes on a vnode in a vnode context.
2961  *
2962  * Parameters:	vp			The vnode whose attributes to set.
2963  *		vap			A pointer to the attributes to set.
2964  *		ctx			The vnode context in which the
2965  *					operation is to be attempted.
2966  *
2967  * Returns:	0			Success
2968  *		!0			errno value
2969  *
2970  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order.
2971  *
2972  *		The contents of the data area pointed to by 'vap' may be
2973  *		modified if the vnode is on a filesystem which has been
2974  *		mounted with ignore ownership flags, or by the underlying
2975  *		VFS itself, or by the fallback code, if the underlying VFS
2976  *		does not support ACL, UUID, or GUUID attributes directly.
2977  *
2978  * XXX:		We should enumerate the possible errno values here, and where
2979  *		in the code they originated.
2980  */
2981 int
2982 vnode_setattr(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
2983 {
2984 	int     error;
2985 #if CONFIG_FSE
2986 	uint64_t active;
2987 	int     is_perm_change = 0;
2988 	int     is_stat_change = 0;
2989 #endif
2990 
2991 	/*
2992 	 * Reject attempts to set unknown attributes.
2993 	 */
2994 	if (vap->va_active & ~VNODE_ATTR_ALL) {
2995 		return EINVAL;
2996 	}
2997 
2998 	/*
2999 	 * Make sure the filesystem is mounted R/W.
3000 	 * If not, return an error.
3001 	 */
3002 	if (vfs_isrdonly(vp->v_mount)) {
3003 		error = EROFS;
3004 		goto out;
3005 	}
3006 
3007 #if DEVELOPMENT || DEBUG
3008 	/*
3009 	 * XXX VSWAP: Check for entitlements or special flag here
3010 	 * so we can restrict access appropriately.
3011 	 */
3012 #else /* DEVELOPMENT || DEBUG */
3013 
3014 	if (vnode_isswap(vp) && (ctx != vfs_context_kernel())) {
3015 		error = EPERM;
3016 		goto out;
3017 	}
3018 #endif /* DEVELOPMENT || DEBUG */
3019 
3020 #if NAMEDSTREAMS
3021 	/* For streams, va_data_size is the only settable attribute. */
3022 	if ((vp->v_flag & VISNAMEDSTREAM) && (vap->va_active != VNODE_ATTR_va_data_size)) {
3023 		error = EPERM;
3024 		goto out;
3025 	}
3026 #endif
3027 	/* Check for truncation */
3028 	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
3029 		switch (vp->v_type) {
3030 		case VREG:
3031 			/* For regular files it's ok */
3032 			break;
3033 		case VDIR:
3034 			/* Not allowed to truncate directories */
3035 			error = EISDIR;
3036 			goto out;
3037 		default:
3038 			/* For everything else we will clear the bit and let underlying FS decide on the rest */
3039 			VATTR_CLEAR_ACTIVE(vap, va_data_size);
3040 			if (vap->va_active) {
3041 				break;
3042 			}
3043 			/* If it was the only bit set, return success, to handle cases like redirect to /dev/null */
3044 			return 0;
3045 		}
3046 	}
3047 
3048 	/*
3049 	 * If ownership is being ignored on this volume, we silently discard
3050 	 * ownership changes.
3051 	 */
3052 	if (vp->v_mount->mnt_flag & MNT_IGNORE_OWNERSHIP) {
3053 		VATTR_CLEAR_ACTIVE(vap, va_uid);
3054 		VATTR_CLEAR_ACTIVE(vap, va_gid);
3055 	}
3056 
3057 	/*
3058 	 * Make sure that extended security is enabled if we're going to try
3059 	 * to set any.
3060 	 */
3061 	if (!vfs_extendedsecurity(vnode_mount(vp)) &&
3062 	    (VATTR_IS_ACTIVE(vap, va_acl) || VATTR_IS_ACTIVE(vap, va_uuuid) || VATTR_IS_ACTIVE(vap, va_guuid))) {
3063 		KAUTH_DEBUG("SETATTR - returning ENOTSUP to request to set extended security");
3064 		error = ENOTSUP;
3065 		goto out;
3066 	}
3067 
3068 	/* Never allow the setting of any unsupported superuser flags. */
3069 	if (VATTR_IS_ACTIVE(vap, va_flags)) {
3070 		vap->va_flags &= (SF_SUPPORTED | UF_SETTABLE);
3071 	}
3072 
3073 #if CONFIG_FSE
3074 	/*
3075 	 * Remember all of the active attributes that we're
3076 	 * attempting to modify.
3077 	 */
3078 	active = vap->va_active & ~VNODE_ATTR_RDONLY;
3079 #endif
3080 
3081 	error = VNOP_SETATTR(vp, vap, ctx);
3082 
3083 	if ((error == 0) && !VATTR_ALL_SUPPORTED(vap)) {
3084 		error = vnode_setattr_fallback(vp, vap, ctx);
3085 	}
3086 
3087 #if CONFIG_FSE
3088 #define PERMISSION_BITS (VNODE_ATTR_BIT(va_uid) | VNODE_ATTR_BIT(va_uuuid) | \
3089 	                 VNODE_ATTR_BIT(va_gid) | VNODE_ATTR_BIT(va_guuid) | \
3090 	                 VNODE_ATTR_BIT(va_mode) | VNODE_ATTR_BIT(va_acl))
3091 
3092 	/*
3093 	 * Now that we've changed them, decide whether to send an
3094 	 * FSevent.
3095 	 */
3096 	if ((active & PERMISSION_BITS) & vap->va_supported) {
3097 		is_perm_change = 1;
3098 	} else {
3099 		/*
3100 		 * We've already checked the permission bits, and we
3101 		 * also want to filter out access time / backup time
3102 		 * changes.
3103 		 */
3104 		active &= ~(PERMISSION_BITS |
3105 		    VNODE_ATTR_BIT(va_access_time) |
3106 		    VNODE_ATTR_BIT(va_backup_time));
3107 
3108 		/* Anything left to notify about? */
3109 		if (active & vap->va_supported) {
3110 			is_stat_change = 1;
3111 		}
3112 	}
3113 
3114 	if (error == 0) {
3115 		if (is_perm_change) {
3116 			if (need_fsevent(FSE_CHOWN, vp)) {
3117 				add_fsevent(FSE_CHOWN, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
3118 			}
3119 		} else if (is_stat_change && need_fsevent(FSE_STAT_CHANGED, vp)) {
3120 			add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
3121 		}
3122 	}
3123 #undef PERMISSION_BITS
3124 #endif
3125 
3126 out:
3127 	return error;
3128 }
3129 
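/*
 * Illustrative sketch (not part of the original source): setting attributes
 * follows the same vnode_attr pattern, with VATTR_SET() for each field to
 * change.  Truncating a regular file to zero length might look like this;
 * the helper name example_truncate is hypothetical.
 */
#if 0
static int
example_truncate(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_data_size, 0);

	return vnode_setattr(vp, &va, ctx);
}
#endif
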
3130 /*
3131  * Fallback for setting the attributes on a vnode in a vnode context.  This
3132  * function will attempt to store ACL, UUID, and GUID information utilizing
3133  * a read/modify/write operation against an EA used as a backing store for
3134  * the object.
3135  *
3136  * Parameters:	vp			The vnode whose attributes to set.
3137  *		vap			A pointer to the attributes to set.
3138  *		ctx			The vnode context in which the
3139  *					operation is to be attempted.
3140  *
3141  * Returns:	0			Success
3142  *		!0			errno value
3143  *
3144  * Notes:	The kauth_filesec_t in 'vap', if any, is in host byte order,
3145  *		as are the fsec and lfsec, if they are used.
3146  *
3147  *		The contents of the data area pointed to by 'vap' may be
3148  *		modified to indicate that the attribute is supported for
3149  *		any given requested attribute.
3150  *
3151  * XXX:		We should enumerate the possible errno values here, and where
3152  *		in the code they originated.
3153  */
3154 int
3155 vnode_setattr_fallback(vnode_t vp, struct vnode_attr *vap, vfs_context_t ctx)
3156 {
3157 	kauth_filesec_t fsec;
3158 	kauth_acl_t facl;
3159 	struct kauth_filesec lfsec;
3160 	int     error;
3161 
3162 	error = 0;
3163 
3164 	/*
3165 	 * Extended security fallback via extended attributes.
3166 	 *
3167 	 * Note that we do not free the filesec; the caller is expected to
3168 	 * do this.
3169 	 */
3170 	if (VATTR_NOT_RETURNED(vap, va_acl) ||
3171 	    VATTR_NOT_RETURNED(vap, va_uuuid) ||
3172 	    VATTR_NOT_RETURNED(vap, va_guuid)) {
3173 		VFS_DEBUG(ctx, vp, "SETATTR - doing filesec fallback");
3174 
3175 		/*
3176 		 * Fail for file types that we don't permit extended security
3177 		 * to be set on.
3178 		 */
3179 		if (!XATTR_VNODE_SUPPORTED(vp)) {
3180 			VFS_DEBUG(ctx, vp, "SETATTR - Can't write ACL to file type %d", vnode_vtype(vp));
3181 			error = EINVAL;
3182 			goto out;
3183 		}
3184 
3185 		/*
3186 		 * If we don't have all the extended security items, we need
3187 		 * to fetch the existing data to perform a read-modify-write
3188 		 * operation.
3189 		 */
3190 		fsec = NULL;
3191 		if (!VATTR_IS_ACTIVE(vap, va_acl) ||
3192 		    !VATTR_IS_ACTIVE(vap, va_uuuid) ||
3193 		    !VATTR_IS_ACTIVE(vap, va_guuid)) {
3194 			if ((error = vnode_get_filesec(vp, &fsec, ctx)) != 0) {
3195 				KAUTH_DEBUG("SETATTR - ERROR %d fetching filesec for update", error);
3196 				goto out;
3197 			}
3198 		}
3199 		/* if we didn't get a filesec, use our local one */
3200 		if (fsec == NULL) {
3201 			KAUTH_DEBUG("SETATTR - using local filesec for new/full update");
3202 			fsec = &lfsec;
3203 		} else {
3204 			KAUTH_DEBUG("SETATTR - updating existing filesec");
3205 		}
3206 		/* find the ACL */
3207 		facl = &fsec->fsec_acl;
3208 
3209 		/* if we're using the local filesec, we need to initialise it */
3210 		if (fsec == &lfsec) {
3211 			fsec->fsec_magic = KAUTH_FILESEC_MAGIC;
3212 			fsec->fsec_owner = kauth_null_guid;
3213 			fsec->fsec_group = kauth_null_guid;
3214 			facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3215 			facl->acl_flags = 0;
3216 		}
3217 
3218 		/*
3219 		 * Update with the supplied attributes.
3220 		 */
3221 		if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
3222 			KAUTH_DEBUG("SETATTR - updating owner UUID");
3223 			fsec->fsec_owner = vap->va_uuuid;
3224 			VATTR_SET_SUPPORTED(vap, va_uuuid);
3225 		}
3226 		if (VATTR_IS_ACTIVE(vap, va_guuid)) {
3227 			KAUTH_DEBUG("SETATTR - updating group UUID");
3228 			fsec->fsec_group = vap->va_guuid;
3229 			VATTR_SET_SUPPORTED(vap, va_guuid);
3230 		}
3231 		if (VATTR_IS_ACTIVE(vap, va_acl)) {
3232 			if (vap->va_acl == NULL) {
3233 				KAUTH_DEBUG("SETATTR - removing ACL");
3234 				facl->acl_entrycount = KAUTH_FILESEC_NOACL;
3235 			} else {
3236 				KAUTH_DEBUG("SETATTR - setting ACL with %d entries", vap->va_acl->acl_entrycount);
3237 				facl = vap->va_acl;
3238 			}
3239 			VATTR_SET_SUPPORTED(vap, va_acl);
3240 		}
3241 
3242 		/*
3243 		 * If the filesec data is all invalid, we can just remove
3244 		 * the EA completely.
3245 		 */
3246 		if ((facl->acl_entrycount == KAUTH_FILESEC_NOACL) &&
3247 		    kauth_guid_equal(&fsec->fsec_owner, &kauth_null_guid) &&
3248 		    kauth_guid_equal(&fsec->fsec_group, &kauth_null_guid)) {
3249 			error = vn_removexattr(vp, KAUTH_FILESEC_XATTR, XATTR_NOSECURITY, ctx);
3250 			/* no attribute is ok, nothing to delete */
3251 			if (error == ENOATTR) {
3252 				error = 0;
3253 			}
3254 			VFS_DEBUG(ctx, vp, "SETATTR - remove filesec returning %d", error);
3255 		} else {
3256 			/* write the EA */
3257 			error = vnode_set_filesec(vp, fsec, facl, ctx);
3258 			VFS_DEBUG(ctx, vp, "SETATTR - update filesec returning %d", error);
3259 		}
3260 
3261 		/* if we fetched a filesec, dispose of the buffer */
3262 		if (fsec != &lfsec) {
3263 			kauth_filesec_free(fsec);
3264 		}
3265 	}
3266 out:
3267 
3268 	return error;
3269 }
3270 
3271 /*
3272  * Upcall for a filesystem to tell VFS about an EVFILT_VNODE-type
3273  * event on a vnode.
3274  */
3275 int
3276 vnode_notify(vnode_t vp, uint32_t events, struct vnode_attr *vap)
3277 {
3278 	/* These are the same as the corresponding knotes, at least for now.  Cheating a little. */
3279 	uint32_t knote_mask = (VNODE_EVENT_WRITE | VNODE_EVENT_DELETE | VNODE_EVENT_RENAME
3280 	    | VNODE_EVENT_LINK | VNODE_EVENT_EXTEND | VNODE_EVENT_ATTRIB);
3281 	uint32_t dir_contents_mask = (VNODE_EVENT_DIR_CREATED | VNODE_EVENT_FILE_CREATED
3282 	    | VNODE_EVENT_DIR_REMOVED | VNODE_EVENT_FILE_REMOVED);
3283 	uint32_t knote_events = (events & knote_mask);
3284 
3285 	/* Permissions are not explicitly part of the kqueue model */
3286 	if (events & VNODE_EVENT_PERMS) {
3287 		knote_events |= NOTE_ATTRIB;
3288 	}
3289 
3290 	/* Directory contents information just becomes NOTE_WRITE */
3291 	if ((vnode_isdir(vp)) && (events & dir_contents_mask)) {
3292 		knote_events |= NOTE_WRITE;
3293 	}
3294 
3295 	if (knote_events) {
3296 		lock_vnode_and_post(vp, knote_events);
3297 #if CONFIG_FSE
3298 		if (vap != NULL) {
3299 			create_fsevent_from_kevent(vp, events, vap);
3300 		}
3301 #else
3302 		(void)vap;
3303 #endif
3304 	}
3305 
3306 	return 0;
3307 }
3308 
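/*
 * Illustrative sketch (not part of the original source): a filesystem that
 * detects an attribute change out-of-band can populate the attributes that
 * vnode_notify() consumes and then post the event.  The helper name
 * example_post_attrib is hypothetical.
 */
#if 0
static void
example_post_attrib(vnode_t vp, vfs_context_t ctx)
{
	struct vnode_attr va;

	vfs_get_notify_attributes(&va);
	if (vnode_getattr(vp, &va, ctx) == 0) {
		vnode_notify(vp, VNODE_EVENT_ATTRIB, &va);
	}
}
#endif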
3309 
3310 
3311 int
3312 vnode_isdyldsharedcache(vnode_t vp)
3313 {
3314 	return (vp->v_flag & VSHARED_DYLD) ? 1 : 0;
3315 }
3316 
3317 
3318 /*
3319  * For a filesystem that isn't tracking its own vnode watchers:
3320  * check whether a vnode is being monitored.
3321  */
3322 int
3323 vnode_ismonitored(vnode_t vp)
3324 {
3325 	return vp->v_knotes.slh_first != NULL;
3326 }
3327 
3328 int
3329 vnode_getbackingvnode(vnode_t in_vp, vnode_t* out_vpp)
3330 {
3331 	if (out_vpp) {
3332 		*out_vpp = NULLVP;
3333 	}
3334 #if NULLFS
3335 	return nullfs_getbackingvnode(in_vp, out_vpp);
3336 #else
3337 #pragma unused(in_vp)
3338 	return ENOENT;
3339 #endif
3340 }
3341 
3342 /*
3343  * Initialize a struct vnode_attr and activate the attributes required
3344  * by the vnode_notify() call.
3345  */
3346 int
3347 vfs_get_notify_attributes(struct vnode_attr *vap)
3348 {
3349 	VATTR_INIT(vap);
3350 	vap->va_active = VNODE_NOTIFY_ATTRS;
3351 	return 0;
3352 }
3353 
3354 #if CONFIG_TRIGGERS
3355 int
3356 vfs_settriggercallback(fsid_t *fsid, vfs_trigger_callback_t vtc, void *data, uint32_t flags __unused, vfs_context_t ctx)
3357 {
3358 	int error;
3359 	mount_t mp;
3360 
3361 	mp = mount_list_lookupby_fsid(fsid, 0 /* locked */, 1 /* withref */);
3362 	if (mp == NULL) {
3363 		return ENOENT;
3364 	}
3365 
3366 	error = vfs_busy(mp, LK_NOWAIT);
3367 	mount_iterdrop(mp);
3368 
3369 	if (error != 0) {
3370 		return ENOENT;
3371 	}
3372 
3373 	mount_lock(mp);
3374 	if (mp->mnt_triggercallback != NULL) {
3375 		error = EBUSY;
3376 		mount_unlock(mp);
3377 		goto out;
3378 	}
3379 
3380 	mp->mnt_triggercallback = vtc;
3381 	mp->mnt_triggerdata = data;
3382 	mount_unlock(mp);
3383 
3384 	mp->mnt_triggercallback(mp, VTC_REPLACE, data, ctx);
3385 
3386 out:
3387 	vfs_unbusy(mp);
3388 	return 0;
3389 }
3390 #endif /* CONFIG_TRIGGERS */
3391 
3392 /*
3393  *  Definition of vnode operations.
3394  */
3395 
3396 #if 0
3397 /*
3398 *#
3399 *#% lookup       dvp     L ? ?
3400 *#% lookup       vpp     - L -
3401 */
3402 struct vnop_lookup_args {
3403 	struct vnodeop_desc *a_desc;
3404 	vnode_t a_dvp;
3405 	vnode_t *a_vpp;
3406 	struct componentname *a_cnp;
3407 	vfs_context_t a_context;
3408 };
3409 #endif /* 0*/
3410 
3411 /*
3412  * Returns:	0			Success
3413  *	lock_fsnode:ENOENT		No such file or directory [only for VFS
3414  *					 that is not thread safe & vnode is
3415  *					 currently being/has been terminated]
3416  *	<vfs_lookup>:ENAMETOOLONG
3417  *	<vfs_lookup>:ENOENT
3418  *	<vfs_lookup>:EJUSTRETURN
3419  *	<vfs_lookup>:EPERM
3420  *	<vfs_lookup>:EISDIR
3421  *	<vfs_lookup>:ENOTDIR
3422  *	<vfs_lookup>:???
3423  *
3424  * Note:	The return codes from the underlying VFS's lookup routine can't
3425  *		be fully enumerated here, since third party VFS authors may not
3426  *		limit their error returns to the ones documented here, even
3427  *		though this may result in some programs functioning incorrectly.
3428  *
3429  *		The return codes documented above are those which may currently
3430  *		be returned by HFS from hfs_lookup, not including additional
3431  *		error code which may be propagated from underlying routines.
3432  */
3433 errno_t
3434 VNOP_LOOKUP(vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
3435 {
3436 	int _err;
3437 	struct vnop_lookup_args a;
3438 
3439 	a.a_desc = &vnop_lookup_desc;
3440 	a.a_dvp = dvp;
3441 	a.a_vpp = vpp;
3442 	a.a_cnp = cnp;
3443 	a.a_context = ctx;
3444 
3445 	_err = (*dvp->v_op[vnop_lookup_desc.vdesc_offset])(&a);
3446 	if (_err == 0 && *vpp) {
3447 		DTRACE_FSINFO(lookup, vnode_t, *vpp);
3448 	}
3449 
3450 	return _err;
3451 }
3452 
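/*
 * Illustrative sketch (not part of the original source): a stacking
 * filesystem forwarding a lookup to its lower vnode would call the wrapper
 * and release the result when finished.  lower_dvp is an assumed identifier
 * for the lower directory vnode.
 */
#if 0
	vnode_t lvp = NULLVP;

	error = VNOP_LOOKUP(lower_dvp, &lvp, cnp, ctx);
	if (error == 0) {
		/* lvp is returned with an iocount; vnode_put(lvp) when done */
	}
#endif
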
3453 #if 0
3454 struct vnop_compound_open_args {
3455 	struct vnodeop_desc *a_desc;
3456 	vnode_t a_dvp;
3457 	vnode_t *a_vpp;
3458 	struct componentname *a_cnp;
3459 	int32_t a_flags;
3460 	int32_t a_fmode;
3461 	struct vnode_attr *a_vap;
3462 	vfs_context_t a_context;
3463 	void *a_reserved;
3464 };
3465 #endif /* 0 */
3466 
3467 int
3468 VNOP_COMPOUND_OPEN(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, int32_t fmode, uint32_t *statusp, struct vnode_attr *vap, vfs_context_t ctx)
3469 {
3470 	int _err;
3471 	struct vnop_compound_open_args a;
3472 	int did_create = 0;
3473 	int want_create;
3474 	uint32_t tmp_status = 0;
3475 	struct componentname *cnp = &ndp->ni_cnd;
3476 
3477 	want_create = (flags & O_CREAT);
3478 
3479 	a.a_desc = &vnop_compound_open_desc;
3480 	a.a_dvp = dvp;
3481 	a.a_vpp = vpp; /* Could be NULL */
3482 	a.a_cnp = cnp;
3483 	a.a_flags = flags;
3484 	a.a_fmode = fmode;
3485 	a.a_status = (statusp != NULL) ? statusp : &tmp_status;
3486 	a.a_vap = vap;
3487 	a.a_context = ctx;
3488 	a.a_open_create_authorizer = vn_authorize_create;
3489 	a.a_open_existing_authorizer = vn_authorize_open_existing;
3490 	a.a_reserved = NULL;
3491 
3492 	if (dvp == NULLVP) {
3493 		panic("No dvp?");
3494 	}
3495 	if (want_create && !vap) {
3496 		panic("Want create, but no vap?");
3497 	}
3498 	if (!want_create && vap) {
3499 		panic("Don't want create, but have a vap?");
3500 	}
3501 
3502 	_err = (*dvp->v_op[vnop_compound_open_desc.vdesc_offset])(&a);
3503 	if (want_create) {
3504 		if (_err == 0 && *vpp) {
3505 			DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3506 		} else {
3507 			DTRACE_FSINFO(compound_open, vnode_t, dvp);
3508 		}
3509 	} else {
3510 		DTRACE_FSINFO(compound_open, vnode_t, *vpp);
3511 	}
3512 
3513 	did_create = (*a.a_status & COMPOUND_OPEN_STATUS_DID_CREATE);
3514 
3515 	if (did_create && !want_create) {
3516 		panic("Filesystem did a create, even though none was requested?");
3517 	}
3518 
3519 	if (did_create) {
3520 #if CONFIG_APPLEDOUBLE
3521 		if (!NATIVE_XATTR(dvp)) {
3522 			/*
3523 			 * Remove stale Apple Double file (if any).
3524 			 */
3525 			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3526 		}
3527 #endif /* CONFIG_APPLEDOUBLE */
3528 		/* On create, provide kqueue notification */
3529 		post_event_if_success(dvp, _err, NOTE_WRITE);
3530 	}
3531 
3532 	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, did_create);
3533 #if 0 /* FSEvents... */
3534 	if (*vpp && _err && _err != EKEEPLOOKING) {
3535 		vnode_put(*vpp);
3536 		*vpp = NULLVP;
3537 	}
3538 #endif /* 0 */
3539 
3540 	return _err;
3541 }
3542 
3543 #if 0
3544 struct vnop_create_args {
3545 	struct vnodeop_desc *a_desc;
3546 	vnode_t a_dvp;
3547 	vnode_t *a_vpp;
3548 	struct componentname *a_cnp;
3549 	struct vnode_attr *a_vap;
3550 	vfs_context_t a_context;
3551 };
3552 #endif /* 0*/
3553 errno_t
3554 VNOP_CREATE(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3555 {
3556 	int _err;
3557 	struct vnop_create_args a;
3558 
3559 	a.a_desc = &vnop_create_desc;
3560 	a.a_dvp = dvp;
3561 	a.a_vpp = vpp;
3562 	a.a_cnp = cnp;
3563 	a.a_vap = vap;
3564 	a.a_context = ctx;
3565 
3566 	_err = (*dvp->v_op[vnop_create_desc.vdesc_offset])(&a);
3567 	if (_err == 0 && *vpp) {
3568 		DTRACE_FSINFO(create, vnode_t, *vpp);
3569 	}
3570 
3571 #if CONFIG_APPLEDOUBLE
3572 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
3573 		/*
3574 		 * Remove stale Apple Double file (if any).
3575 		 */
3576 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
3577 	}
3578 #endif /* CONFIG_APPLEDOUBLE */
3579 
3580 	post_event_if_success(dvp, _err, NOTE_WRITE);
3581 
3582 	return _err;
3583 }
3584 
3585 #if 0
3586 /*
3587 *#
3588 *#% whiteout     dvp     L L L
3589 *#% whiteout     cnp     - - -
3590 *#% whiteout     flag    - - -
3591 *#
3592 */
3593 struct vnop_whiteout_args {
3594 	struct vnodeop_desc *a_desc;
3595 	vnode_t a_dvp;
3596 	struct componentname *a_cnp;
3597 	int a_flags;
3598 	vfs_context_t a_context;
3599 };
3600 #endif /* 0*/
3601 errno_t
3602 VNOP_WHITEOUT(__unused vnode_t dvp, __unused struct componentname *cnp,
3603     __unused int flags, __unused vfs_context_t ctx)
3604 {
3605 	return ENOTSUP;       // XXX OBSOLETE
3606 }
3607 
3608 #if 0
3609 /*
3610 *#
3611 *#% mknod        dvp     L U U
3612 *#% mknod        vpp     - X -
3613 *#
3614 */
3615 struct vnop_mknod_args {
3616 	struct vnodeop_desc *a_desc;
3617 	vnode_t a_dvp;
3618 	vnode_t *a_vpp;
3619 	struct componentname *a_cnp;
3620 	struct vnode_attr *a_vap;
3621 	vfs_context_t a_context;
3622 };
3623 #endif /* 0*/
3624 errno_t
3625 VNOP_MKNOD(vnode_t dvp, vnode_t * vpp, struct componentname * cnp, struct vnode_attr * vap, vfs_context_t ctx)
3626 {
3627 	int _err;
3628 	struct vnop_mknod_args a;
3629 
3630 	a.a_desc = &vnop_mknod_desc;
3631 	a.a_dvp = dvp;
3632 	a.a_vpp = vpp;
3633 	a.a_cnp = cnp;
3634 	a.a_vap = vap;
3635 	a.a_context = ctx;
3636 
3637 	_err = (*dvp->v_op[vnop_mknod_desc.vdesc_offset])(&a);
3638 	if (_err == 0 && *vpp) {
3639 		DTRACE_FSINFO(mknod, vnode_t, *vpp);
3640 	}
3641 
3642 	post_event_if_success(dvp, _err, NOTE_WRITE);
3643 
3644 	return _err;
3645 }
3646 
3647 #if 0
3648 /*
3649 *#
3650 *#% open         vp      L L L
3651 *#
3652 */
3653 struct vnop_open_args {
3654 	struct vnodeop_desc *a_desc;
3655 	vnode_t a_vp;
3656 	int a_mode;
3657 	vfs_context_t a_context;
3658 };
3659 #endif /* 0*/
3660 errno_t
3661 VNOP_OPEN(vnode_t vp, int mode, vfs_context_t ctx)
3662 {
3663 	int _err;
3664 	struct vnop_open_args a;
3665 
3666 	if (ctx == NULL) {
3667 		ctx = vfs_context_current();
3668 	}
3669 	a.a_desc = &vnop_open_desc;
3670 	a.a_vp = vp;
3671 	a.a_mode = mode;
3672 	a.a_context = ctx;
3673 
3674 	_err = (*vp->v_op[vnop_open_desc.vdesc_offset])(&a);
3675 	DTRACE_FSINFO(open, vnode_t, vp);
3676 
3677 	return _err;
3678 }
3679 
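/*
 * Illustrative sketch (not part of the original source): in-kernel callers
 * generally pair VNOP_OPEN with a VNOP_CLOSE carrying the matching flags
 * once I/O on the vnode is complete.  vp and ctx are assumed to be held by
 * the caller.
 */
#if 0
	error = VNOP_OPEN(vp, FREAD, ctx);
	if (error == 0) {
		/* ... perform reads against vp ... */
		(void)VNOP_CLOSE(vp, FREAD, ctx);
	}
#endif
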
3680 #if 0
3681 /*
3682 *#
3683 *#% close        vp      U U U
3684 *#
3685 */
3686 struct vnop_close_args {
3687 	struct vnodeop_desc *a_desc;
3688 	vnode_t a_vp;
3689 	int a_fflag;
3690 	vfs_context_t a_context;
3691 };
3692 #endif /* 0*/
3693 errno_t
3694 VNOP_CLOSE(vnode_t vp, int fflag, vfs_context_t ctx)
3695 {
3696 	int _err;
3697 	struct vnop_close_args a;
3698 
3699 	if (ctx == NULL) {
3700 		ctx = vfs_context_current();
3701 	}
3702 	a.a_desc = &vnop_close_desc;
3703 	a.a_vp = vp;
3704 	a.a_fflag = fflag;
3705 	a.a_context = ctx;
3706 
3707 	_err = (*vp->v_op[vnop_close_desc.vdesc_offset])(&a);
3708 	DTRACE_FSINFO(close, vnode_t, vp);
3709 
3710 	return _err;
3711 }
3712 
3713 #if 0
3714 /*
3715 *#
3716 *#% access       vp      L L L
3717 *#
3718 */
3719 struct vnop_access_args {
3720 	struct vnodeop_desc *a_desc;
3721 	vnode_t a_vp;
3722 	int a_action;
3723 	vfs_context_t a_context;
3724 };
3725 #endif /* 0*/
3726 errno_t
3727 VNOP_ACCESS(vnode_t vp, int action, vfs_context_t ctx)
3728 {
3729 	int _err;
3730 	struct vnop_access_args a;
3731 
3732 	if (ctx == NULL) {
3733 		ctx = vfs_context_current();
3734 	}
3735 	a.a_desc = &vnop_access_desc;
3736 	a.a_vp = vp;
3737 	a.a_action = action;
3738 	a.a_context = ctx;
3739 
3740 	_err = (*vp->v_op[vnop_access_desc.vdesc_offset])(&a);
3741 	DTRACE_FSINFO(access, vnode_t, vp);
3742 
3743 	return _err;
3744 }
3745 
3746 #if 0
3747 /*
3748 *#
3749 *#% getattr      vp      = = =
3750 *#
3751 */
3752 struct vnop_getattr_args {
3753 	struct vnodeop_desc *a_desc;
3754 	vnode_t a_vp;
3755 	struct vnode_attr *a_vap;
3756 	vfs_context_t a_context;
3757 };
3758 #endif /* 0*/
3759 errno_t
3760 VNOP_GETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3761 {
3762 	int _err;
3763 	struct vnop_getattr_args a;
3764 
3765 	a.a_desc = &vnop_getattr_desc;
3766 	a.a_vp = vp;
3767 	a.a_vap = vap;
3768 	a.a_context = ctx;
3769 
3770 	_err = (*vp->v_op[vnop_getattr_desc.vdesc_offset])(&a);
3771 	DTRACE_FSINFO(getattr, vnode_t, vp);
3772 
3773 	return _err;
3774 }
3775 
3776 #if 0
3777 /*
3778 *#
3779 *#% setattr      vp      L L L
3780 *#
3781 */
3782 struct vnop_setattr_args {
3783 	struct vnodeop_desc *a_desc;
3784 	vnode_t a_vp;
3785 	struct vnode_attr *a_vap;
3786 	vfs_context_t a_context;
3787 };
3788 #endif /* 0*/
3789 errno_t
3790 VNOP_SETATTR(vnode_t vp, struct vnode_attr * vap, vfs_context_t ctx)
3791 {
3792 	int _err;
3793 	struct vnop_setattr_args a;
3794 
3795 	a.a_desc = &vnop_setattr_desc;
3796 	a.a_vp = vp;
3797 	a.a_vap = vap;
3798 	a.a_context = ctx;
3799 
3800 	_err = (*vp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
3801 	DTRACE_FSINFO(setattr, vnode_t, vp);
3802 
3803 #if CONFIG_APPLEDOUBLE
3804 	/*
3805 	 * Shadow uid/gid/mod change to extended attribute file.
3806 	 */
3807 	if (_err == 0 && !NATIVE_XATTR(vp)) {
3808 		struct vnode_attr va;
3809 		int change = 0;
3810 
3811 		VATTR_INIT(&va);
3812 		if (VATTR_IS_ACTIVE(vap, va_uid)) {
3813 			VATTR_SET(&va, va_uid, vap->va_uid);
3814 			change = 1;
3815 		}
3816 		if (VATTR_IS_ACTIVE(vap, va_gid)) {
3817 			VATTR_SET(&va, va_gid, vap->va_gid);
3818 			change = 1;
3819 		}
3820 		if (VATTR_IS_ACTIVE(vap, va_mode)) {
3821 			VATTR_SET(&va, va_mode, vap->va_mode);
3822 			change = 1;
3823 		}
3824 		if (change) {
3825 			vnode_t dvp;
3826 			const char   *vname;
3827 
3828 			dvp = vnode_getparent(vp);
3829 			vname = vnode_getname(vp);
3830 
3831 			xattrfile_setattr(dvp, vname, &va, ctx);
3832 			if (dvp != NULLVP) {
3833 				vnode_put(dvp);
3834 			}
3835 			if (vname != NULL) {
3836 				vnode_putname(vname);
3837 			}
3838 		}
3839 	}
3840 #endif /* CONFIG_APPLEDOUBLE */
3841 
3842 	/*
3843 	 * If we have changed any of the things about the file that are likely
3844 	 * to result in changes to authorization results, blow the vnode auth
3845 	 * cache
3846 	 */
3847 	if (_err == 0 && (
3848 		    VATTR_IS_SUPPORTED(vap, va_mode) ||
3849 		    VATTR_IS_SUPPORTED(vap, va_uid) ||
3850 		    VATTR_IS_SUPPORTED(vap, va_gid) ||
3851 		    VATTR_IS_SUPPORTED(vap, va_flags) ||
3852 		    VATTR_IS_SUPPORTED(vap, va_acl) ||
3853 		    VATTR_IS_SUPPORTED(vap, va_uuuid) ||
3854 		    VATTR_IS_SUPPORTED(vap, va_guuid))) {
3855 		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3856 
3857 #if NAMEDSTREAMS
3858 		if (vfs_authopaque(vp->v_mount) && vnode_hasnamedstreams(vp)) {
3859 			vnode_t svp;
3860 			if (vnode_getnamedstream(vp, &svp, XATTR_RESOURCEFORK_NAME, NS_OPEN, 0, ctx) == 0) {
3861 				vnode_uncache_authorized_action(svp, KAUTH_INVALIDATE_CACHED_RIGHTS);
3862 				vnode_put(svp);
3863 			}
3864 		}
3865 #endif /* NAMEDSTREAMS */
3866 	}
3867 
3868 
3869 	post_event_if_success(vp, _err, NOTE_ATTRIB);
3870 
3871 	return _err;
3872 }
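
#if 0
/*
 * Illustrative sketch only (not part of the original file): kernel callers
 * normally reach VNOP_SETATTR() through vnode_setattr(), building the
 * request with the VATTR_* macros just as the AppleDouble shadow code
 * above does.  "examplefs_chmod_sketch" is a hypothetical helper.
 */
static int
examplefs_chmod_sketch(vnode_t vp, mode_t mode, vfs_context_t ctx)
{
	struct vnode_attr va;

	VATTR_INIT(&va);
	VATTR_SET(&va, va_mode, mode);
	return vnode_setattr(vp, &va, ctx);
}
#endif /* 0 */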
3873 
3874 
3875 #if 0
3876 /*
3877 *#
3878 *#% read         vp      L L L
3879 *#
3880 */
3881 struct vnop_read_args {
3882 	struct vnodeop_desc *a_desc;
3883 	vnode_t a_vp;
3884 	struct uio *a_uio;
3885 	int a_ioflag;
3886 	vfs_context_t a_context;
3887 };
3888 #endif /* 0*/
3889 errno_t
3890 VNOP_READ(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3891 {
3892 	int _err;
3893 	struct vnop_read_args a;
3894 #if CONFIG_DTRACE
3895 	user_ssize_t resid = uio_resid(uio);
3896 #endif
3897 
3898 	if (ctx == NULL) {
3899 		return EINVAL;
3900 	}
3901 
3902 	a.a_desc = &vnop_read_desc;
3903 	a.a_vp = vp;
3904 	a.a_uio = uio;
3905 	a.a_ioflag = ioflag;
3906 	a.a_context = ctx;
3907 
3908 	_err = (*vp->v_op[vnop_read_desc.vdesc_offset])(&a);
3909 	DTRACE_FSINFO_IO(read,
3910 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3911 
3912 	return _err;
3913 }
3914 
3915 
3916 #if 0
3917 /*
3918 *#
3919 *#% write        vp      L L L
3920 *#
3921 */
3922 struct vnop_write_args {
3923 	struct vnodeop_desc *a_desc;
3924 	vnode_t a_vp;
3925 	struct uio *a_uio;
3926 	int a_ioflag;
3927 	vfs_context_t a_context;
3928 };
3929 #endif /* 0*/
3930 errno_t
3931 VNOP_WRITE(vnode_t vp, struct uio * uio, int ioflag, vfs_context_t ctx)
3932 {
3933 	struct vnop_write_args a;
3934 	int _err;
3935 #if CONFIG_DTRACE
3936 	user_ssize_t resid = uio_resid(uio);
3937 #endif
3938 
3939 	if (ctx == NULL) {
3940 		return EINVAL;
3941 	}
3942 
3943 	a.a_desc = &vnop_write_desc;
3944 	a.a_vp = vp;
3945 	a.a_uio = uio;
3946 	a.a_ioflag = ioflag;
3947 	a.a_context = ctx;
3948 
3949 	_err = (*vp->v_op[vnop_write_desc.vdesc_offset])(&a);
3950 	DTRACE_FSINFO_IO(write,
3951 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
3952 
3953 	post_event_if_success(vp, _err, NOTE_WRITE);
3954 
3955 	return _err;
3956 }
3957 
3958 
3959 #if 0
3960 /*
3961 *#
3962 *#% ioctl        vp      U U U
3963 *#
3964 */
3965 struct vnop_ioctl_args {
3966 	struct vnodeop_desc *a_desc;
3967 	vnode_t a_vp;
3968 	u_long a_command;
3969 	caddr_t a_data;
3970 	int a_fflag;
3971 	vfs_context_t a_context;
3972 };
3973 #endif /* 0*/
3974 errno_t
3975 VNOP_IOCTL(vnode_t vp, u_long command, caddr_t data, int fflag, vfs_context_t ctx)
3976 {
3977 	int _err;
3978 	struct vnop_ioctl_args a;
3979 
3980 	if (ctx == NULL) {
3981 		ctx = vfs_context_current();
3982 	}
3983 
3984 	/*
3985 	 * This check should probably have been put in the TTY code instead...
3986 	 *
3987 	 * We have to be careful about what we assume during startup and shutdown.
3988 	 * We have to be able to use the root filesystem's device vnode even when
3989 	 * devfs isn't mounted (yet/anymore), so we can't go looking at its mount
3990 	 * structure.  If there is no data pointer, it doesn't matter whether
3991 	 * the device is 64-bit ready.  Any command (like DKIOCSYNCHRONIZE)
3992 	 * which passes NULL for its data pointer can therefore be used during
3993 	 * mount or unmount of the root filesystem.
3994 	 *
3995 	 * Depending on what root filesystems need to do during mount/unmount, we
3996 	 * may need to loosen this check again in the future.
3997 	 */
3998 	if (vfs_context_is64bit(ctx) && !(vnode_ischr(vp) || vnode_isblk(vp))) {
3999 		if (data != NULL && !vnode_vfs64bitready(vp)) {
4000 			return ENOTTY;
4001 		}
4002 	}
4003 
4004 	if ((command == DKIOCISSOLIDSTATE) && (vp == rootvp) && rootvp_is_ssd && data) {
4005 		*data = 1;
4006 		return 0;
4007 	}
4008 
4009 	a.a_desc = &vnop_ioctl_desc;
4010 	a.a_vp = vp;
4011 	a.a_command = command;
4012 	a.a_data = data;
4013 	a.a_fflag = fflag;
4014 	a.a_context = ctx;
4015 
4016 	_err = (*vp->v_op[vnop_ioctl_desc.vdesc_offset])(&a);
4017 	DTRACE_FSINFO(ioctl, vnode_t, vp);
4018 
4019 	return _err;
4020 }
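
/*
 * Example (illustrative only), per the comment in VNOP_IOCTL() above: a
 * command that passes a NULL data pointer, e.g.
 *
 *	(void) VNOP_IOCTL(rootvp, DKIOCSYNCHRONIZE, (caddr_t)NULL, 0, ctx);
 *
 * skips the 64-bit-readiness check and so remains usable while the root
 * filesystem's devfs is not mounted during early boot or late shutdown.
 */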
4021 
4022 
4023 #if 0
4024 /*
4025 *#
4026 *#% select       vp      U U U
4027 *#
4028 */
4029 struct vnop_select_args {
4030 	struct vnodeop_desc *a_desc;
4031 	vnode_t a_vp;
4032 	int a_which;
4033 	int a_fflags;
4034 	void *a_wql;
4035 	vfs_context_t a_context;
4036 };
4037 #endif /* 0*/
4038 errno_t
4039 VNOP_SELECT(vnode_t vp, int which, int fflags, void * wql, vfs_context_t ctx)
4040 {
4041 	int _err;
4042 	struct vnop_select_args a;
4043 
4044 	if (ctx == NULL) {
4045 		ctx = vfs_context_current();
4046 	}
4047 	a.a_desc = &vnop_select_desc;
4048 	a.a_vp = vp;
4049 	a.a_which = which;
4050 	a.a_fflags = fflags;
4051 	a.a_context = ctx;
4052 	a.a_wql = wql;
4053 
4054 	_err = (*vp->v_op[vnop_select_desc.vdesc_offset])(&a);
4055 	DTRACE_FSINFO(select, vnode_t, vp);
4056 
4057 	return _err;
4058 }
4059 
4060 
4061 #if 0
4062 /*
4063 *#
4064 *#% exchange fvp         L L L
4065 *#% exchange tvp         L L L
4066 *#
4067 */
4068 struct vnop_exchange_args {
4069 	struct vnodeop_desc *a_desc;
4070 	vnode_t a_fvp;
4071 	vnode_t a_tvp;
4072 	int a_options;
4073 	vfs_context_t a_context;
4074 };
4075 #endif /* 0*/
4076 errno_t
4077 VNOP_EXCHANGE(vnode_t fvp, vnode_t tvp, int options, vfs_context_t ctx)
4078 {
4079 	int _err;
4080 	struct vnop_exchange_args a;
4081 
4082 	a.a_desc = &vnop_exchange_desc;
4083 	a.a_fvp = fvp;
4084 	a.a_tvp = tvp;
4085 	a.a_options = options;
4086 	a.a_context = ctx;
4087 
4088 	_err = (*fvp->v_op[vnop_exchange_desc.vdesc_offset])(&a);
4089 	DTRACE_FSINFO(exchange, vnode_t, fvp);
4090 
4091 	/* Don't post NOTE_WRITE because file descriptors follow the data ... */
4092 	post_event_if_success(fvp, _err, NOTE_ATTRIB);
4093 	post_event_if_success(tvp, _err, NOTE_ATTRIB);
4094 
4095 	return _err;
4096 }
4097 
4098 
4099 #if 0
4100 /*
4101 *#
4102 *#% revoke       vp      U U U
4103 *#
4104 */
4105 struct vnop_revoke_args {
4106 	struct vnodeop_desc *a_desc;
4107 	vnode_t a_vp;
4108 	int a_flags;
4109 	vfs_context_t a_context;
4110 };
4111 #endif /* 0*/
4112 errno_t
4113 VNOP_REVOKE(vnode_t vp, int flags, vfs_context_t ctx)
4114 {
4115 	struct vnop_revoke_args a;
4116 	int _err;
4117 
4118 	a.a_desc = &vnop_revoke_desc;
4119 	a.a_vp = vp;
4120 	a.a_flags = flags;
4121 	a.a_context = ctx;
4122 
4123 	_err = (*vp->v_op[vnop_revoke_desc.vdesc_offset])(&a);
4124 	DTRACE_FSINFO(revoke, vnode_t, vp);
4125 
4126 	return _err;
4127 }
4128 
4129 
4130 #if 0
4131 /*
4132 *#
4133 *# mmap_check - vp U U U
4134 *#
4135 */
4136 struct vnop_mmap_check_args {
4137 	struct vnodeop_desc *a_desc;
4138 	vnode_t a_vp;
4139 	int a_flags;
4140 	vfs_context_t a_context;
4141 };
4142 #endif /* 0 */
4143 errno_t
4144 VNOP_MMAP_CHECK(vnode_t vp, int flags, vfs_context_t ctx)
4145 {
4146 	int _err;
4147 	struct vnop_mmap_check_args a;
4148 
4149 	a.a_desc = &vnop_mmap_check_desc;
4150 	a.a_vp = vp;
4151 	a.a_flags = flags;
4152 	a.a_context = ctx;
4153 
4154 	_err = (*vp->v_op[vnop_mmap_check_desc.vdesc_offset])(&a);
4155 	if (_err == ENOTSUP) {
4156 		_err = 0;
4157 	}
4158 	DTRACE_FSINFO(mmap_check, vnode_t, vp);
4159 
4160 	return _err;
4161 }
4162 
4163 #if 0
4164 /*
4165 *#
4166 *# mmap - vp U U U
4167 *#
4168 */
4169 struct vnop_mmap_args {
4170 	struct vnodeop_desc *a_desc;
4171 	vnode_t a_vp;
4172 	int a_fflags;
4173 	vfs_context_t a_context;
4174 };
4175 #endif /* 0*/
4176 errno_t
4177 VNOP_MMAP(vnode_t vp, int fflags, vfs_context_t ctx)
4178 {
4179 	int _err;
4180 	struct vnop_mmap_args a;
4181 
4182 	a.a_desc = &vnop_mmap_desc;
4183 	a.a_vp = vp;
4184 	a.a_fflags = fflags;
4185 	a.a_context = ctx;
4186 
4187 	_err = (*vp->v_op[vnop_mmap_desc.vdesc_offset])(&a);
4188 	DTRACE_FSINFO(mmap, vnode_t, vp);
4189 
4190 	return _err;
4191 }
4192 
4193 
4194 #if 0
4195 /*
4196 *#
4197 *# mnomap - vp U U U
4198 *#
4199 */
4200 struct vnop_mnomap_args {
4201 	struct vnodeop_desc *a_desc;
4202 	vnode_t a_vp;
4203 	vfs_context_t a_context;
4204 };
4205 #endif /* 0*/
4206 errno_t
4207 VNOP_MNOMAP(vnode_t vp, vfs_context_t ctx)
4208 {
4209 	int _err;
4210 	struct vnop_mnomap_args a;
4211 
4212 	a.a_desc = &vnop_mnomap_desc;
4213 	a.a_vp = vp;
4214 	a.a_context = ctx;
4215 
4216 	_err = (*vp->v_op[vnop_mnomap_desc.vdesc_offset])(&a);
4217 	DTRACE_FSINFO(mnomap, vnode_t, vp);
4218 
4219 	return _err;
4220 }
4221 
4222 
4223 #if 0
4224 /*
4225 *#
4226 *#% fsync        vp      L L L
4227 *#
4228 */
4229 struct vnop_fsync_args {
4230 	struct vnodeop_desc *a_desc;
4231 	vnode_t a_vp;
4232 	int a_waitfor;
4233 	vfs_context_t a_context;
4234 };
4235 #endif /* 0*/
4236 errno_t
4237 VNOP_FSYNC(vnode_t vp, int waitfor, vfs_context_t ctx)
4238 {
4239 	struct vnop_fsync_args a;
4240 	int _err;
4241 
4242 	a.a_desc = &vnop_fsync_desc;
4243 	a.a_vp = vp;
4244 	a.a_waitfor = waitfor;
4245 	a.a_context = ctx;
4246 
4247 	_err = (*vp->v_op[vnop_fsync_desc.vdesc_offset])(&a);
4248 	DTRACE_FSINFO(fsync, vnode_t, vp);
4249 
4250 	return _err;
4251 }
4252 
4253 
4254 #if 0
4255 /*
4256 *#
4257 *#% remove       dvp     L U U
4258 *#% remove       vp      L U U
4259 *#
4260 */
4261 struct vnop_remove_args {
4262 	struct vnodeop_desc *a_desc;
4263 	vnode_t a_dvp;
4264 	vnode_t a_vp;
4265 	struct componentname *a_cnp;
4266 	int a_flags;
4267 	vfs_context_t a_context;
4268 };
4269 #endif /* 0*/
4270 errno_t
4271 VNOP_REMOVE(vnode_t dvp, vnode_t vp, struct componentname * cnp, int flags, vfs_context_t ctx)
4272 {
4273 	int _err;
4274 	struct vnop_remove_args a;
4275 
4276 	a.a_desc = &vnop_remove_desc;
4277 	a.a_dvp = dvp;
4278 	a.a_vp = vp;
4279 	a.a_cnp = cnp;
4280 	a.a_flags = flags;
4281 	a.a_context = ctx;
4282 
4283 	_err = (*dvp->v_op[vnop_remove_desc.vdesc_offset])(&a);
4284 	DTRACE_FSINFO(remove, vnode_t, vp);
4285 
4286 	if (_err == 0) {
4287 		vnode_setneedinactive(vp);
4288 #if CONFIG_APPLEDOUBLE
4289 		if (!(NATIVE_XATTR(dvp))) {
4290 			/*
4291 			 * Remove any associated extended attribute file (._ AppleDouble file).
4292 			 */
4293 			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
4294 		}
4295 #endif /* CONFIG_APPLEDOUBLE */
4296 	}
4297 
4298 	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
4299 	post_event_if_success(dvp, _err, NOTE_WRITE);
4300 
4301 	return _err;
4302 }
4303 
4304 int
4305 VNOP_COMPOUND_REMOVE(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
4306 {
4307 	int _err;
4308 	struct vnop_compound_remove_args a;
4309 	int no_vp = (*vpp == NULLVP);
4310 
4311 	a.a_desc = &vnop_compound_remove_desc;
4312 	a.a_dvp = dvp;
4313 	a.a_vpp = vpp;
4314 	a.a_cnp = &ndp->ni_cnd;
4315 	a.a_flags = flags;
4316 	a.a_vap = vap;
4317 	a.a_context = ctx;
4318 	a.a_remove_authorizer = vn_authorize_unlink;
4319 
4320 	_err = (*dvp->v_op[vnop_compound_remove_desc.vdesc_offset])(&a);
4321 	if (_err == 0 && *vpp) {
4322 		DTRACE_FSINFO(compound_remove, vnode_t, *vpp);
4323 	} else {
4324 		DTRACE_FSINFO(compound_remove, vnode_t, dvp);
4325 	}
4326 	if (_err == 0) {
4327 		vnode_setneedinactive(*vpp);
4328 #if CONFIG_APPLEDOUBLE
4329 		if (!(NATIVE_XATTR(dvp))) {
4330 			/*
4331 			 * Remove any associated extended attribute file (._ AppleDouble file).
4332 			 */
4333 			xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 1);
4334 		}
4335 #endif /* CONFIG_APPLEDOUBLE */
4336 	}
4337 
4338 	post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
4339 	post_event_if_success(dvp, _err, NOTE_WRITE);
4340 
4341 	if (no_vp) {
4342 		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
4343 		if (*vpp && _err && _err != EKEEPLOOKING) {
4344 			vnode_put(*vpp);
4345 			*vpp = NULLVP;
4346 		}
4347 	}
4348 
4349 	//printf("VNOP_COMPOUND_REMOVE() returning %d\n", _err);
4350 
4351 	return _err;
4352 }
4353 
4354 #if 0
4355 /*
4356 *#
4357 *#% link         vp      U U U
4358 *#% link         tdvp    L U U
4359 *#
4360 */
4361 struct vnop_link_args {
4362 	struct vnodeop_desc *a_desc;
4363 	vnode_t a_vp;
4364 	vnode_t a_tdvp;
4365 	struct componentname *a_cnp;
4366 	vfs_context_t a_context;
4367 };
4368 #endif /* 0*/
4369 errno_t
4370 VNOP_LINK(vnode_t vp, vnode_t tdvp, struct componentname * cnp, vfs_context_t ctx)
4371 {
4372 	int _err;
4373 	struct vnop_link_args a;
4374 
4375 #if CONFIG_APPLEDOUBLE
4376 	/*
4377 	 * For file systems with non-native extended attributes,
4378 	 * disallow linking to an existing "._" Apple Double file.
4379 	 */
4380 	if (!NATIVE_XATTR(tdvp) && (vp->v_type == VREG)) {
4381 		const char   *vname;
4382 
4383 		vname = vnode_getname(vp);
4384 		if (vname != NULL) {
4385 			_err = 0;
4386 			if (vname[0] == '.' && vname[1] == '_' && vname[2] != '\0') {
4387 				_err = EPERM;
4388 			}
4389 			vnode_putname(vname);
4390 			if (_err) {
4391 				return _err;
4392 			}
4393 		}
4394 	}
4395 #endif /* CONFIG_APPLEDOUBLE */
4396 
4397 	a.a_desc = &vnop_link_desc;
4398 	a.a_vp = vp;
4399 	a.a_tdvp = tdvp;
4400 	a.a_cnp = cnp;
4401 	a.a_context = ctx;
4402 
4403 	_err = (*tdvp->v_op[vnop_link_desc.vdesc_offset])(&a);
4404 	DTRACE_FSINFO(link, vnode_t, vp);
4405 
4406 	post_event_if_success(vp, _err, NOTE_LINK);
4407 	post_event_if_success(tdvp, _err, NOTE_WRITE);
4408 
4409 	return _err;
4410 }
4411 
4412 errno_t
4413 vn_rename(struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4414     struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4415     vfs_rename_flags_t flags, vfs_context_t ctx)
4416 {
4417 	int _err;
4418 	struct nameidata *fromnd = NULL;
4419 	struct nameidata *tond = NULL;
4420 #if CONFIG_APPLEDOUBLE
4421 	vnode_t src_attr_vp = NULLVP;
4422 	vnode_t dst_attr_vp = NULLVP;
4423 	char smallname1[48];
4424 	char smallname2[48];
4425 	char *xfromname = NULL;
4426 	char *xtoname = NULL;
4427 #endif /* CONFIG_APPLEDOUBLE */
4428 	int batched;
4429 	uint32_t tdfflags;      // Target directory file flags
4430 
4431 	batched = vnode_compound_rename_available(fdvp);
4432 
4433 	if (!batched) {
4434 		if (*fvpp == NULLVP) {
4435 			panic("Not batched, and no fvp?");
4436 		}
4437 	}
4438 
4439 #if CONFIG_APPLEDOUBLE
4440 	/*
4441 	 * We need to preflight any potential AppleDouble file for the source file
4442 	 * before doing the rename operation, since we could potentially be doing
4443 	 * this operation on a network filesystem, and would end up duplicating
4444 	 * the work.  Also, save the source and destination names.  Skip it if the
4445 	 * source has a "._" prefix.
4446 	 */
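	/*
	 * For example (illustrative only): renaming "report.txt" on such a
	 * volume must also rename its sibling "._report.txt", which is why
	 * both "._" names are built up front here.
	 */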
4447 
4448 	size_t xfromname_len = 0;
4449 	size_t xtoname_len = 0;
4450 	if (!NATIVE_XATTR(fdvp) &&
4451 	    !(fcnp->cn_nameptr[0] == '.' && fcnp->cn_nameptr[1] == '_')) {
4452 		int error;
4453 
4454 		/* Get source attribute file name. */
4455 		xfromname_len = fcnp->cn_namelen + 3;
4456 		if (xfromname_len > sizeof(smallname1)) {
4457 			xfromname = kalloc_data(xfromname_len, Z_WAITOK);
4458 		} else {
4459 			xfromname = &smallname1[0];
4460 		}
4461 		strlcpy(xfromname, "._", xfromname_len);
4462 		strlcat(xfromname, fcnp->cn_nameptr, xfromname_len);
4463 
4464 		/* Get destination attribute file name. */
4465 		xtoname_len = tcnp->cn_namelen + 3;
4466 		if (xtoname_len > sizeof(smallname2)) {
4467 			xtoname = kalloc_data(xtoname_len, Z_WAITOK);
4468 		} else {
4469 			xtoname = &smallname2[0];
4470 		}
4471 		strlcpy(xtoname, "._", xtoname_len);
4472 		strlcat(xtoname, tcnp->cn_nameptr, xtoname_len);
4473 
4474 		/*
4475 		 * Look up source attribute file, keep reference on it if exists.
4476 		 * Note that we do the namei with the nameiop of RENAME, which is different than
4477 		 * in the rename syscall. It's OK if the source file does not exist, since this
4478 		 * is only for AppleDouble files.
4479 		 */
4480 		fromnd = kalloc_type(struct nameidata, Z_WAITOK);
4481 		NDINIT(fromnd, RENAME, OP_RENAME, NOFOLLOW | USEDVP | CN_NBMOUNTLOOK,
4482 		    UIO_SYSSPACE, CAST_USER_ADDR_T(xfromname), ctx);
4483 		fromnd->ni_dvp = fdvp;
4484 		error = namei(fromnd);
4485 
4486 		/*
4487 		 * If there was an error looking up source attribute file,
4488 		 * we'll behave as if it didn't exist.
4489 		 */
4490 
4491 		if (error == 0) {
4492 			if (fromnd->ni_vp) {
4493 				/* src_attr_vp indicates need to call vnode_put / nameidone later */
4494 				src_attr_vp = fromnd->ni_vp;
4495 
4496 				if (fromnd->ni_vp->v_type != VREG) {
4497 					src_attr_vp = NULLVP;
4498 					vnode_put(fromnd->ni_vp);
4499 				}
4500 			}
4501 			/*
4502 			 * Either we got an invalid vnode type (not a regular file) or the namei lookup
4503 			 * suppressed ENOENT as a valid error since we're renaming. Either way, we don't
4504 			 * have a vnode here, so we drop our namei buffer for the source attribute file
4505 			 */
4506 			if (src_attr_vp == NULLVP) {
4507 				nameidone(fromnd);
4508 			}
4509 		}
4510 	}
4511 #endif /* CONFIG_APPLEDOUBLE */
4512 
4513 	if (batched) {
4514 		_err = VNOP_COMPOUND_RENAME(fdvp, fvpp, fcnp, fvap, tdvp, tvpp, tcnp, tvap, flags, ctx);
4515 		if (_err != 0) {
4516 			printf("VNOP_COMPOUND_RENAME() returned %d\n", _err);
4517 		}
4518 	} else {
4519 		if (flags) {
4520 			_err = VNOP_RENAMEX(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, flags, ctx);
4521 			if (_err == ENOTSUP && flags == VFS_RENAME_SECLUDE) {
4522 				// Legacy...
4523 				if ((*fvpp)->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_SECLUDE_RENAME) {
4524 					fcnp->cn_flags |= CN_SECLUDE_RENAME;
4525 					_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4526 				}
4527 			}
4528 		} else {
4529 			_err = VNOP_RENAME(fdvp, *fvpp, fcnp, tdvp, *tvpp, tcnp, ctx);
4530 		}
4531 	}
4532 
4533 	/*
4534 	 * If moved to a new directory that is restricted,
4535 	 * set the restricted flag on the item moved.
4536 	 */
4537 	if (_err == 0) {
4538 		_err = vnode_flags(tdvp, &tdfflags, ctx);
4539 		if (_err == 0) {
4540 			uint32_t inherit_flags = tdfflags & (UF_DATAVAULT | SF_RESTRICTED);
4541 			if (inherit_flags) {
4542 				uint32_t fflags;
4543 				_err = vnode_flags(*fvpp, &fflags, ctx);
4544 				if (_err == 0 && fflags != (fflags | inherit_flags)) {
4545 					struct vnode_attr va;
4546 					VATTR_INIT(&va);
4547 					VATTR_SET(&va, va_flags, fflags | inherit_flags);
4548 					_err = vnode_setattr(*fvpp, &va, ctx);
4549 				}
4550 			}
4551 		}
4552 	}
4553 
4554 #if CONFIG_MACF
4555 	if (_err == 0) {
4556 		if (flags & VFS_RENAME_SWAP) {
4557 			mac_vnode_notify_rename_swap(
4558 				ctx,                        /* ctx */
4559 				fdvp,                       /* fdvp */
4560 				*fvpp,                      /* fvp */
4561 				fcnp,                       /* fcnp */
4562 				tdvp,                       /* tdvp */
4563 				*tvpp,                      /* tvp */
4564 				tcnp                        /* tcnp */
4565 				);
4566 		} else {
4567 			mac_vnode_notify_rename(
4568 				ctx,                        /* ctx */
4569 				*fvpp,                      /* fvp */
4570 				tdvp,                       /* tdvp */
4571 				tcnp                        /* tcnp */
4572 				);
4573 		}
4574 	}
4575 #endif
4576 
4577 #if CONFIG_APPLEDOUBLE
4578 	/*
4579 	 * Rename any associated extended attribute file (._ AppleDouble file).
4580 	 */
4581 	if (_err == 0 && !NATIVE_XATTR(fdvp) && xfromname != NULL) {
4582 		int error = 0;
4583 
4584 		/*
4585 		 * Get destination attribute file vnode.
4586 		 * Note that tdvp already has an iocount reference. Make sure to check that we
4587 		 * get a valid vnode from namei.
4588 		 */
4589 		tond = kalloc_type(struct nameidata, Z_WAITOK);
4590 		NDINIT(tond, RENAME, OP_RENAME,
4591 		    NOCACHE | NOFOLLOW | USEDVP | CN_NBMOUNTLOOK, UIO_SYSSPACE,
4592 		    CAST_USER_ADDR_T(xtoname), ctx);
4593 		tond->ni_dvp = tdvp;
4594 		error = namei(tond);
4595 
4596 		if (error) {
4597 			goto ad_error;
4598 		}
4599 
4600 		if (tond->ni_vp) {
4601 			dst_attr_vp = tond->ni_vp;
4602 		}
4603 
4604 		if (src_attr_vp) {
4605 			const char *old_name = src_attr_vp->v_name;
4606 			vnode_t old_parent = src_attr_vp->v_parent;
4607 
4608 			if (batched) {
4609 				error = VNOP_COMPOUND_RENAME(fdvp, &src_attr_vp, &fromnd->ni_cnd, NULL,
4610 				    tdvp, &dst_attr_vp, &tond->ni_cnd, NULL,
4611 				    0, ctx);
4612 			} else {
4613 				error = VNOP_RENAME(fdvp, src_attr_vp, &fromnd->ni_cnd,
4614 				    tdvp, dst_attr_vp, &tond->ni_cnd, ctx);
4615 			}
4616 
4617 			if (error == 0 && old_name == src_attr_vp->v_name &&
4618 			    old_parent == src_attr_vp->v_parent) {
4619 				int update_flags = VNODE_UPDATE_NAME;
4620 
4621 				if (fdvp != tdvp) {
4622 					update_flags |= VNODE_UPDATE_PARENT;
4623 				}
4624 
4625 				if ((src_attr_vp->v_mount->mnt_vtable->vfc_vfsflags & VFC_VFSVNOP_NOUPDATEID_RENAME) == 0) {
4626 					vnode_update_identity(src_attr_vp, tdvp,
4627 					    tond->ni_cnd.cn_nameptr,
4628 					    tond->ni_cnd.cn_namelen,
4629 					    tond->ni_cnd.cn_hash,
4630 					    update_flags);
4631 				}
4632 			}
4633 
4634 			/* kevent notifications for moving resource files
4635 			 * _err is zero if we're here, so no need to notify directories, code
4636 			 * below will do that.  only need to post the rename on the source and
4637 			 * possibly a delete on the dest
4638 			 */
4639 			post_event_if_success(src_attr_vp, error, NOTE_RENAME);
4640 			if (dst_attr_vp) {
4641 				post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4642 			}
4643 		} else if (dst_attr_vp) {
4644 			/*
4645 			 * Just delete destination attribute file vnode if it exists, since
4646 			 * we didn't have a source attribute file.
4647 			 * Note that tdvp already has an iocount reference.
4648 			 */
4649 
4650 			struct vnop_remove_args args;
4651 
4652 			args.a_desc    = &vnop_remove_desc;
4653 			args.a_dvp     = tdvp;
4654 			args.a_vp      = dst_attr_vp;
4655 			args.a_cnp     = &tond->ni_cnd;
4656 			args.a_context = ctx;
4657 
4658 			if (error == 0) {
4659 				error = (*tdvp->v_op[vnop_remove_desc.vdesc_offset])(&args);
4660 
4661 				if (error == 0) {
4662 					vnode_setneedinactive(dst_attr_vp);
4663 				}
4664 			}
4665 
4666 			/* kevent notification for deleting the destination's attribute file
4667 			 * if it existed.  Only need to post the delete on the destination, since
4668 			 * the code below will handle the directories.
4669 			 */
4670 			post_event_if_success(dst_attr_vp, error, NOTE_DELETE);
4671 		}
4672 	}
4673 ad_error:
4674 	if (src_attr_vp) {
4675 		vnode_put(src_attr_vp);
4676 		nameidone(fromnd);
4677 	}
4678 	if (dst_attr_vp) {
4679 		vnode_put(dst_attr_vp);
4680 		nameidone(tond);
4681 	}
4682 	if (xfromname && xfromname != &smallname1[0]) {
4683 		kfree_data(xfromname, xfromname_len);
4684 	}
4685 	if (xtoname && xtoname != &smallname2[0]) {
4686 		kfree_data(xtoname, xtoname_len);
4687 	}
4688 #endif /* CONFIG_APPLEDOUBLE */
4689 	kfree_type(struct nameidata, fromnd);
4690 	kfree_type(struct nameidata, tond);
4691 	return _err;
4692 }
4693 
4694 
4695 #if 0
4696 /*
4697 *#
4698 *#% rename       fdvp    U U U
4699 *#% rename       fvp     U U U
4700 *#% rename       tdvp    L U U
4701 *#% rename       tvp     X U U
4702 *#
4703 */
4704 struct vnop_rename_args {
4705 	struct vnodeop_desc *a_desc;
4706 	vnode_t a_fdvp;
4707 	vnode_t a_fvp;
4708 	struct componentname *a_fcnp;
4709 	vnode_t a_tdvp;
4710 	vnode_t a_tvp;
4711 	struct componentname *a_tcnp;
4712 	vfs_context_t a_context;
4713 };
4714 #endif /* 0*/
4715 errno_t
4716 VNOP_RENAME(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4717     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4718     vfs_context_t ctx)
4719 {
4720 	int _err = 0;
4721 	struct vnop_rename_args a;
4722 
4723 	a.a_desc = &vnop_rename_desc;
4724 	a.a_fdvp = fdvp;
4725 	a.a_fvp = fvp;
4726 	a.a_fcnp = fcnp;
4727 	a.a_tdvp = tdvp;
4728 	a.a_tvp = tvp;
4729 	a.a_tcnp = tcnp;
4730 	a.a_context = ctx;
4731 
4732 	/* do the rename of the main file. */
4733 	_err = (*fdvp->v_op[vnop_rename_desc.vdesc_offset])(&a);
4734 	DTRACE_FSINFO(rename, vnode_t, fdvp);
4735 
4736 	if (_err) {
4737 		return _err;
4738 	}
4739 
4740 	return post_rename(fdvp, fvp, tdvp, tvp);
4741 }
4742 
4743 static errno_t
4744 post_rename(vnode_t fdvp, vnode_t fvp, vnode_t tdvp, vnode_t tvp)
4745 {
4746 	if (tvp && tvp != fvp) {
4747 		vnode_setneedinactive(tvp);
4748 	}
4749 
4750 	/* Wrote at least one directory.  If transplanted a dir, also changed link counts */
4751 	int events = NOTE_WRITE;
4752 	if (vnode_isdir(fvp)) {
4753 		/* Link count on dir changed only if we are moving a dir and...
4754 		 *      --Moved to new dir, not overwriting there
4755 		 *      --Kept in same dir and DID overwrite
4756 		 */
4757 		if (((fdvp != tdvp) && (!tvp)) || ((fdvp == tdvp) && (tvp))) {
4758 			events |= NOTE_LINK;
4759 		}
4760 	}
4761 
4762 	lock_vnode_and_post(fdvp, events);
4763 	if (fdvp != tdvp) {
4764 		lock_vnode_and_post(tdvp, events);
4765 	}
4766 
4767 	/* If you're replacing the target, post a deletion for it */
4768 	if (tvp) {
4769 		lock_vnode_and_post(tvp, NOTE_DELETE);
4770 	}
4771 
4772 	lock_vnode_and_post(fvp, NOTE_RENAME);
4773 
4774 	return 0;
4775 }
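
/*
 * Example (illustrative only) of the NOTE_LINK rule above: moving a
 * directory from /a to /b without overwriting changes the ".." accounting
 * in both parents, so their link counts change and NOTE_LINK is posted to
 * both; moving a plain file posts only NOTE_WRITE on the directories.
 */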
4776 
4777 #if 0
4778 /*
4779 *#
4780 *#% renamex      fdvp    U U U
4781 *#% renamex      fvp     U U U
4782 *#% renamex      tdvp    L U U
4783 *#% renamex      tvp     X U U
4784 *#
4785 */
4786 struct vnop_renamex_args {
4787 	struct vnodeop_desc *a_desc;
4788 	vnode_t a_fdvp;
4789 	vnode_t a_fvp;
4790 	struct componentname *a_fcnp;
4791 	vnode_t a_tdvp;
4792 	vnode_t a_tvp;
4793 	struct componentname *a_tcnp;
4794 	vfs_rename_flags_t a_flags;
4795 	vfs_context_t a_context;
4796 };
4797 #endif /* 0*/
4798 errno_t
4799 VNOP_RENAMEX(struct vnode *fdvp, struct vnode *fvp, struct componentname *fcnp,
4800     struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
4801     vfs_rename_flags_t flags, vfs_context_t ctx)
4802 {
4803 	int _err = 0;
4804 	struct vnop_renamex_args a;
4805 
4806 	a.a_desc = &vnop_renamex_desc;
4807 	a.a_fdvp = fdvp;
4808 	a.a_fvp = fvp;
4809 	a.a_fcnp = fcnp;
4810 	a.a_tdvp = tdvp;
4811 	a.a_tvp = tvp;
4812 	a.a_tcnp = tcnp;
4813 	a.a_flags = flags;
4814 	a.a_context = ctx;
4815 
4816 	/* do the rename of the main file. */
4817 	_err = (*fdvp->v_op[vnop_renamex_desc.vdesc_offset])(&a);
4818 	DTRACE_FSINFO(renamex, vnode_t, fdvp);
4819 
4820 	if (_err) {
4821 		return _err;
4822 	}
4823 
4824 	return post_rename(fdvp, fvp, tdvp, tvp);
4825 }
4826 
4827 
4828 int
4829 VNOP_COMPOUND_RENAME(
4830 	struct vnode *fdvp, struct vnode **fvpp, struct componentname *fcnp, struct vnode_attr *fvap,
4831 	struct vnode *tdvp, struct vnode **tvpp, struct componentname *tcnp, struct vnode_attr *tvap,
4832 	uint32_t flags, vfs_context_t ctx)
4833 {
4834 	int _err = 0;
4835 	int events;
4836 	struct vnop_compound_rename_args a;
4837 	int no_fvp, no_tvp;
4838 
4839 	no_fvp = (*fvpp) == NULLVP;
4840 	no_tvp = (*tvpp) == NULLVP;
4841 
4842 	a.a_desc = &vnop_compound_rename_desc;
4843 
4844 	a.a_fdvp = fdvp;
4845 	a.a_fvpp = fvpp;
4846 	a.a_fcnp = fcnp;
4847 	a.a_fvap = fvap;
4848 
4849 	a.a_tdvp = tdvp;
4850 	a.a_tvpp = tvpp;
4851 	a.a_tcnp = tcnp;
4852 	a.a_tvap = tvap;
4853 
4854 	a.a_flags = flags;
4855 	a.a_context = ctx;
4856 	a.a_rename_authorizer = vn_authorize_rename;
4857 	a.a_reserved = NULL;
4858 
4859 	/* do the rename of the main file. */
4860 	_err = (*fdvp->v_op[vnop_compound_rename_desc.vdesc_offset])(&a);
4861 	DTRACE_FSINFO(compound_rename, vnode_t, fdvp);
4862 
4863 	if (_err == 0) {
4864 		if (*tvpp && *tvpp != *fvpp) {
4865 			vnode_setneedinactive(*tvpp);
4866 		}
4867 	}
4868 
4869 	/* Wrote at least one directory.  If transplanted a dir, also changed link counts */
4870 	if (_err == 0 && *fvpp != *tvpp) {
4871 		if (!*fvpp) {
4872 			panic("No fvpp after compound rename?");
4873 		}
4874 
4875 		events = NOTE_WRITE;
4876 		if (vnode_isdir(*fvpp)) {
4877 			/* Link count on dir changed only if we are moving a dir and...
4878 			 *      --Moved to new dir, not overwriting there
4879 			 *      --Kept in same dir and DID overwrite
4880 			 */
4881 			if (((fdvp != tdvp) && (!*tvpp)) || ((fdvp == tdvp) && (*tvpp))) {
4882 				events |= NOTE_LINK;
4883 			}
4884 		}
4885 
4886 		lock_vnode_and_post(fdvp, events);
4887 		if (fdvp != tdvp) {
4888 			lock_vnode_and_post(tdvp, events);
4889 		}
4890 
4891 		/* If you're replacing the target, post a deletion for it */
4892 		if (*tvpp) {
4893 			lock_vnode_and_post(*tvpp, NOTE_DELETE);
4894 		}
4895 
4896 		lock_vnode_and_post(*fvpp, NOTE_RENAME);
4897 	}
4898 
4899 	if (no_fvp) {
4900 		lookup_compound_vnop_post_hook(_err, fdvp, *fvpp, fcnp->cn_ndp, 0);
4901 	}
4902 	if (no_tvp && *tvpp != NULLVP) {
4903 		lookup_compound_vnop_post_hook(_err, tdvp, *tvpp, tcnp->cn_ndp, 0);
4904 	}
4905 
4906 	if (_err && _err != EKEEPLOOKING) {
4907 		if (*fvpp) {
4908 			vnode_put(*fvpp);
4909 			*fvpp = NULLVP;
4910 		}
4911 		if (*tvpp) {
4912 			vnode_put(*tvpp);
4913 			*tvpp = NULLVP;
4914 		}
4915 	}
4916 
4917 	return _err;
4918 }
4919 
4920 int
4921 vn_mkdir(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4922     struct vnode_attr *vap, vfs_context_t ctx)
4923 {
4924 	if (ndp->ni_cnd.cn_nameiop != CREATE) {
4925 		panic("Non-CREATE nameiop in vn_mkdir()?");
4926 	}
4927 
4928 	if (vnode_compound_mkdir_available(dvp)) {
4929 		return VNOP_COMPOUND_MKDIR(dvp, vpp, ndp, vap, ctx);
4930 	} else {
4931 		return VNOP_MKDIR(dvp, vpp, &ndp->ni_cnd, vap, ctx);
4932 	}
4933 }
4934 
4935 #if 0
4936 /*
4937 *#
4938 *#% mkdir        dvp     L U U
4939 *#% mkdir        vpp     - L -
4940 *#
4941 */
4942 struct vnop_mkdir_args {
4943 	struct vnodeop_desc *a_desc;
4944 	vnode_t a_dvp;
4945 	vnode_t *a_vpp;
4946 	struct componentname *a_cnp;
4947 	struct vnode_attr *a_vap;
4948 	vfs_context_t a_context;
4949 };
4950 #endif /* 0*/
4951 errno_t
4952 VNOP_MKDIR(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
4953     struct vnode_attr *vap, vfs_context_t ctx)
4954 {
4955 	int _err;
4956 	struct vnop_mkdir_args a;
4957 
4958 	a.a_desc = &vnop_mkdir_desc;
4959 	a.a_dvp = dvp;
4960 	a.a_vpp = vpp;
4961 	a.a_cnp = cnp;
4962 	a.a_vap = vap;
4963 	a.a_context = ctx;
4964 
4965 	_err = (*dvp->v_op[vnop_mkdir_desc.vdesc_offset])(&a);
4966 	if (_err == 0 && *vpp) {
4967 		DTRACE_FSINFO(mkdir, vnode_t, *vpp);
4968 	}
4969 #if CONFIG_APPLEDOUBLE
4970 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
4971 		/*
4972 		 * Remove stale Apple Double file (if any).
4973 		 */
4974 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
4975 	}
4976 #endif /* CONFIG_APPLEDOUBLE */
4977 
4978 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
4979 
4980 	return _err;
4981 }
4982 
4983 int
4984 VNOP_COMPOUND_MKDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
4985     struct vnode_attr *vap, vfs_context_t ctx)
4986 {
4987 	int _err;
4988 	struct vnop_compound_mkdir_args a;
4989 
4990 	a.a_desc = &vnop_compound_mkdir_desc;
4991 	a.a_dvp = dvp;
4992 	a.a_vpp = vpp;
4993 	a.a_cnp = &ndp->ni_cnd;
4994 	a.a_vap = vap;
4995 	a.a_flags = 0;
4996 	a.a_context = ctx;
4997 #if 0
4998 	a.a_mkdir_authorizer = vn_authorize_mkdir;
4999 #endif /* 0 */
5000 	a.a_reserved = NULL;
5001 
5002 	_err = (*dvp->v_op[vnop_compound_mkdir_desc.vdesc_offset])(&a);
5003 	if (_err == 0 && *vpp) {
5004 		DTRACE_FSINFO(compound_mkdir, vnode_t, *vpp);
5005 	}
5006 #if CONFIG_APPLEDOUBLE
5007 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5008 		/*
5009 		 * Remove stale Apple Double file (if any).
5010 		 */
5011 		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5012 	}
5013 #endif /* CONFIG_APPLEDOUBLE */
5014 
5015 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5016 
5017 	lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, (_err == 0));
5018 	if (*vpp && _err && _err != EKEEPLOOKING) {
5019 		vnode_put(*vpp);
5020 		*vpp = NULLVP;
5021 	}
5022 
5023 	return _err;
5024 }
5025 
5026 int
5027 vn_rmdir(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, struct vnode_attr *vap, vfs_context_t ctx)
5028 {
5029 	if (vnode_compound_rmdir_available(dvp)) {
5030 		return VNOP_COMPOUND_RMDIR(dvp, vpp, ndp, vap, ctx);
5031 	} else {
5032 		if (*vpp == NULLVP) {
5033 			panic("NULL vp, but not a compound VNOP?");
5034 		}
5035 		if (vap != NULL) {
5036 			panic("Non-NULL vap, but not a compound VNOP?");
5037 		}
5038 		return VNOP_RMDIR(dvp, *vpp, &ndp->ni_cnd, ctx);
5039 	}
5040 }
5041 
5042 #if 0
5043 /*
5044 *#
5045 *#% rmdir        dvp     L U U
5046 *#% rmdir        vp      L U U
5047 *#
5048 */
5049 struct vnop_rmdir_args {
5050 	struct vnodeop_desc *a_desc;
5051 	vnode_t a_dvp;
5052 	vnode_t a_vp;
5053 	struct componentname *a_cnp;
5054 	vfs_context_t a_context;
5055 };
5056 
5057 #endif /* 0*/
5058 errno_t
5059 VNOP_RMDIR(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, vfs_context_t ctx)
5060 {
5061 	int _err;
5062 	struct vnop_rmdir_args a;
5063 
5064 	a.a_desc = &vnop_rmdir_desc;
5065 	a.a_dvp = dvp;
5066 	a.a_vp = vp;
5067 	a.a_cnp = cnp;
5068 	a.a_context = ctx;
5069 
5070 	_err = (*vp->v_op[vnop_rmdir_desc.vdesc_offset])(&a);
5071 	DTRACE_FSINFO(rmdir, vnode_t, vp);
5072 
5073 	if (_err == 0) {
5074 		vnode_setneedinactive(vp);
5075 #if CONFIG_APPLEDOUBLE
5076 		if (!(NATIVE_XATTR(dvp))) {
5077 			/*
5078 			 * Remove any associated extended attribute file (._ AppleDouble file).
5079 			 */
5080 			xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 1);
5081 		}
5082 #endif
5083 	}
5084 
5085 	/* If you delete a dir, it loses its "." reference --> NOTE_LINK */
5086 	post_event_if_success(vp, _err, NOTE_DELETE | NOTE_LINK);
5087 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5088 
5089 	return _err;
5090 }
5091 
5092 int
5093 VNOP_COMPOUND_RMDIR(struct vnode *dvp, struct vnode **vpp, struct nameidata *ndp,
5094     struct vnode_attr *vap, vfs_context_t ctx)
5095 {
5096 	int _err;
5097 	struct vnop_compound_rmdir_args a;
5098 	int no_vp;
5099 
5100 	a.a_desc = &vnop_compound_rmdir_desc;
5101 	a.a_dvp = dvp;
5102 	a.a_vpp = vpp;
5103 	a.a_cnp = &ndp->ni_cnd;
5104 	a.a_vap = vap;
5105 	a.a_flags = 0;
5106 	a.a_context = ctx;
5107 	a.a_rmdir_authorizer = vn_authorize_rmdir;
5108 	a.a_reserved = NULL;
5109 
5110 	no_vp = (*vpp == NULLVP);
5111 
5112 	_err = (*dvp->v_op[vnop_compound_rmdir_desc.vdesc_offset])(&a);
5113 	if (_err == 0 && *vpp) {
5114 		DTRACE_FSINFO(compound_rmdir, vnode_t, *vpp);
5115 	}
5116 #if CONFIG_APPLEDOUBLE
5117 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5118 		/*
5119 		 * Remove stale Apple Double file (if any).
5120 		 */
5121 		xattrfile_remove(dvp, ndp->ni_cnd.cn_nameptr, ctx, 0);
5122 	}
5123 #endif
5124 
5125 	if (*vpp) {
5126 		post_event_if_success(*vpp, _err, NOTE_DELETE | NOTE_LINK);
5127 	}
5128 	post_event_if_success(dvp, _err, NOTE_LINK | NOTE_WRITE);
5129 
5130 	if (no_vp) {
5131 		lookup_compound_vnop_post_hook(_err, dvp, *vpp, ndp, 0);
5132 
5133 #if 0 /* Removing orphaned ._ files requires a vp.... */
5134 		if (*vpp && _err && _err != EKEEPLOOKING) {
5135 			vnode_put(*vpp);
5136 			*vpp = NULLVP;
5137 		}
5138 #endif  /* 0 */
5139 	}
5140 
5141 	return _err;
5142 }
5143 
5144 #if CONFIG_APPLEDOUBLE
5145 /*
5146  * Remove a ._ AppleDouble file
5147  */
5148 #define AD_STALE_SECS  (180)
5149 static void
5150 xattrfile_remove(vnode_t dvp, const char * basename, vfs_context_t ctx, int force)
5151 {
5152 	vnode_t xvp;
5153 	struct nameidata nd;
5154 	char smallname[64];
5155 	char *filename = NULL;
5156 	size_t alloc_len;
5157 	size_t copy_len;
5158 
5159 	if ((basename == NULL) || (basename[0] == '\0') ||
5160 	    (basename[0] == '.' && basename[1] == '_')) {
5161 		return;
5162 	}
5163 	filename = &smallname[0];
5164 	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
5165 	if (alloc_len >= sizeof(smallname)) {
5166 		alloc_len++;  /* snprintf result doesn't include '\0' */
5167 		filename = kalloc_data(alloc_len, Z_WAITOK);
5168 		copy_len = snprintf(filename, alloc_len, "._%s", basename);
5169 	}
5170 	NDINIT(&nd, DELETE, OP_UNLINK, WANTPARENT | LOCKLEAF | NOFOLLOW | USEDVP, UIO_SYSSPACE,
5171 	    CAST_USER_ADDR_T(filename), ctx);
5172 	nd.ni_dvp = dvp;
5173 	if (namei(&nd) != 0) {
5174 		goto out2;
5175 	}
5176 
5177 	xvp = nd.ni_vp;
5178 	dvp = nd.ni_dvp;
5179 	nameidone(&nd);
5180 	if (xvp->v_type != VREG) {
5181 		goto out1;
5182 	}
5183 
5184 	/*
5185 	 * When creating a new object and a "._" file already
5186 	 * exists, check to see if it's a stale "._" file. These are
5187 	 * typically AppleDouble (AD) files generated via XNU's
5188 	 * VFS compatibility shims for storing XATTRs and streams
5189 	 * on filesystems that do not support them natively.
5190 	 */
5191 	if (!force) {
5192 		struct vnode_attr va;
5193 
5194 		VATTR_INIT(&va);
5195 		VATTR_WANTED(&va, va_data_size);
5196 		VATTR_WANTED(&va, va_modify_time);
5197 		VATTR_WANTED(&va, va_change_time);
5198 
5199 		if (VNOP_GETATTR(xvp, &va, ctx) == 0 &&
5200 		    VATTR_IS_SUPPORTED(&va, va_data_size) &&
5201 		    va.va_data_size != 0) {
5202 			struct timeval tv_compare = {};
5203 			struct timeval tv_now = {};
5204 
5205 			/*
5206 			 * If the file exists (and has non-zero size), then use the newer of
5207 			 * chgtime / modtime to compare against present time. Note that setting XATTRs or updating
5208 			 * streams through the compatibility interfaces may not trigger chgtime to be updated, so
5209 			 * checking either modtime or chgtime is useful.
5210 			 */
5211 			if (VATTR_IS_SUPPORTED(&va, va_modify_time) && (va.va_modify_time.tv_sec)) {
5212 				if (VATTR_IS_SUPPORTED(&va, va_change_time) && (va.va_change_time.tv_sec)) {
5213 					tv_compare.tv_sec = va.va_change_time.tv_sec;
5214 					if (tv_compare.tv_sec < va.va_modify_time.tv_sec) {
5215 						tv_compare.tv_sec = va.va_modify_time.tv_sec;
5216 					}
5217 				} else {
5218 					/* fall back to mod-time alone if chgtime not supported or set to 0 */
5219 					tv_compare.tv_sec = va.va_modify_time.tv_sec;
5220 				}
5221 			}
5222 
5223 	/* Now that we have a time to compare against, compare it against AD_STALE_SECS */
5224 			microtime(&tv_now);
5225 			if ((tv_compare.tv_sec > 0) &&
5226 			    (tv_now.tv_sec > tv_compare.tv_sec) &&
5227 			    ((tv_now.tv_sec - tv_compare.tv_sec) > AD_STALE_SECS)) {
5228 				force = 1;  /* must be stale */
5229 			}
5230 		}
5231 	}
5232 
5233 	if (force) {
5234 		int error;
5235 
5236 		error = VNOP_REMOVE(dvp, xvp, &nd.ni_cnd, 0, ctx);
5237 		if (error == 0) {
5238 			vnode_setneedinactive(xvp);
5239 		}
5240 
5241 		post_event_if_success(xvp, error, NOTE_DELETE);
5242 		post_event_if_success(dvp, error, NOTE_WRITE);
5243 	}
5244 
5245 out1:
5246 	vnode_put(dvp);
5247 	vnode_put(xvp);
5248 out2:
5249 	if (filename && filename != &smallname[0]) {
5250 		kfree_data(filename, alloc_len);
5251 	}
5252 }
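
/*
 * Example (illustrative only): if creating "foo" finds a leftover "._foo"
 * whose newer of modification/change time is more than AD_STALE_SECS (180
 * seconds) in the past, the logic above treats it as stale and removes it
 * even when called with force == 0.
 */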
5253 
5254 /*
5255  * Shadow uid/gid/mode to a ._ AppleDouble file
5256  */
5257 static void
5258 xattrfile_setattr(vnode_t dvp, const char * basename, struct vnode_attr * vap,
5259     vfs_context_t ctx)
5260 {
5261 	vnode_t xvp;
5262 	struct nameidata nd;
5263 	char smallname[64];
5264 	char *filename = NULL;
5265 	size_t alloc_len;
5266 	size_t copy_len;
5267 
5268 	if ((dvp == NULLVP) ||
5269 	    (basename == NULL) || (basename[0] == '\0') ||
5270 	    (basename[0] == '.' && basename[1] == '_')) {
5271 		return;
5272 	}
5273 	filename = &smallname[0];
5274 	alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
5275 	if (alloc_len >= sizeof(smallname)) {
5276 		alloc_len++;  /* snprintf result doesn't include '\0' */
5277 		filename = kalloc_data(alloc_len, Z_WAITOK);
5278 		copy_len = snprintf(filename, alloc_len, "._%s", basename);
5279 	}
5280 	NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
5281 	    CAST_USER_ADDR_T(filename), ctx);
5282 	nd.ni_dvp = dvp;
5283 	if (namei(&nd) != 0) {
5284 		goto out2;
5285 	}
5286 
5287 	xvp = nd.ni_vp;
5288 	nameidone(&nd);
5289 
5290 	if (xvp->v_type == VREG) {
5291 		struct vnop_setattr_args a;
5292 
5293 		a.a_desc = &vnop_setattr_desc;
5294 		a.a_vp = xvp;
5295 		a.a_vap = vap;
5296 		a.a_context = ctx;
5297 
5298 		(void) (*xvp->v_op[vnop_setattr_desc.vdesc_offset])(&a);
5299 	}
5300 
5301 	vnode_put(xvp);
5302 out2:
5303 	if (filename && filename != &smallname[0]) {
5304 		kfree_data(filename, alloc_len);
5305 	}
5306 }
5307 #endif /* CONFIG_APPLEDOUBLE */
5308 
5309 #if 0
5310 /*
5311 *#
5312 *#% symlink      dvp     L U U
5313 *#% symlink      vpp     - U -
5314 *#
5315 */
5316 struct vnop_symlink_args {
5317 	struct vnodeop_desc *a_desc;
5318 	vnode_t a_dvp;
5319 	vnode_t *a_vpp;
5320 	struct componentname *a_cnp;
5321 	struct vnode_attr *a_vap;
5322 	char *a_target;
5323 	vfs_context_t a_context;
5324 };
5325 
5326 #endif /* 0*/
5327 errno_t
5328 VNOP_SYMLINK(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
5329     struct vnode_attr *vap, char *target, vfs_context_t ctx)
5330 {
5331 	int _err;
5332 	struct vnop_symlink_args a;
5333 
5334 	a.a_desc = &vnop_symlink_desc;
5335 	a.a_dvp = dvp;
5336 	a.a_vpp = vpp;
5337 	a.a_cnp = cnp;
5338 	a.a_vap = vap;
5339 	a.a_target = target;
5340 	a.a_context = ctx;
5341 
5342 	_err = (*dvp->v_op[vnop_symlink_desc.vdesc_offset])(&a);
5343 	DTRACE_FSINFO(symlink, vnode_t, dvp);
5344 #if CONFIG_APPLEDOUBLE
5345 	if (_err == 0 && !NATIVE_XATTR(dvp)) {
5346 		/*
5347 		 * Remove stale Apple Double file (if any).  Posts its own knotes
5348 		 */
5349 		xattrfile_remove(dvp, cnp->cn_nameptr, ctx, 0);
5350 	}
5351 #endif /* CONFIG_APPLEDOUBLE */
5352 
5353 	post_event_if_success(dvp, _err, NOTE_WRITE);
5354 
5355 	return _err;
5356 }
5357 
5358 #if 0
5359 /*
5360 *#
5361 *#% readdir      vp      L L L
5362 *#
5363 */
5364 struct vnop_readdir_args {
5365 	struct vnodeop_desc *a_desc;
5366 	vnode_t a_vp;
5367 	struct uio *a_uio;
5368 	int a_flags;
5369 	int *a_eofflag;
5370 	int *a_numdirent;
5371 	vfs_context_t a_context;
5372 };
5373 
5374 #endif /* 0*/
5375 errno_t
5376 VNOP_READDIR(struct vnode *vp, struct uio *uio, int flags, int *eofflag,
5377     int *numdirent, vfs_context_t ctx)
5378 {
5379 	int _err;
5380 	struct vnop_readdir_args a;
5381 #if CONFIG_DTRACE
5382 	user_ssize_t resid = uio_resid(uio);
5383 #endif
5384 
5385 	a.a_desc = &vnop_readdir_desc;
5386 	a.a_vp = vp;
5387 	a.a_uio = uio;
5388 	a.a_flags = flags;
5389 	a.a_eofflag = eofflag;
5390 	a.a_numdirent = numdirent;
5391 	a.a_context = ctx;
5392 
5393 	_err = (*vp->v_op[vnop_readdir_desc.vdesc_offset])(&a);
5394 	DTRACE_FSINFO_IO(readdir,
5395 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5396 
5397 	return _err;
5398 }
5399 
5400 #if 0
5401 /*
5402 *#
5403 *#% readdirattr  vp      L L L
5404 *#
5405 */
5406 struct vnop_readdirattr_args {
5407 	struct vnodeop_desc *a_desc;
5408 	vnode_t a_vp;
5409 	struct attrlist *a_alist;
5410 	struct uio *a_uio;
5411 	uint32_t a_maxcount;
5412 	uint32_t a_options;
5413 	uint32_t *a_newstate;
5414 	int *a_eofflag;
5415 	uint32_t *a_actualcount;
5416 	vfs_context_t a_context;
5417 };
5418 
5419 #endif /* 0*/
5420 errno_t
5421 VNOP_READDIRATTR(struct vnode *vp, struct attrlist *alist, struct uio *uio, uint32_t maxcount,
5422     uint32_t options, uint32_t *newstate, int *eofflag, uint32_t *actualcount, vfs_context_t ctx)
5423 {
5424 	int _err;
5425 	struct vnop_readdirattr_args a;
5426 #if CONFIG_DTRACE
5427 	user_ssize_t resid = uio_resid(uio);
5428 #endif
5429 
5430 	a.a_desc = &vnop_readdirattr_desc;
5431 	a.a_vp = vp;
5432 	a.a_alist = alist;
5433 	a.a_uio = uio;
5434 	a.a_maxcount = maxcount;
5435 	a.a_options = options;
5436 	a.a_newstate = newstate;
5437 	a.a_eofflag = eofflag;
5438 	a.a_actualcount = actualcount;
5439 	a.a_context = ctx;
5440 
5441 	_err = (*vp->v_op[vnop_readdirattr_desc.vdesc_offset])(&a);
5442 	DTRACE_FSINFO_IO(readdirattr,
5443 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5444 
5445 	return _err;
5446 }
5447 
5448 #if 0
5449 	struct vnop_getattrlistbulk_args {
5450 	struct vnodeop_desc *a_desc;
5451 	vnode_t a_vp;
5452 	struct attrlist *a_alist;
5453 	struct vnode_attr *a_vap;
5454 	struct uio *a_uio;
5455 	void *a_private;
5456 	uint64_t a_options;
5457 	int *a_eofflag;
5458 	uint32_t *a_actualcount;
5459 	vfs_context_t a_context;
5460 };
5461 #endif /* 0*/
5462 errno_t
5463 VNOP_GETATTRLISTBULK(struct vnode *vp, struct attrlist *alist,
5464     struct vnode_attr *vap, struct uio *uio, void *private, uint64_t options,
5465     int32_t *eofflag, int32_t *actualcount, vfs_context_t ctx)
5466 {
5467 	int _err;
5468 	struct vnop_getattrlistbulk_args a;
5469 #if CONFIG_DTRACE
5470 	user_ssize_t resid = uio_resid(uio);
5471 #endif
5472 
5473 	a.a_desc = &vnop_getattrlistbulk_desc;
5474 	a.a_vp = vp;
5475 	a.a_alist = alist;
5476 	a.a_vap = vap;
5477 	a.a_uio = uio;
5478 	a.a_private = private;
5479 	a.a_options = options;
5480 	a.a_eofflag = eofflag;
5481 	a.a_actualcount = actualcount;
5482 	a.a_context = ctx;
5483 
5484 	_err = (*vp->v_op[vnop_getattrlistbulk_desc.vdesc_offset])(&a);
5485 	DTRACE_FSINFO_IO(getattrlistbulk,
5486 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5487 
5488 	return _err;
5489 }
5490 
5491 #if 0
5492 /*
5493 *#
5494 *#% readlink     vp      L L L
5495 *#
5496 */
5497 struct vnop_readlink_args {
5498 	struct vnodeop_desc *a_desc;
5499 	vnode_t a_vp;
5500 	struct uio *a_uio;
5501 	vfs_context_t a_context;
5502 };
5503 #endif /* 0 */
5504 
5505 /*
5506  * Returns:	0			Success
5507  *		lock_fsnode:ENOENT	No such file or directory [only for VFS
5508  *					 that is not thread safe & vnode is
5509  *					 currently being/has been terminated]
5510  *		<vfs_readlink>:EINVAL
5511  *		<vfs_readlink>:???
5512  *
5513  * Note:	The return codes from the underlying VFS's readlink routine
5514  *		can't be fully enumerated here, since third party VFS authors
5515  *		may not limit their error returns to the ones documented here,
5516  *		even though this may result in some programs functioning
5517  *		incorrectly.
5518  *
5519  *		The return codes documented above are those which may currently
5520  *		be returned by HFS from hfs_vnop_readlink, not including
5521  *		additional error code which may be propagated from underlying
5522  *		routines.
5523  */
5524 errno_t
5525 VNOP_READLINK(struct vnode *vp, struct uio *uio, vfs_context_t ctx)
5526 {
5527 	int _err;
5528 	struct vnop_readlink_args a;
5529 #if CONFIG_DTRACE
5530 	user_ssize_t resid = uio_resid(uio);
5531 #endif
5532 	a.a_desc = &vnop_readlink_desc;
5533 	a.a_vp = vp;
5534 	a.a_uio = uio;
5535 	a.a_context = ctx;
5536 
5537 	_err = (*vp->v_op[vnop_readlink_desc.vdesc_offset])(&a);
5538 	DTRACE_FSINFO_IO(readlink,
5539 	    vnode_t, vp, user_ssize_t, (resid - uio_resid(uio)));
5540 
5541 	return _err;
5542 }
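
#if 0
/*
 * Illustrative sketch only (not part of the original file): a filesystem's
 * readlink handler typically just copies the stored link target into the
 * caller's uio.  The handler name and the literal target are hypothetical;
 * a real handler reads the target from its on-disk metadata.
 */
static int
examplefs_vnop_readlink(struct vnop_readlink_args *ap)
{
	const char *target = "example-target";

	return uiomove(target, (int)strlen(target), ap->a_uio);
}
#endif /* 0 */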
5543 
5544 #if 0
5545 /*
5546 *#
5547 *#% inactive     vp      L U U
5548 *#
5549 */
5550 struct vnop_inactive_args {
5551 	struct vnodeop_desc *a_desc;
5552 	vnode_t a_vp;
5553 	vfs_context_t a_context;
5554 };
5555 #endif /* 0*/
5556 errno_t
5557 VNOP_INACTIVE(struct vnode *vp, vfs_context_t ctx)
5558 {
5559 	int _err;
5560 	struct vnop_inactive_args a;
5561 
5562 	a.a_desc = &vnop_inactive_desc;
5563 	a.a_vp = vp;
5564 	a.a_context = ctx;
5565 
5566 	_err = (*vp->v_op[vnop_inactive_desc.vdesc_offset])(&a);
5567 	DTRACE_FSINFO(inactive, vnode_t, vp);
5568 
5569 #if NAMEDSTREAMS
5570 	/* For file systems that do not support namedstream natively, mark
5571 	 * the shadow stream file vnode to be recycled as soon as the last
5572 	 * reference goes away.  To avoid re-entering reclaim code, do not
5573 	 * call recycle on terminating namedstream vnodes.
5574 	 */
5575 	if (vnode_isnamedstream(vp) &&
5576 	    (vp->v_parent != NULLVP) &&
5577 	    vnode_isshadow(vp) &&
5578 	    ((vp->v_lflag & VL_TERMINATE) == 0)) {
5579 		vnode_recycle(vp);
5580 	}
5581 #endif
5582 
5583 	return _err;
5584 }
5585 
5586 
5587 #if 0
5588 /*
5589 *#
5590 *#% reclaim      vp      U U U
5591 *#
5592 */
5593 struct vnop_reclaim_args {
5594 	struct vnodeop_desc *a_desc;
5595 	vnode_t a_vp;
5596 	vfs_context_t a_context;
5597 };
5598 #endif /* 0*/
5599 errno_t
5600 VNOP_RECLAIM(struct vnode *vp, vfs_context_t ctx)
5601 {
5602 	int _err;
5603 	struct vnop_reclaim_args a;
5604 
5605 	a.a_desc = &vnop_reclaim_desc;
5606 	a.a_vp = vp;
5607 	a.a_context = ctx;
5608 
5609 	_err = (*vp->v_op[vnop_reclaim_desc.vdesc_offset])(&a);
5610 	DTRACE_FSINFO(reclaim, vnode_t, vp);
5611 
5612 	return _err;
5613 }
5614 
5615 
5616 /*
5617  * Returns:	0			Success
5618  *	lock_fsnode:ENOENT		No such file or directory [only for VFS
5619  *					 that is not thread safe & vnode is
5620  *					 currently being/has been terminated]
5621  *	<vnop_pathconf_desc>:???	[per FS implementation specific]
5622  */
5623 #if 0
5624 /*
5625 *#
5626 *#% pathconf     vp      L L L
5627 *#
5628 */
5629 struct vnop_pathconf_args {
5630 	struct vnodeop_desc *a_desc;
5631 	vnode_t a_vp;
5632 	int a_name;
5633 	int32_t *a_retval;
5634 	vfs_context_t a_context;
5635 };
5636 #endif /* 0*/
5637 errno_t
5638 VNOP_PATHCONF(struct vnode *vp, int name, int32_t *retval, vfs_context_t ctx)
5639 {
5640 	int _err;
5641 	struct vnop_pathconf_args a;
5642 
5643 	a.a_desc = &vnop_pathconf_desc;
5644 	a.a_vp = vp;
5645 	a.a_name = name;
5646 	a.a_retval = retval;
5647 	a.a_context = ctx;
5648 
5649 	_err = (*vp->v_op[vnop_pathconf_desc.vdesc_offset])(&a);
5650 	DTRACE_FSINFO(pathconf, vnode_t, vp);
5651 
5652 	return _err;
5653 }
5654 
5655 /*
5656  * Returns:	0			Success
5657  *	err_advlock:ENOTSUP
5658  *	lf_advlock:???
5659  *	<vnop_advlock_desc>:???
5660  *
5661  * Notes:	VFS implementations of advisory locking using calls through
5662  *		<vnop_advlock_desc> because lock enforcement does not occur
5663  *		locally should try to limit themselves to the return codes
5664  *		documented above for lf_advlock and err_advlock.
5665  */
5666 #if 0
5667 /*
5668 *#
5669 *#% advlock      vp      U U U
5670 *#
5671 */
5672 struct vnop_advlock_args {
5673 	struct vnodeop_desc *a_desc;
5674 	vnode_t a_vp;
5675 	caddr_t a_id;
5676 	int a_op;
5677 	struct flock *a_fl;
5678 	int a_flags;
5679 	vfs_context_t a_context;
5680 };
5681 #endif /* 0*/
5682 errno_t
5683 VNOP_ADVLOCK(struct vnode *vp, caddr_t id, int op, struct flock *fl, int flags, vfs_context_t ctx, struct timespec *timeout)
5684 {
5685 	int _err;
5686 	struct vnop_advlock_args a;
5687 
5688 	a.a_desc = &vnop_advlock_desc;
5689 	a.a_vp = vp;
5690 	a.a_id = id;
5691 	a.a_op = op;
5692 	a.a_fl = fl;
5693 	a.a_flags = flags;
5694 	a.a_context = ctx;
5695 	a.a_timeout = timeout;
5696 
5697 	/* Disallow advisory locking on non-seekable vnodes */
5698 	if (vnode_isfifo(vp)) {
5699 		_err = err_advlock(&a);
5700 	} else {
5701 		if ((vp->v_flag & VLOCKLOCAL)) {
5702 			/* Advisory locking done at this layer */
5703 			_err = lf_advlock(&a);
5704 		} else if (flags & F_OFD_LOCK) {
5705 			/* Non-local locking doesn't work for OFD locks */
5706 			_err = err_advlock(&a);
5707 		} else if (op == F_TRANSFER) {
5708 			/* Non-local locking doesn't have F_TRANSFER */
5709 			_err = err_advlock(&a);
5710 		} else {
5711 			/* Advisory locking done by underlying filesystem */
5712 			_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
5713 		}
5714 		DTRACE_FSINFO(advlock, vnode_t, vp);
5715 		if (op == F_UNLCK &&
5716 		    (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
5717 			post_event_if_success(vp, _err, NOTE_FUNLOCK);
5718 		}
5719 	}
5720 
5721 	return _err;
5722 }
5723 
5724 
5725 
5726 #if 0
5727 /*
5728 *#
5729 *#% allocate     vp      L L L
5730 *#
5731 */
5732 struct vnop_allocate_args {
5733 	struct vnodeop_desc *a_desc;
5734 	vnode_t a_vp;
5735 	off_t a_length;
5736 	u_int32_t a_flags;
5737 	off_t *a_bytesallocated;
5738 	off_t a_offset;
5739 	vfs_context_t a_context;
5740 };
5741 
5742 #endif /* 0*/
5743 errno_t
5744 VNOP_ALLOCATE(struct vnode *vp, off_t length, u_int32_t flags, off_t *bytesallocated, off_t offset, vfs_context_t ctx)
5745 {
5746 	int _err;
5747 	struct vnop_allocate_args a;
5748 
5749 	a.a_desc = &vnop_allocate_desc;
5750 	a.a_vp = vp;
5751 	a.a_length = length;
5752 	a.a_flags = flags;
5753 	a.a_bytesallocated = bytesallocated;
5754 	a.a_offset = offset;
5755 	a.a_context = ctx;
5756 
5757 	_err = (*vp->v_op[vnop_allocate_desc.vdesc_offset])(&a);
5758 	DTRACE_FSINFO(allocate, vnode_t, vp);
5759 #if CONFIG_FSE
5760 	if (_err == 0) {
5761 		add_fsevent(FSE_STAT_CHANGED, ctx, FSE_ARG_VNODE, vp, FSE_ARG_DONE);
5762 	}
5763 #endif
5764 
5765 	return _err;
5766 }
5767 
5768 #if 0
5769 /*
5770 *#
5771 *#% pagein       vp      = = =
5772 *#
5773 */
5774 struct vnop_pagein_args {
5775 	struct vnodeop_desc *a_desc;
5776 	vnode_t a_vp;
5777 	upl_t a_pl;
5778 	upl_offset_t a_pl_offset;
5779 	off_t a_f_offset;
5780 	size_t a_size;
5781 	int a_flags;
5782 	vfs_context_t a_context;
5783 };
5784 #endif /* 0*/
5785 errno_t
5786 VNOP_PAGEIN(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5787 {
5788 	int _err;
5789 	struct vnop_pagein_args a;
5790 
5791 	a.a_desc = &vnop_pagein_desc;
5792 	a.a_vp = vp;
5793 	a.a_pl = pl;
5794 	a.a_pl_offset = pl_offset;
5795 	a.a_f_offset = f_offset;
5796 	a.a_size = size;
5797 	a.a_flags = flags;
5798 	a.a_context = ctx;
5799 
5800 	_err = (*vp->v_op[vnop_pagein_desc.vdesc_offset])(&a);
5801 	DTRACE_FSINFO(pagein, vnode_t, vp);
5802 
5803 	return _err;
5804 }
5805 
5806 #if 0
5807 /*
5808 *#
5809 *#% pageout      vp      = = =
5810 *#
5811 */
5812 struct vnop_pageout_args {
5813 	struct vnodeop_desc *a_desc;
5814 	vnode_t a_vp;
5815 	upl_t a_pl;
5816 	upl_offset_t a_pl_offset;
5817 	off_t a_f_offset;
5818 	size_t a_size;
5819 	int a_flags;
5820 	vfs_context_t a_context;
5821 };
5822 
5823 #endif /* 0*/
5824 errno_t
5825 VNOP_PAGEOUT(struct vnode *vp, upl_t pl, upl_offset_t pl_offset, off_t f_offset, size_t size, int flags, vfs_context_t ctx)
5826 {
5827 	int _err;
5828 	struct vnop_pageout_args a;
5829 
5830 	a.a_desc = &vnop_pageout_desc;
5831 	a.a_vp = vp;
5832 	a.a_pl = pl;
5833 	a.a_pl_offset = pl_offset;
5834 	a.a_f_offset = f_offset;
5835 	a.a_size = size;
5836 	a.a_flags = flags;
5837 	a.a_context = ctx;
5838 
5839 	_err = (*vp->v_op[vnop_pageout_desc.vdesc_offset])(&a);
5840 	DTRACE_FSINFO(pageout, vnode_t, vp);
5841 
5842 	post_event_if_success(vp, _err, NOTE_WRITE);
5843 
5844 	return _err;
5845 }
5846 
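/*
 * vn_remove: remove a file on behalf of the VFS layer.  When the filesystem
 * advertises compound remove support, the lookup and remove are handed down
 * together via VNOP_COMPOUND_REMOVE; otherwise the already-looked-up vnode
 * in *vpp is removed with the traditional VNOP_REMOVE.
 */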
5847 int
5848 vn_remove(vnode_t dvp, vnode_t *vpp, struct nameidata *ndp, int32_t flags, struct vnode_attr *vap, vfs_context_t ctx)
5849 {
5850 	if (vnode_compound_remove_available(dvp)) {
5851 		return VNOP_COMPOUND_REMOVE(dvp, vpp, ndp, flags, vap, ctx);
5852 	} else {
5853 		return VNOP_REMOVE(dvp, *vpp, &ndp->ni_cnd, flags, ctx);
5854 	}
5855 }
5856 
5857 #if CONFIG_SEARCHFS
5858 
5859 #if 0
5860 /*
5861 *#
5862 *#% searchfs     vp      L L L
5863 *#
5864 */
5865 struct vnop_searchfs_args {
5866 	struct vnodeop_desc *a_desc;
5867 	vnode_t a_vp;
5868 	void *a_searchparams1;
5869 	void *a_searchparams2;
5870 	struct attrlist *a_searchattrs;
5871 	uint32_t a_maxmatches;
5872 	struct timeval *a_timelimit;
5873 	struct attrlist *a_returnattrs;
5874 	uint32_t *a_nummatches;
5875 	uint32_t a_scriptcode;
5876 	uint32_t a_options;
5877 	struct uio *a_uio;
5878 	struct searchstate *a_searchstate;
5879 	vfs_context_t a_context;
5880 };
5881 
5882 #endif /* 0*/
5883 errno_t
5884 VNOP_SEARCHFS(struct vnode *vp, void *searchparams1, void *searchparams2, struct attrlist *searchattrs, uint32_t maxmatches, struct timeval *timelimit, struct attrlist *returnattrs, uint32_t *nummatches, uint32_t scriptcode, uint32_t options, struct uio *uio, struct searchstate *searchstate, vfs_context_t ctx)
5885 {
5886 	int _err;
5887 	struct vnop_searchfs_args a;
5888 
5889 	a.a_desc = &vnop_searchfs_desc;
5890 	a.a_vp = vp;
5891 	a.a_searchparams1 = searchparams1;
5892 	a.a_searchparams2 = searchparams2;
5893 	a.a_searchattrs = searchattrs;
5894 	a.a_maxmatches = maxmatches;
5895 	a.a_timelimit = timelimit;
5896 	a.a_returnattrs = returnattrs;
5897 	a.a_nummatches = nummatches;
5898 	a.a_scriptcode = scriptcode;
5899 	a.a_options = options;
5900 	a.a_uio = uio;
5901 	a.a_searchstate = searchstate;
5902 	a.a_context = ctx;
5903 
5904 	_err = (*vp->v_op[vnop_searchfs_desc.vdesc_offset])(&a);
5905 	DTRACE_FSINFO(searchfs, vnode_t, vp);
5906 
5907 	return _err;
5908 }
5909 #endif /* CONFIG_SEARCHFS */
5910 
5911 #if 0
5912 /*
5913 *#
5914 *#% copyfile fvp U U U
5915 *#% copyfile tdvp L U U
5916 *#% copyfile tvp X U U
5917 *#
5918 */
5919 struct vnop_copyfile_args {
5920 	struct vnodeop_desc *a_desc;
5921 	vnode_t a_fvp;
5922 	vnode_t a_tdvp;
5923 	vnode_t a_tvp;
5924 	struct componentname *a_tcnp;
5925 	int a_mode;
5926 	int a_flags;
5927 	vfs_context_t a_context;
5928 };
5929 #endif /* 0*/
5930 errno_t
5931 VNOP_COPYFILE(struct vnode *fvp, struct vnode *tdvp, struct vnode *tvp, struct componentname *tcnp,
5932     int mode, int flags, vfs_context_t ctx)
5933 {
5934 	int _err;
5935 	struct vnop_copyfile_args a;
5936 	a.a_desc = &vnop_copyfile_desc;
5937 	a.a_fvp = fvp;
5938 	a.a_tdvp = tdvp;
5939 	a.a_tvp = tvp;
5940 	a.a_tcnp = tcnp;
5941 	a.a_mode = mode;
5942 	a.a_flags = flags;
5943 	a.a_context = ctx;
5944 	_err = (*fvp->v_op[vnop_copyfile_desc.vdesc_offset])(&a);
5945 	DTRACE_FSINFO(copyfile, vnode_t, fvp);
5946 	return _err;
5947 }
5948 
5949 #if 0
5950 struct vnop_clonefile_args {
5951 	struct vnodeop_desc *a_desc;
5952 	vnode_t a_fvp;
5953 	vnode_t a_dvp;
5954 	vnode_t *a_vpp;
5955 	struct componentname *a_cnp;
5956 	struct vnode_attr *a_vap;
5957 	uint32_t a_flags;
5958 	vfs_context_t a_context;
5959 	int (*a_dir_clone_authorizer)(  /* Authorization callback */
5960 		struct vnode_attr *vap,         /* attribute to be authorized */
5961 		kauth_action_t action,         /* action for which attribute is to be authorized */
5962 		struct vnode_attr *dvap,         /* target directory attributes */
5963 		vnode_t sdvp,         /* source directory vnode pointer (optional) */
5964 		mount_t mp,         /* mount point of filesystem */
5965 		dir_clone_authorizer_op_t vattr_op,         /* specific operation requested : setup, authorization or cleanup  */
5966 		uint32_t flags,         /* value passed in a_flags to the VNOP */
5967 		vfs_context_t ctx,                      /* As passed to VNOP */
5968 		void *reserved);                        /* Always NULL */
5969 	void *a_reserved;               /* Currently unused */
5970 };
5971 #endif /* 0 */
5972 
5973 errno_t
5974 VNOP_CLONEFILE(vnode_t fvp, vnode_t dvp, vnode_t *vpp,
5975     struct componentname *cnp, struct vnode_attr *vap, uint32_t flags,
5976     vfs_context_t ctx)
5977 {
5978 	int _err;
5979 	struct vnop_clonefile_args a;
5980 	a.a_desc = &vnop_clonefile_desc;
5981 	a.a_fvp = fvp;
5982 	a.a_dvp = dvp;
5983 	a.a_vpp = vpp;
5984 	a.a_cnp = cnp;
5985 	a.a_vap = vap;
5986 	a.a_flags = flags;
5987 	a.a_context = ctx;
5988 
5989 	if (vnode_vtype(fvp) == VDIR) {
5990 		a.a_dir_clone_authorizer = vnode_attr_authorize_dir_clone;
5991 	} else {
5992 		a.a_dir_clone_authorizer = NULL;
5993 	}
5994 
5995 	_err = (*dvp->v_op[vnop_clonefile_desc.vdesc_offset])(&a);
5996 
5997 	if (_err == 0 && *vpp) {
5998 		DTRACE_FSINFO(clonefile, vnode_t, *vpp);
5999 		if (kdebug_enable) {
6000 			kdebug_lookup(*vpp, cnp);
6001 		}
6002 	}
6003 
6004 	post_event_if_success(dvp, _err, NOTE_WRITE);
6005 
6006 	return _err;
6007 }
6008 
6009 errno_t
6010 VNOP_GETXATTR(vnode_t vp, const char *name, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6011 {
6012 	struct vnop_getxattr_args a;
6013 	int error;
6014 
6015 	a.a_desc = &vnop_getxattr_desc;
6016 	a.a_vp = vp;
6017 	a.a_name = name;
6018 	a.a_uio = uio;
6019 	a.a_size = size;
6020 	a.a_options = options;
6021 	a.a_context = ctx;
6022 
6023 	error = (*vp->v_op[vnop_getxattr_desc.vdesc_offset])(&a);
6024 	DTRACE_FSINFO(getxattr, vnode_t, vp);
6025 
6026 	return error;
6027 }
6028 
6029 errno_t
6030 VNOP_SETXATTR(vnode_t vp, const char *name, uio_t uio, int options, vfs_context_t ctx)
6031 {
6032 	struct vnop_setxattr_args a;
6033 	int error;
6034 
6035 	a.a_desc = &vnop_setxattr_desc;
6036 	a.a_vp = vp;
6037 	a.a_name = name;
6038 	a.a_uio = uio;
6039 	a.a_options = options;
6040 	a.a_context = ctx;
6041 
6042 	error = (*vp->v_op[vnop_setxattr_desc.vdesc_offset])(&a);
6043 	DTRACE_FSINFO(setxattr, vnode_t, vp);
6044 
6045 	if (error == 0) {
6046 		vnode_uncache_authorized_action(vp, KAUTH_INVALIDATE_CACHED_RIGHTS);
6047 	}
6048 
6049 	post_event_if_success(vp, error, NOTE_ATTRIB);
6050 
6051 	return error;
6052 }
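
/*
 * Illustrative use (a minimal sketch; the attribute name "com.example.tag"
 * and the calling function are assumptions): writing a small extended
 * attribute from kernel space through this wrapper.
 *
 *	static int
 *	example_tag_vnode(vnode_t vp, vfs_context_t ctx)
 *	{
 *		char data[] = "tagged";
 *		uio_t uio;
 *		int error;
 *
 *		uio = uio_create(1, 0, UIO_SYSSPACE, UIO_WRITE);
 *		if (uio == NULL) {
 *			return ENOMEM;
 *		}
 *		uio_addiov(uio, CAST_USER_ADDR_T(data), sizeof(data) - 1);
 *		error = VNOP_SETXATTR(vp, "com.example.tag", uio, XATTR_CREATE, ctx);
 *		uio_free(uio);
 *		return error;
 *	}
 */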
6053 
6054 errno_t
6055 VNOP_REMOVEXATTR(vnode_t vp, const char *name, int options, vfs_context_t ctx)
6056 {
6057 	struct vnop_removexattr_args a;
6058 	int error;
6059 
6060 	a.a_desc = &vnop_removexattr_desc;
6061 	a.a_vp = vp;
6062 	a.a_name = name;
6063 	a.a_options = options;
6064 	a.a_context = ctx;
6065 
6066 	error = (*vp->v_op[vnop_removexattr_desc.vdesc_offset])(&a);
6067 	DTRACE_FSINFO(removexattr, vnode_t, vp);
6068 
6069 	post_event_if_success(vp, error, NOTE_ATTRIB);
6070 
6071 	return error;
6072 }
6073 
6074 errno_t
6075 VNOP_LISTXATTR(vnode_t vp, uio_t uio, size_t *size, int options, vfs_context_t ctx)
6076 {
6077 	struct vnop_listxattr_args a;
6078 	int error;
6079 
6080 	a.a_desc = &vnop_listxattr_desc;
6081 	a.a_vp = vp;
6082 	a.a_uio = uio;
6083 	a.a_size = size;
6084 	a.a_options = options;
6085 	a.a_context = ctx;
6086 
6087 	error = (*vp->v_op[vnop_listxattr_desc.vdesc_offset])(&a);
6088 	DTRACE_FSINFO(listxattr, vnode_t, vp);
6089 
6090 	return error;
6091 }
6092 
6093 
6094 #if 0
6095 /*
6096 *#
6097 *#% blktooff vp = = =
6098 *#
6099 */
6100 struct vnop_blktooff_args {
6101 	struct vnodeop_desc *a_desc;
6102 	vnode_t a_vp;
6103 	daddr64_t a_lblkno;
6104 	off_t *a_offset;
6105 };
6106 #endif /* 0*/
6107 errno_t
6108 VNOP_BLKTOOFF(struct vnode *vp, daddr64_t lblkno, off_t *offset)
6109 {
6110 	int _err;
6111 	struct vnop_blktooff_args a;
6112 
6113 	a.a_desc = &vnop_blktooff_desc;
6114 	a.a_vp = vp;
6115 	a.a_lblkno = lblkno;
6116 	a.a_offset = offset;
6117 
6118 	_err = (*vp->v_op[vnop_blktooff_desc.vdesc_offset])(&a);
6119 	DTRACE_FSINFO(blktooff, vnode_t, vp);
6120 
6121 	return _err;
6122 }
6123 
6124 #if 0
6125 /*
6126 *#
6127 *#% offtoblk vp = = =
6128 *#
6129 */
6130 struct vnop_offtoblk_args {
6131 	struct vnodeop_desc *a_desc;
6132 	vnode_t a_vp;
6133 	off_t a_offset;
6134 	daddr64_t *a_lblkno;
6135 };
6136 #endif /* 0*/
6137 errno_t
6138 VNOP_OFFTOBLK(struct vnode *vp, off_t offset, daddr64_t *lblkno)
6139 {
6140 	int _err;
6141 	struct vnop_offtoblk_args a;
6142 
6143 	a.a_desc = &vnop_offtoblk_desc;
6144 	a.a_vp = vp;
6145 	a.a_offset = offset;
6146 	a.a_lblkno = lblkno;
6147 
6148 	_err = (*vp->v_op[vnop_offtoblk_desc.vdesc_offset])(&a);
6149 	DTRACE_FSINFO(offtoblk, vnode_t, vp);
6150 
6151 	return _err;
6152 }
6153 
6154 #if 0
6155 /*
6156 *#
6157 *#% verify vp L L L
6158 *#
6159 */
6160 struct vnop_verify_args {
6161 	struct vnodeop_desc *a_desc;
6162 	vnode_t a_vp;
6163 	off_t a_foffset;
6164 	char *a_buf;
6165 	size_t a_bufsize;
6166 	size_t *a_verifyblksize;
6167 	void **a_verify_ctxp;
6168 	int a_flags;
6169 	vfs_context_t a_context;
6170 };
6171 #endif
6172 
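/*
 * VNOP_VERIFY: ask the filesystem to verify file data in the given range,
 * or, when 'buf' is NULL and 'verify_block_size' is non-NULL, to report the
 * block size at which it performs verification.  A filesystem that does not
 * implement this VNOP returns ENOTSUP, which is treated as success below
 * with a verification block size of 0.
 */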
6173 errno_t
6174 VNOP_VERIFY(struct vnode *vp, off_t foffset, uint8_t *buf, size_t bufsize,
6175     size_t *verify_block_size, void **verify_ctxp, vnode_verify_flags_t flags,
6176     vfs_context_t ctx)
6177 {
6178 	int _err;
6179 	struct vnop_verify_args a;
6180 
6181 	if (ctx == NULL) {
6182 		ctx = vfs_context_kernel();
6183 	}
6184 	a.a_desc = &vnop_verify_desc;
6185 	a.a_vp = vp;
6186 	a.a_foffset = foffset;
6187 	a.a_buf = buf;
6188 	a.a_bufsize = bufsize;
6189 	a.a_verifyblksize = verify_block_size;
6190 	a.a_flags = flags;
6191 	a.a_verify_ctxp = verify_ctxp;
6192 	a.a_context = ctx;
6193 
6194 	_err = (*vp->v_op[vnop_verify_desc.vdesc_offset])(&a);
6195 	DTRACE_FSINFO(verify, vnode_t, vp);
6196 
6197 	/* It is not an error for a filesystem to not support this VNOP */
6198 	if (_err == ENOTSUP) {
6199 		if (!buf && verify_block_size) {
6200 			*verify_block_size = 0;
6201 		}
6202 
6203 		_err = 0;
6204 	}
6205 
6206 	return _err;
6207 }
6208 
6209 #if 0
6210 /*
6211 *#
6212 *#% blockmap vp L L L
6213 *#
6214 */
6215 struct vnop_blockmap_args {
6216 	struct vnodeop_desc *a_desc;
6217 	vnode_t a_vp;
6218 	off_t a_foffset;
6219 	size_t a_size;
6220 	daddr64_t *a_bpn;
6221 	size_t *a_run;
6222 	void *a_poff;
6223 	int a_flags;
6224 	vfs_context_t a_context;
6225 };
6226 #endif /* 0*/
6227 errno_t
6228 VNOP_BLOCKMAP(struct vnode *vp, off_t foffset, size_t size, daddr64_t *bpn, size_t *run, void *poff, int flags, vfs_context_t ctx)
6229 {
6230 	int _err;
6231 	struct vnop_blockmap_args a;
6232 	size_t localrun = 0;
6233 
6234 	if (ctx == NULL) {
6235 		ctx = vfs_context_current();
6236 	}
6237 	a.a_desc = &vnop_blockmap_desc;
6238 	a.a_vp = vp;
6239 	a.a_foffset = foffset;
6240 	a.a_size = size;
6241 	a.a_bpn = bpn;
6242 	a.a_run = &localrun;
6243 	a.a_poff = poff;
6244 	a.a_flags = flags;
6245 	a.a_context = ctx;
6246 
6247 	_err = (*vp->v_op[vnop_blockmap_desc.vdesc_offset])(&a);
6248 	DTRACE_FSINFO(blockmap, vnode_t, vp);
6249 
6250 	/*
6251 	 * We used a local variable to request information from the underlying
6252 	 * filesystem about the length of the I/O run in question.  If
6253 	 * we get malformed output from the filesystem, we cap it to the length
6254 	 * requested, at most.  Update 'run' on the way out.
6255 	 */
6256 	if (_err == 0) {
6257 		if (localrun > size) {
6258 			localrun = size;
6259 		}
6260 
6261 		if (run) {
6262 			*run = localrun;
6263 		}
6264 	}
6265 
6266 	return _err;
6267 }
6268 
6269 #if 0
6270 struct vnop_strategy_args {
6271 	struct vnodeop_desc *a_desc;
6272 	struct buf *a_bp;
6273 };
6274 
6275 #endif /* 0*/
6276 errno_t
6277 VNOP_STRATEGY(struct buf *bp)
6278 {
6279 	int _err;
6280 	struct vnop_strategy_args a;
6281 	vnode_t vp = buf_vnode(bp);
6282 	a.a_desc = &vnop_strategy_desc;
6283 	a.a_bp = bp;
6284 	_err = (*vp->v_op[vnop_strategy_desc.vdesc_offset])(&a);
6285 	DTRACE_FSINFO(strategy, vnode_t, vp);
6286 	return _err;
6287 }
6288 
6289 #if 0
6290 struct vnop_bwrite_args {
6291 	struct vnodeop_desc *a_desc;
6292 	buf_t a_bp;
6293 };
6294 #endif /* 0*/
6295 errno_t
6296 VNOP_BWRITE(struct buf *bp)
6297 {
6298 	int _err;
6299 	struct vnop_bwrite_args a;
6300 	vnode_t vp = buf_vnode(bp);
6301 	a.a_desc = &vnop_bwrite_desc;
6302 	a.a_bp = bp;
6303 	_err = (*vp->v_op[vnop_bwrite_desc.vdesc_offset])(&a);
6304 	DTRACE_FSINFO(bwrite, vnode_t, vp);
6305 	return _err;
6306 }
6307 
6308 #if 0
6309 struct vnop_kqfilt_add_args {
6310 	struct vnodeop_desc *a_desc;
6311 	struct vnode *a_vp;
6312 	struct knote *a_kn;
6313 	vfs_context_t a_context;
6314 };
6315 #endif
6316 errno_t
6317 VNOP_KQFILT_ADD(struct vnode *vp, struct knote *kn, vfs_context_t ctx)
6318 {
6319 	int _err;
6320 	struct vnop_kqfilt_add_args a;
6321 
6322 	a.a_desc = VDESC(vnop_kqfilt_add);
6323 	a.a_vp = vp;
6324 	a.a_kn = kn;
6325 	a.a_context = ctx;
6326 
6327 	_err = (*vp->v_op[vnop_kqfilt_add_desc.vdesc_offset])(&a);
6328 	DTRACE_FSINFO(kqfilt_add, vnode_t, vp);
6329 
6330 	return _err;
6331 }
6332 
6333 #if 0
6334 struct vnop_kqfilt_remove_args {
6335 	struct vnodeop_desc *a_desc;
6336 	struct vnode *a_vp;
6337 	uintptr_t a_ident;
6338 	vfs_context_t a_context;
6339 };
6340 #endif
6341 errno_t
6342 VNOP_KQFILT_REMOVE(struct vnode *vp, uintptr_t ident, vfs_context_t ctx)
6343 {
6344 	int _err;
6345 	struct vnop_kqfilt_remove_args a;
6346 
6347 	a.a_desc = VDESC(vnop_kqfilt_remove);
6348 	a.a_vp = vp;
6349 	a.a_ident = ident;
6350 	a.a_context = ctx;
6351 
6352 	_err = (*vp->v_op[vnop_kqfilt_remove_desc.vdesc_offset])(&a);
6353 	DTRACE_FSINFO(kqfilt_remove, vnode_t, vp);
6354 
6355 	return _err;
6356 }
6357 
6358 errno_t
6359 VNOP_MONITOR(vnode_t vp, uint32_t events, uint32_t flags, void *handle, vfs_context_t ctx)
6360 {
6361 	int _err;
6362 	struct vnop_monitor_args a;
6363 
6364 	a.a_desc = VDESC(vnop_monitor);
6365 	a.a_vp = vp;
6366 	a.a_events = events;
6367 	a.a_flags = flags;
6368 	a.a_handle = handle;
6369 	a.a_context = ctx;
6370 
6371 	_err = (*vp->v_op[vnop_monitor_desc.vdesc_offset])(&a);
6372 	DTRACE_FSINFO(monitor, vnode_t, vp);
6373 
6374 	return _err;
6375 }
6376 
6377 #if 0
6378 struct vnop_setlabel_args {
6379 	struct vnodeop_desc *a_desc;
6380 	struct vnode *a_vp;
6381 	struct label *a_vl;
6382 	vfs_context_t a_context;
6383 };
6384 #endif
6385 errno_t
6386 VNOP_SETLABEL(struct vnode *vp, struct label *label, vfs_context_t ctx)
6387 {
6388 	int _err;
6389 	struct vnop_setlabel_args a;
6390 
6391 	a.a_desc = VDESC(vnop_setlabel);
6392 	a.a_vp = vp;
6393 	a.a_vl = label;
6394 	a.a_context = ctx;
6395 
6396 	_err = (*vp->v_op[vnop_setlabel_desc.vdesc_offset])(&a);
6397 	DTRACE_FSINFO(setlabel, vnode_t, vp);
6398 
6399 	return _err;
6400 }
6401 
6402 
6403 #if NAMEDSTREAMS
6404 /*
6405  * Get a named stream
6406  */
6407 errno_t
6408 VNOP_GETNAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, enum nsoperation operation, int flags, vfs_context_t ctx)
6409 {
6410 	int _err;
6411 	struct vnop_getnamedstream_args a;
6412 
6413 	a.a_desc = &vnop_getnamedstream_desc;
6414 	a.a_vp = vp;
6415 	a.a_svpp = svpp;
6416 	a.a_name = name;
6417 	a.a_operation = operation;
6418 	a.a_flags = flags;
6419 	a.a_context = ctx;
6420 
6421 	_err = (*vp->v_op[vnop_getnamedstream_desc.vdesc_offset])(&a);
6422 	DTRACE_FSINFO(getnamedstream, vnode_t, vp);
6423 	return _err;
6424 }
6425 
6426 /*
6427  * Create a named stream
6428  */
6429 errno_t
6430 VNOP_MAKENAMEDSTREAM(vnode_t vp, vnode_t *svpp, const char *name, int flags, vfs_context_t ctx)
6431 {
6432 	int _err;
6433 	struct vnop_makenamedstream_args a;
6434 
6435 	a.a_desc = &vnop_makenamedstream_desc;
6436 	a.a_vp = vp;
6437 	a.a_svpp = svpp;
6438 	a.a_name = name;
6439 	a.a_flags = flags;
6440 	a.a_context = ctx;
6441 
6442 	_err = (*vp->v_op[vnop_makenamedstream_desc.vdesc_offset])(&a);
6443 	DTRACE_FSINFO(makenamedstream, vnode_t, vp);
6444 	return _err;
6445 }
6446 
6447 
6448 /*
6449  * Remove a named stream
6450  */
6451 errno_t
6452 VNOP_REMOVENAMEDSTREAM(vnode_t vp, vnode_t svp, const char *name, int flags, vfs_context_t ctx)
6453 {
6454 	int _err;
6455 	struct vnop_removenamedstream_args a;
6456 
6457 	a.a_desc = &vnop_removenamedstream_desc;
6458 	a.a_vp = vp;
6459 	a.a_svp = svp;
6460 	a.a_name = name;
6461 	a.a_flags = flags;
6462 	a.a_context = ctx;
6463 
6464 	_err = (*vp->v_op[vnop_removenamedstream_desc.vdesc_offset])(&a);
6465 	DTRACE_FSINFO(removenamedstream, vnode_t, vp);
6466 	return _err;
6467 }
6468 #endif
6469