/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vnode.h	8.7 (Berkeley) 2/4/94
 * $FreeBSD$
 */

#ifndef _SYS_VNODE_H_
#define	_SYS_VNODE_H_

#include <sys/bufobj.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>
#include <sys/rangelock.h>
#include <sys/selinfo.h>
#include <sys/uio.h>
#include <sys/acl.h>
#include <sys/ktr.h>
#include <sys/_seqc.h>

/*
 * The vnode is the focus of all file activity in UNIX.  There is a
 * unique vnode allocated for each active file, each current directory,
 * each mounted-on file, text file, and the root.
 */

/*
 * Vnode types.  VNON means no type.
 */
enum vtype	{ VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO, VBAD,
		  VMARKER };

enum vgetstate	{ VGET_NONE, VGET_HOLDCNT, VGET_USECOUNT };
/*
 * Each underlying filesystem allocates its own private area and hangs
 * it from v_data.  If non-null, this area is freed in getnewvnode().
 */

struct namecache;
struct cache_fpl;

struct vpollinfo {
	struct	mtx vpi_lock;		/* lock to protect below */
	struct	selinfo vpi_selinfo;	/* identity of poller(s) */
	short	vpi_events;		/* what they are looking for */
	short	vpi_revents;		/* what has happened */
};

/*
 * Reading or writing any of these items requires holding the appropriate lock.
 *
 * Lock reference:
 *	c - namecache mutex
 *	i - interlock
 *	l - mp mnt_listmtx or freelist mutex
 *	I - updated with atomics, 0->1 and 1->0 transitions with interlock held
 *	m - mount point interlock
 *	p - pollinfo lock
 *	u - Only a reference to the vnode is needed to read.
 *	v - vnode lock
 *
 * Vnodes may be found on many lists.  The general way to deal with operating
 * on a vnode that is on a list is:
 *	1) Lock the list and find the vnode.
 *	2) Lock interlock so that the vnode does not go away.
 *	3) Unlock the list to avoid lock order reversals.
 *	4) vget with LK_INTERLOCK and check for ENOENT, or
 *	5) Check for DOOMED if the vnode lock is not required.
 *	6) Perform your operation, then vput().
 */
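/*
 * An illustrative sketch of that sequence (a hedged example, not kernel
 * code; "foo_list" and "foo_list_mtx" are hypothetical names):
 *
 *	mtx_lock(&foo_list_mtx);			(1) lock the list
 *	vp = TAILQ_FIRST(&foo_list);			    ... find the vnode
 *	VI_LOCK(vp);					(2) take the interlock
 *	mtx_unlock(&foo_list_mtx);			(3) drop the list lock
 *	error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK);	(4) may return ENOENT
 *	if (error == 0) {
 *		... operate on the vnode ...		(6)
 *		vput(vp);
 *	}
 */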

#if defined(_KERNEL) || defined(_KVM_VNODE)

struct vnode {
	/*
	 * Fields which define the identity of the vnode.  These fields are
	 * owned by the filesystem (XXX: and vgone() ?)
	 */
	enum	vtype v_type:8;			/* u vnode type */
	short	v_irflag;			/* i frequently read flags */
	seqc_t	v_seqc;				/* i modification count */
	uint32_t v_nchash;			/* u namecache hash */
	struct	vop_vector *v_op;		/* u vnode operations vector */
	void	*v_data;			/* u private data for fs */

	/*
	 * Filesystem instance stuff
	 */
	struct	mount *v_mount;			/* u ptr to vfs we are in */
	TAILQ_ENTRY(vnode) v_nmntvnodes;	/* m vnodes for mount point */

	/*
	 * Type specific fields, only one applies to any given vnode.
	 */
	union {
		struct mount	*v_mountedhere;	/* v ptr to mountpoint (VDIR) */
		struct unpcb	*v_unpcb;	/* v unix domain net (VSOCK) */
		struct cdev	*v_rdev;	/* v device (VCHR, VBLK) */
		struct fifoinfo	*v_fifoinfo;	/* v fifo (VFIFO) */
	};

	/*
	 * vfs_hash: (mount + inode) -> vnode hash.  The hash value
	 * itself is grouped with other int fields, to avoid padding.
	 */
	LIST_ENTRY(vnode)	v_hashlist;

	/*
	 * VFS_namecache stuff
	 */
	LIST_HEAD(, namecache) v_cache_src;	/* c Cache entries from us */
	TAILQ_HEAD(, namecache) v_cache_dst;	/* c Cache entries to us */
	struct namecache *v_cache_dd;		/* c Cache entry for .. vnode */

	/*
	 * Locking
	 */
	struct	lock v_lock;			/* u (if the fs doesn't have one) */
	struct	mtx v_interlock;		/* lock for "i" things */
	struct	lock *v_vnlock;			/* u pointer to vnode lock */

	/*
	 * The machinery of being a vnode
	 */
	TAILQ_ENTRY(vnode) v_vnodelist;		/* l vnode lists */
	TAILQ_ENTRY(vnode) v_lazylist;		/* l vnode lazy list */
	struct bufobj	v_bufobj;		/* * Buffer cache object */

	/*
	 * Hooks for various subsystems and features.
	 */
	struct vpollinfo *v_pollinfo;		/* i Poll events, p for *v_pi */
	struct label *v_label;			/* MAC label for vnode */
	struct lockf *v_lockf;			/* Byte-level advisory lock list */
	struct rangelock v_rl;			/* Byte-range lock */

	/*
	 * clustering stuff
	 */
	daddr_t	v_cstart;			/* v start block of cluster */
	daddr_t	v_lasta;			/* v last allocation */
	daddr_t	v_lastw;			/* v last write */
	int	v_clen;				/* v length of cur. cluster */

	u_int	v_holdcnt;			/* I prevents recycling. */
	u_int	v_usecount;			/* I ref count of users */
	u_short	v_iflag;			/* i vnode flags (see below) */
	u_short	v_vflag;			/* v vnode flags */
	u_short	v_mflag;			/* l mnt-specific vnode flags */
	short	v_dbatchcpu;			/* i LRU requeue deferral batch */
	int	v_writecount;			/* I ref count of writers or
						   (negative) text users */
	int	v_seqc_users;			/* i modifications pending */
	u_int	v_hash;
};

#endif /* defined(_KERNEL) || defined(_KVM_VNODE) */

#define	bo2vnode(bo)	__containerof((bo), struct vnode, v_bufobj)

/* XXX: These are temporary to avoid a source sweep at this time */
#define	v_object	v_bufobj.bo_object
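/*
 * A hedged illustration of the annotations above: "i" fields such as
 * v_iflag are only written with the interlock held, while "I" fields such
 * as v_holdcnt and v_usecount are normally manipulated through
 * vhold()/vdrop() and vref()/vrele() rather than being touched directly:
 *
 *	VI_LOCK(vp);
 *	vp->v_iflag |= VI_OWEINACT;	(VI_* and VI_LOCK() are defined below)
 *	VI_UNLOCK(vp);
 */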

/*
 * Userland version of struct vnode, for sysctl.
 */
struct xvnode {
	size_t	xv_size;			/* sizeof(struct xvnode) */
	void	*xv_vnode;			/* address of real vnode */
	u_long	xv_flag;			/* vnode vflags */
	int	xv_usecount;			/* reference count of users */
	int	xv_writecount;			/* reference count of writers */
	int	xv_holdcnt;			/* page & buffer references */
	u_long	xv_id;				/* capability identifier */
	void	*xv_mount;			/* address of parent mount */
	long	xv_numoutput;			/* num of writes in progress */
	enum	vtype xv_type;			/* vnode type */
	union {
		void	*xvu_socket;		/* unpcb, if VSOCK */
		void	*xvu_fifo;		/* fifo, if VFIFO */
		dev_t	xvu_rdev;		/* maj/min, if VBLK/VCHR */
		struct {
			dev_t	xvu_dev;	/* device, if VDIR/VREG/VLNK */
			ino_t	xvu_ino;	/* id, if VDIR/VREG/VLNK */
		} xv_uns;
	} xv_un;
};
#define	xv_socket	xv_un.xvu_socket
#define	xv_fifo		xv_un.xvu_fifo
#define	xv_rdev		xv_un.xvu_rdev
#define	xv_dev		xv_un.xv_uns.xvu_dev
#define	xv_ino		xv_un.xv_uns.xvu_ino

/* We don't need to lock the knlist */
#define	VN_KNLIST_EMPTY(vp) ((vp)->v_pollinfo == NULL ||		\
	    KNLIST_EMPTY(&(vp)->v_pollinfo->vpi_selinfo.si_note))

#define	VN_KNOTE(vp, b, a)						\
	do {								\
		if (!VN_KNLIST_EMPTY(vp))				\
			KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b), \
			    (a) | KNF_NOKQLOCK);			\
	} while (0)
#define	VN_KNOTE_LOCKED(vp, b)		VN_KNOTE(vp, b, KNF_LISTLOCKED)
#define	VN_KNOTE_UNLOCKED(vp, b)	VN_KNOTE(vp, b, 0)
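/*
 * For example (illustrative only), code that has just updated a vnode's
 * metadata might notify registered kevent listeners with:
 *
 *	VN_KNOTE_UNLOCKED(vp, NOTE_ATTRIB);
 *
 * where NOTE_ATTRIB comes from <sys/event.h>.
 */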

/*
 * Vnode flags.
 *	VI flags are protected by interlock and live in v_iflag
 *	VV flags are protected by the vnode lock and live in v_vflag
 *
 *	VIRF_DOOMED is doubly protected by the interlock and vnode lock.  Both
 *	are required for writing but the status may be checked with either.
 */
#define	VHOLD_NO_SMR	(1<<29)	/* Disable vhold_smr */
#define	VHOLD_ALL_FLAGS	(VHOLD_NO_SMR)

#define	VIRF_DOOMED	0x0001	/* This vnode is being recycled */
#define	VIRF_PGREAD	0x0002	/* Direct reads from the page cache are permitted,
				   never cleared once set */
#define	VIRF_MOUNTPOINT	0x0004	/* This vnode is mounted on */

#define	VI_TEXT_REF	0x0001	/* Text ref grabbed use ref */
#define	VI_MOUNT	0x0002	/* Mount in progress */
#define	VI_DOINGINACT	0x0004	/* VOP_INACTIVE is in progress */
#define	VI_OWEINACT	0x0008	/* Need to call inactive */
#define	VI_DEFINACT	0x0010	/* deferred inactive */

#define	VV_ROOT		0x0001	/* root of its filesystem */
#define	VV_ISTTY	0x0002	/* vnode represents a tty */
#define	VV_NOSYNC	0x0004	/* unlinked, stop syncing */
#define	VV_ETERNALDEV	0x0008	/* device that is never destroyed */
#define	VV_CACHEDLABEL	0x0010	/* Vnode has valid cached MAC label */
#define	VV_VMSIZEVNLOCK	0x0020	/* object size check requires vnode lock */
#define	VV_COPYONWRITE	0x0040	/* vnode is doing copy-on-write */
#define	VV_SYSTEM	0x0080	/* vnode being used by kernel */
#define	VV_PROCDEP	0x0100	/* vnode is process dependent */
#define	VV_NOKNOTE	0x0200	/* don't activate knotes on this vnode */
#define	VV_DELETED	0x0400	/* should be removed */
#define	VV_MD		0x0800	/* vnode backs the md device */
#define	VV_FORCEINSMQ	0x1000	/* force the insmntque to succeed */
#define	VV_READLINK	0x2000	/* fdescfs linux vnode */
#define	VV_UNREF	0x4000	/* vunref, do not drop lock in inactive() */

#define	VMP_LAZYLIST	0x0001	/* Vnode is on mnt's lazy list */

/*
 * Vnode attributes.  A field value of VNOVAL represents a field whose value
 * is unavailable (getattr) or which is not to be changed (setattr).
 */
struct vattr {
	enum vtype	va_type;	/* vnode type (for create) */
	u_short		va_mode;	/* file's access mode and type */
	u_short		va_padding0;
	uid_t		va_uid;		/* owner user id */
	gid_t		va_gid;		/* owner group id */
	nlink_t		va_nlink;	/* number of references to file */
	dev_t		va_fsid;	/* filesystem id */
	ino_t		va_fileid;	/* file id */
	u_quad_t	va_size;	/* file size in bytes */
	long		va_blocksize;	/* blocksize preferred for i/o */
	struct timespec	va_atime;	/* time of last access */
	struct timespec	va_mtime;	/* time of last modification */
	struct timespec	va_ctime;	/* time file changed */
	struct timespec	va_birthtime;	/* time file created */
	u_long		va_gen;		/* generation number of file */
	u_long		va_flags;	/* flags defined for file */
	dev_t		va_rdev;	/* device the special file represents */
	u_quad_t	va_bytes;	/* bytes of disk space held by file */
	u_quad_t	va_filerev;	/* file modification number */
	u_int		va_vaflags;	/* operations flags, see below */
	long		va_spare;	/* remain quad aligned */
};

/*
 * Flags for va_vaflags.
 */
#define	VA_UTIMES_NULL	0x01		/* utimes argument was NULL */
#define	VA_EXCLUSIVE	0x02		/* exclusive create request */
#define	VA_SYNC		0x04		/* O_SYNC truncation */
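/*
 * A typical (illustrative) setattr sequence: VATTR_NULL(), defined below,
 * resets every field to VNOVAL; the caller fills in only the fields to be
 * changed and hands the result to VOP_SETATTR(9):
 *
 *	struct vattr va;
 *
 *	VATTR_NULL(&va);
 *	va.va_size = newsize;
 *	error = VOP_SETATTR(vp, &va, cred);
 */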

/*
 * Flags for ioflag. (high 16 bits used to ask for read-ahead and
 * help with write clustering)
 * NB: IO_NDELAY and IO_DIRECT are linked to fcntl.h
 */
#define	IO_UNIT		0x0001		/* do I/O as atomic unit */
#define	IO_APPEND	0x0002		/* append write to end */
#define	IO_NDELAY	0x0004		/* FNDELAY flag set in file table */
#define	IO_NODELOCKED	0x0008		/* underlying node already locked */
#define	IO_ASYNC	0x0010		/* bawrite rather than bdwrite */
#define	IO_VMIO		0x0020		/* data already in VMIO space */
#define	IO_INVAL	0x0040		/* invalidate after I/O */
#define	IO_SYNC		0x0080		/* do I/O synchronously */
#define	IO_DIRECT	0x0100		/* attempt to bypass buffer cache */
#define	IO_NOREUSE	0x0200		/* VMIO data won't be reused */
#define	IO_EXT		0x0400		/* operate on external attributes */
#define	IO_NORMAL	0x0800		/* operate on regular data */
#define	IO_NOMACCHECK	0x1000		/* MAC checks unnecessary */
#define	IO_BUFLOCKED	0x2000		/* ffs flag; indir buf is locked */
#define	IO_RANGELOCKED	0x4000		/* range locked */
#define	IO_DATASYNC	0x8000		/* do only data I/O synchronously */

#define	IO_SEQMAX	0x7F		/* seq heuristic max value */
#define	IO_SEQSHIFT	16		/* seq heuristic in upper 16 bits */

/*
 * Flags for accmode_t.
 */
#define	VEXEC			000000000100 /* execute/search permission */
#define	VWRITE			000000000200 /* write permission */
#define	VREAD			000000000400 /* read permission */
#define	VADMIN			000000010000 /* being the file owner */
#define	VAPPEND			000000040000 /* permission to write/append */
/*
 * VEXPLICIT_DENY makes VOP_ACCESSX(9) return EPERM or EACCES only
 * if permission was denied explicitly, by a "deny" rule in NFSv4 ACL,
 * and 0 otherwise.  This never happens with ordinary unix access rights
 * or POSIX.1e ACLs.  Obviously, VEXPLICIT_DENY must be OR-ed with
 * some other V* constant.
 */
#define	VEXPLICIT_DENY		000000100000
#define	VREAD_NAMED_ATTRS	000000200000 /* not used */
#define	VWRITE_NAMED_ATTRS	000000400000 /* not used */
#define	VDELETE_CHILD		000001000000
#define	VREAD_ATTRIBUTES	000002000000 /* permission to stat(2) */
#define	VWRITE_ATTRIBUTES	000004000000 /* change {m,c,a}time */
#define	VDELETE			000010000000
#define	VREAD_ACL		000020000000 /* read ACL and file mode */
#define	VWRITE_ACL		000040000000 /* change ACL and/or file mode */
#define	VWRITE_OWNER		000100000000 /* change file owner */
#define	VSYNCHRONIZE		000200000000 /* not used */
#define	VCREAT			000400000000 /* creating new file */
#define	VVERIFY			001000000000 /* verification required */

/*
 * Permissions that were traditionally granted only to the file owner.
 */
#define	VADMIN_PERMS	(VADMIN | VWRITE_ATTRIBUTES | VWRITE_ACL | \
    VWRITE_OWNER)

/*
 * Permissions that were traditionally granted to everyone.
 */
#define	VSTAT_PERMS	(VREAD_ATTRIBUTES | VREAD_ACL)

/*
 * Permissions that allow changing the state of the file in any way.
 */
#define	VMODIFY_PERMS	(VWRITE | VAPPEND | VADMIN_PERMS | VDELETE_CHILD | \
    VDELETE)

/*
 * Token indicating no attribute value yet assigned.
 */
#define	VNOVAL	(-1)

/*
 * LK_TIMELOCK timeout for vnode locks (used mainly by the pageout daemon)
 */
#define	VLKTIMEOUT	(hz / 20 + 1)

#ifdef _KERNEL

#ifdef MALLOC_DECLARE
MALLOC_DECLARE(M_VNODE);
#endif

extern u_int ncsizefactor;
extern const u_int io_hold_cnt;

/*
 * Convert between vnode types and inode formats (since POSIX.1
 * defines mode word of stat structure in terms of inode formats).
 */
extern enum vtype	iftovt_tab[];
extern int		vttoif_tab[];
#define	IFTOVT(mode)	(iftovt_tab[((mode) & S_IFMT) >> 12])
#define	VTTOIF(indx)	(vttoif_tab[(int)(indx)])
#define	MAKEIMODE(indx, mode)	(int)(VTTOIF(indx) | (mode))
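/*
 * For instance (an illustrative sketch; "ip" stands for a filesystem's
 * private inode), a create path may build an on-disk mode from a vattr and
 * later map the inode format back to a vnode type:
 *
 *	ip->i_mode = MAKEIMODE(vap->va_type, vap->va_mode);
 *	vp->v_type = IFTOVT(ip->i_mode);
 */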

/*
 * Flags to various vnode functions.
 */
#define	SKIPSYSTEM	0x0001	/* vflush: skip vnodes marked VSYSTEM */
#define	FORCECLOSE	0x0002	/* vflush: force file closure */
#define	WRITECLOSE	0x0004	/* vflush: only close writable files */
#define	EARLYFLUSH	0x0008	/* vflush: early call for ffs_flushfiles */
#define	V_SAVE		0x0001	/* vinvalbuf: sync file first */
#define	V_ALT		0x0002	/* vinvalbuf: invalidate only alternate bufs */
#define	V_NORMAL	0x0004	/* vinvalbuf: invalidate only regular bufs */
#define	V_CLEANONLY	0x0008	/* vinvalbuf: invalidate only clean bufs */
#define	V_VMIO		0x0010	/* vinvalbuf: called during pageout */
#define	V_ALLOWCLEAN	0x0020	/* vinvalbuf: allow clean buffers after flush */
#define	REVOKEALL	0x0001	/* vop_revoke: revoke all aliases */
#define	V_WAIT		0x0001	/* vn_start_write: sleep for suspend */
#define	V_NOWAIT	0x0002	/* vn_start_write: don't sleep for suspend */
#define	V_XSLEEP	0x0004	/* vn_start_write: just return after sleep */
#define	V_MNTREF	0x0010	/* vn_start_write: mp is already ref-ed */

#define	VR_START_WRITE	0x0001	/* vfs_write_resume: start write atomically */
#define	VR_NO_SUSPCLR	0x0002	/* vfs_write_resume: do not clear suspension */

#define	VS_SKIP_UNMOUNT	0x0001	/* vfs_write_suspend: fail if the
				   filesystem is being unmounted */

#define	VREF(vp)	vref(vp)

#ifdef DIAGNOSTIC
#define	VATTR_NULL(vap)	vattr_null(vap)
#else
#define	VATTR_NULL(vap)	(*(vap) = va_null)	/* initialize a vattr */
#endif /* DIAGNOSTIC */

#define	NULLVP	((struct vnode *)NULL)

/*
 * Global vnode data.
 */
extern	struct vnode *rootvnode;	/* root (i.e. "/") vnode */
extern	struct mount *rootdevmp;	/* "/dev" mount */
extern	u_long desiredvnodes;		/* number of vnodes desired */
extern	struct uma_zone *namei_zone;
extern	struct vattr va_null;		/* predefined null vattr structure */

extern	u_int vn_lock_pair_pause_max;

#define	VI_LOCK(vp)	mtx_lock(&(vp)->v_interlock)
#define	VI_LOCK_FLAGS(vp, flags) mtx_lock_flags(&(vp)->v_interlock, (flags))
#define	VI_TRYLOCK(vp)	mtx_trylock(&(vp)->v_interlock)
#define	VI_UNLOCK(vp)	mtx_unlock(&(vp)->v_interlock)
#define	VI_MTX(vp)	(&(vp)->v_interlock)

#define	VN_LOCK_AREC(vp)	lockallowrecurse((vp)->v_vnlock)
#define	VN_LOCK_ASHARE(vp)	lockallowshare((vp)->v_vnlock)
#define	VN_LOCK_DSHARE(vp)	lockdisableshare((vp)->v_vnlock)

#endif /* _KERNEL */

/*
 * Mods for extensibility.
 */

/*
 * Flags for vdesc_flags:
 */
#define	VDESC_MAX_VPS	16
/* Low order 16 flag bits are reserved for willrele flags for vp arguments. */
#define	VDESC_VP0_WILLRELE	0x0001
#define	VDESC_VP1_WILLRELE	0x0002
#define	VDESC_VP2_WILLRELE	0x0004
#define	VDESC_VP3_WILLRELE	0x0008

/*
 * A generic structure.
 * This can be used by bypass routines to identify generic arguments.
 */
struct vop_generic_args {
	struct vnodeop_desc *a_desc;
	/* other random data follows, presumably */
};

typedef int vop_bypass_t(struct vop_generic_args *);

/*
 * VDESC_NO_OFFSET is used to identify the end of the offset list
 * and in places where no such field exists.
 */
#define	VDESC_NO_OFFSET	-1

/*
 * This structure describes the vnode operation taking place.
 */
struct vnodeop_desc {
	char	*vdesc_name;		/* a readable name for debugging */
	int	 vdesc_flags;		/* VDESC_* flags */
	int	 vdesc_vop_offset;
	vop_bypass_t	*vdesc_call;	/* Function to call */

	/*
	 * These ops are used by bypass routines to map and locate arguments.
	 * Creds and procs are not needed in bypass routines, but sometimes
	 * they are useful to (for example) transport layers.
	 * Nameidata is useful because it has a cred in it.
	 */
	int	*vdesc_vp_offsets;	/* list ended by VDESC_NO_OFFSET */
	int	vdesc_vpp_offset;	/* return vpp location */
	int	vdesc_cred_offset;	/* cred location, if any */
	int	vdesc_thread_offset;	/* thread location, if any */
	int	vdesc_componentname_offset; /* if any */
};

#ifdef _KERNEL
/*
 * A list of all the operation descs.
 */
extern struct vnodeop_desc *vnodeop_descs[];

#define	VOPARG_OFFSETOF(s_type, field)	__offsetof(s_type, field)
#define	VOPARG_OFFSETTO(s_type, s_offset, struct_p) \
    ((s_type)(((char*)(struct_p)) + (s_offset)))

#ifdef DEBUG_VFS_LOCKS
/*
 * Support code to aid in debugging VFS locking problems.  Not totally
 * reliable since if the thread sleeps between changing the lock
 * state and checking it with the assert, some other thread could
 * change the state.  They are good enough for debugging a single
 * filesystem using a single-threaded test.  Note that the unreliability is
 * limited to false negatives; efforts were made to ensure that false
 * positives cannot occur.
 */
void	assert_vi_locked(struct vnode *vp, const char *str);
void	assert_vi_unlocked(struct vnode *vp, const char *str);
void	assert_vop_elocked(struct vnode *vp, const char *str);
void	assert_vop_locked(struct vnode *vp, const char *str);
void	assert_vop_unlocked(struct vnode *vp, const char *str);

#define	ASSERT_VI_LOCKED(vp, str)	assert_vi_locked((vp), (str))
#define	ASSERT_VI_UNLOCKED(vp, str)	assert_vi_unlocked((vp), (str))
#define	ASSERT_VOP_ELOCKED(vp, str)	assert_vop_elocked((vp), (str))
#define	ASSERT_VOP_LOCKED(vp, str)	assert_vop_locked((vp), (str))
#define	ASSERT_VOP_UNLOCKED(vp, str)	assert_vop_unlocked((vp), (str))

#define	ASSERT_VOP_IN_SEQC(vp)		do {				\
	struct vnode *_vp = (vp);					\
									\
	VNPASS(seqc_in_modify(_vp->v_seqc), _vp);			\
} while (0)

#define	ASSERT_VOP_NOT_IN_SEQC(vp)	do {				\
	struct vnode *_vp = (vp);					\
									\
	VNPASS(!seqc_in_modify(_vp->v_seqc), _vp);			\
} while (0)

#else /* !DEBUG_VFS_LOCKS */

#define	ASSERT_VI_LOCKED(vp, str)	((void)0)
#define	ASSERT_VI_UNLOCKED(vp, str)	((void)0)
#define	ASSERT_VOP_ELOCKED(vp, str)	((void)0)
#define	ASSERT_VOP_LOCKED(vp, str)	((void)0)
#define	ASSERT_VOP_UNLOCKED(vp, str)	((void)0)

#define	ASSERT_VOP_IN_SEQC(vp)		((void)0)
#define	ASSERT_VOP_NOT_IN_SEQC(vp)	((void)0)

#endif /* DEBUG_VFS_LOCKS */
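/*
 * VOP implementations commonly use these to assert their locking contract
 * on entry, e.g. (illustrative; "foo_write" is a hypothetical routine):
 *
 *	ASSERT_VOP_ELOCKED(ap->a_vp, "foo_write");
 */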

/*
 * This call works for vnodes in the kernel.
 */
#define	VCALL(c) ((c)->a_desc->vdesc_call(c))

#define	DOINGASYNC(vp)						\
	(((vp)->v_mount->mnt_kern_flag & MNTK_ASYNC) != 0 &&	\
	 ((curthread->td_pflags & TDP_SYNCIO) == 0))

/*
 * VMIO support inline
 */

extern int vmiodirenable;

static __inline int
vn_canvmio(struct vnode *vp)
{
	if (vp && (vp->v_type == VREG || (vmiodirenable && vp->v_type == VDIR)))
		return (TRUE);
	return (FALSE);
}

/*
 * Finally, include the default set of vnode operations.
 */
typedef void vop_getpages_iodone_t(void *, vm_page_t *, int, int);
#include "vnode_if.h"

/* vn_open_flags */
#define	VN_OPEN_NOAUDIT		0x00000001
#define	VN_OPEN_NOCAPCHECK	0x00000002
#define	VN_OPEN_NAMECACHE	0x00000004
#define	VN_OPEN_INVFS		0x00000008

/*
 * Public vnode manipulation functions.
 */
struct componentname;
struct file;
struct mount;
struct nameidata;
struct ostat;
struct freebsd11_stat;
struct thread;
struct proc;
struct stat;
struct nstat;
struct ucred;
struct uio;
struct vattr;
struct vfsops;
struct vnode;

typedef int (*vn_get_ino_t)(struct mount *, void *, int, struct vnode **);

int	bnoreuselist(struct bufv *bufv, struct bufobj *bo, daddr_t startn,
	    daddr_t endn);
/* cache_* may belong in namei.h. */
void	cache_changesize(u_long newhashsize);
#define	cache_enter(dvp, vp, cnp)					\
	cache_enter_time(dvp, vp, cnp, NULL, NULL)
void	cache_enter_time(struct vnode *dvp, struct vnode *vp,
	    struct componentname *cnp, struct timespec *tsp,
	    struct timespec *dtsp);
int	cache_lookup(struct vnode *dvp, struct vnode **vpp,
	    struct componentname *cnp, struct timespec *tsp, int *ticksp);
void	cache_vnode_init(struct vnode *vp);
void	cache_purge(struct vnode *vp);
void	cache_purge_vgone(struct vnode *vp);
void	cache_purge_negative(struct vnode *vp);
void	cache_purgevfs(struct mount *mp);
char	*cache_symlink_alloc(size_t size, int flags);
void	cache_symlink_free(char *string, size_t size);
int	cache_symlink_resolve(struct cache_fpl *fpl, const char *string,
	    size_t len);
void	cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
    struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp);
void	cache_vop_rmdir(struct vnode *dvp, struct vnode *vp);
#ifdef INVARIANTS
void	cache_validate(struct vnode *dvp, struct vnode *vp,
	    struct componentname *cnp);
#else
static inline void
cache_validate(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
}
#endif
void	cache_fast_lookup_enabled_recalc(void);
int	change_dir(struct vnode *vp, struct thread *td);
void	cvtstat(struct stat *st, struct ostat *ost);
void	freebsd11_cvtnstat(struct stat *sb, struct nstat *nsb);
int	freebsd11_cvtstat(struct stat *st, struct freebsd11_stat *ost);
int	getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops,
	    struct vnode **vpp);
void	getnewvnode_reserve(void);
void	getnewvnode_drop_reserve(void);
int	insmntque1(struct vnode *vp, struct mount *mp,
	    void (*dtr)(struct vnode *, void *), void *dtr_arg);
int	insmntque(struct vnode *vp, struct mount *mp);
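/*
 * A hedged sketch of how a filesystem's vget/lookup path typically obtains
 * a fresh vnode ("myfs", myfs_vnodeops and ip are placeholder names, and
 * error handling is abbreviated):
 *
 *	error = getnewvnode("myfs", mp, &myfs_vnodeops, &vp);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	vp->v_data = ip;
 *	error = insmntque(vp, mp);
 *	if (error != 0)
 *		return (error);
 */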
u_quad_t init_va_filerev(void);
int	speedup_syncer(void);
int	vn_vptocnp(struct vnode **vp, char *buf, size_t *buflen);
int	vn_getcwd(char *buf, char **retbuf, size_t *buflen);
int	vn_fullpath(struct vnode *vp, char **retbuf, char **freebuf);
int	vn_fullpath_global(struct vnode *vp, char **retbuf, char **freebuf);
struct vnode *
	vn_dir_dd_ino(struct vnode *vp);
int	vn_commname(struct vnode *vn, char *buf, u_int buflen);
int	vn_path_to_global_path(struct thread *td, struct vnode *vp,
	    char *path, u_int pathlen);
int	vaccess(enum vtype type, mode_t file_mode, uid_t file_uid,
	    gid_t file_gid, accmode_t accmode, struct ucred *cred);
int	vaccess_vexec_smr(mode_t file_mode, uid_t file_uid, gid_t file_gid,
	    struct ucred *cred);
int	vaccess_acl_nfs4(enum vtype type, uid_t file_uid, gid_t file_gid,
	    struct acl *aclp, accmode_t accmode, struct ucred *cred);
int	vaccess_acl_posix1e(enum vtype type, uid_t file_uid,
	    gid_t file_gid, struct acl *acl, accmode_t accmode,
	    struct ucred *cred);
void	vattr_null(struct vattr *vap);
void	vlazy(struct vnode *);
void	vdrop(struct vnode *);
void	vdropl(struct vnode *);
int	vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
int	vget(struct vnode *vp, int flags);
enum vgetstate	vget_prep_smr(struct vnode *vp);
enum vgetstate	vget_prep(struct vnode *vp);
int	vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
void	vget_finish_ref(struct vnode *vp, enum vgetstate vs);
void	vget_abort(struct vnode *vp, enum vgetstate vs);
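/*
 * vget_prep()/vget_finish() split the traditional vget() into a cheap phase
 * that can run while some other lock (such as a list mutex) is still held
 * and a second phase that performs the actual locking.  A hedged sketch,
 * with "foo_list_mtx" again a hypothetical lock:
 *
 *	vs = vget_prep(vp);
 *	mtx_unlock(&foo_list_mtx);
 *	error = vget_finish(vp, LK_SHARED, vs);
 *	if (error != 0)
 *		... the vnode was doomed ...
 */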
void	vgone(struct vnode *vp);
void	vhold(struct vnode *);
void	vholdnz(struct vnode *);
bool	vhold_smr(struct vnode *);
int	vinactive(struct vnode *vp);
int	vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
int	vtruncbuf(struct vnode *vp, off_t length, int blksize);
void	v_inval_buf_range(struct vnode *vp, daddr_t startlbn, daddr_t endlbn,
	    int blksize);
void	vunref(struct vnode *);
void	vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
int	vrecycle(struct vnode *vp);
int	vrecyclel(struct vnode *vp);
int	vn_bmap_seekhole(struct vnode *vp, u_long cmd, off_t *off,
	    struct ucred *cred);
int	vn_close(struct vnode *vp,
	    int flags, struct ucred *file_cred, struct thread *td);
int	vn_copy_file_range(struct vnode *invp, off_t *inoffp,
	    struct vnode *outvp, off_t *outoffp, size_t *lenp,
	    unsigned int flags, struct ucred *incred, struct ucred *outcred,
	    struct thread *fsize_td);
void	vn_finished_write(struct mount *mp);
void	vn_finished_secondary_write(struct mount *mp);
int	vn_fsync_buf(struct vnode *vp, int waitfor);
int	vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
	    struct vnode *outvp, off_t *outoffp, size_t *lenp,
	    unsigned int flags, struct ucred *incred, struct ucred *outcred,
	    struct thread *fsize_td);
int	vn_need_pageq_flush(struct vnode *vp);
bool	vn_isdisk_error(struct vnode *vp, int *errp);
bool	vn_isdisk(struct vnode *vp);
int	_vn_lock(struct vnode *vp, int flags, const char *file, int line);
#define	vn_lock(vp, flags) _vn_lock(vp, flags, __FILE__, __LINE__)
void	vn_lock_pair(struct vnode *vp1, bool vp1_locked, struct vnode *vp2,
	    bool vp2_locked);
int	vn_open(struct nameidata *ndp, int *flagp, int cmode, struct file *fp);
int	vn_open_cred(struct nameidata *ndp, int *flagp, int cmode,
	    u_int vn_open_flags, struct ucred *cred, struct file *fp);
int	vn_open_vnode(struct vnode *vp, int fmode, struct ucred *cred,
	    struct thread *td, struct file *fp);
void	vn_pages_remove(struct vnode *vp, vm_pindex_t start, vm_pindex_t end);
int	vn_pollrecord(struct vnode *vp, struct thread *p, int events);
int	vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base,
	    int len, off_t offset, enum uio_seg segflg, int ioflg,
	    struct ucred *active_cred, struct ucred *file_cred, ssize_t *aresid,
	    struct thread *td);
int	vn_rdwr_inchunks(enum uio_rw rw, struct vnode *vp, void *base,
	    size_t len, off_t offset, enum uio_seg segflg, int ioflg,
	    struct ucred *active_cred, struct ucred *file_cred, size_t *aresid,
	    struct thread *td);
int	vn_read_from_obj(struct vnode *vp, struct uio *uio);
int	vn_rlimit_fsize(const struct vnode *vp, const struct uio *uio,
	    struct thread *td);
int	vn_start_write(struct vnode *vp, struct mount **mpp, int flags);
int	vn_start_secondary_write(struct vnode *vp, struct mount **mpp,
	    int flags);
int	vn_truncate_locked(struct vnode *vp, off_t length, bool sync,
	    struct ucred *cred);
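/*
 * vn_start_write() and vn_finished_write() bracket filesystem-modifying
 * operations so that they interlock with filesystem suspension.  A common
 * (illustrative) pattern, using the V_* flags defined above:
 *
 *	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
 *	if (error != 0)
 *		return (error);
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... modify the file ...
 *	VOP_UNLOCK(vp);
 *	vn_finished_write(mp);
 */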
int	vn_writechk(struct vnode *vp);
int	vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
	    const char *attrname, int *buflen, char *buf, struct thread *td);
int	vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
	    const char *attrname, int buflen, char *buf, struct thread *td);
int	vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
	    const char *attrname, struct thread *td);
int	vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
	    struct vnode **rvp);
int	vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc,
	    void *alloc_arg, int lkflags, struct vnode **rvp);
int	vn_utimes_perm(struct vnode *vp, struct vattr *vap,
	    struct ucred *cred, struct thread *td);

int	vn_io_fault_uiomove(char *data, int xfersize, struct uio *uio);
int	vn_io_fault_pgmove(vm_page_t ma[], vm_offset_t offset, int xfersize,
	    struct uio *uio);

void	vn_seqc_write_begin_unheld_locked(struct vnode *vp);
void	vn_seqc_write_begin_unheld(struct vnode *vp);
void	vn_seqc_write_begin_locked(struct vnode *vp);
void	vn_seqc_write_begin(struct vnode *vp);
void	vn_seqc_write_end_locked(struct vnode *vp);
void	vn_seqc_write_end(struct vnode *vp);
#define	vn_seqc_read_any(vp)		seqc_read_any(&(vp)->v_seqc)
#define	vn_seqc_read_notmodify(vp)	seqc_read_notmodify(&(vp)->v_seqc)
#define	vn_seqc_consistent(vp, seq)	seqc_consistent(&(vp)->v_seqc, seq)

#define	vn_rangelock_unlock(vp, cookie)					\
	rangelock_unlock(&(vp)->v_rl, (cookie), VI_MTX(vp))
#define	vn_rangelock_unlock_range(vp, cookie, start, end)		\
	rangelock_unlock_range(&(vp)->v_rl, (cookie), (start), (end),	\
	    VI_MTX(vp))
#define	vn_rangelock_rlock(vp, start, end)				\
	rangelock_rlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
#define	vn_rangelock_tryrlock(vp, start, end)				\
	rangelock_tryrlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
#define	vn_rangelock_wlock(vp, start, end)				\
	rangelock_wlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
#define	vn_rangelock_trywlock(vp, start, end)				\
	rangelock_trywlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
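/*
 * The byte-range lock wrappers above return an opaque cookie that is later
 * handed back to the unlock routine.  An illustrative sketch:
 *
 *	void *cookie;
 *
 *	cookie = vn_rangelock_wlock(vp, start, end);
 *	... perform the I/O on the locked range ...
 *	vn_rangelock_unlock(vp, cookie);
 */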

#define	vn_irflag_read(vp)	atomic_load_short(&(vp)->v_irflag)
void	vn_irflag_set_locked(struct vnode *vp, short toset);
void	vn_irflag_set(struct vnode *vp, short toset);
void	vn_irflag_set_cond_locked(struct vnode *vp, short toset);
void	vn_irflag_set_cond(struct vnode *vp, short toset);
void	vn_irflag_unset_locked(struct vnode *vp, short tounset);
void	vn_irflag_unset(struct vnode *vp, short tounset);

int	vfs_cache_lookup(struct vop_lookup_args *ap);
int	vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp);
void	vfs_timestamp(struct timespec *);
void	vfs_write_resume(struct mount *mp, int flags);
int	vfs_write_suspend(struct mount *mp, int flags);
int	vfs_write_suspend_umnt(struct mount *mp);
struct vnode *vnlru_alloc_marker(void);
void	vnlru_free_marker(struct vnode *);
void	vnlru_free(int, struct vfsops *);
void	vnlru_free_vfsops(int, struct vfsops *, struct vnode *);
int	vop_stdbmap(struct vop_bmap_args *);
int	vop_stdfdatasync_buf(struct vop_fdatasync_args *);
int	vop_stdfsync(struct vop_fsync_args *);
int	vop_stdgetwritemount(struct vop_getwritemount_args *);
int	vop_stdgetpages(struct vop_getpages_args *);
int	vop_stdinactive(struct vop_inactive_args *);
int	vop_stdioctl(struct vop_ioctl_args *);
int	vop_stdneed_inactive(struct vop_need_inactive_args *);
int	vop_stdkqfilter(struct vop_kqfilter_args *);
int	vop_stdlock(struct vop_lock1_args *);
int	vop_stdunlock(struct vop_unlock_args *);
int	vop_stdislocked(struct vop_islocked_args *);
int	vop_lock(struct vop_lock1_args *);
int	vop_unlock(struct vop_unlock_args *);
int	vop_islocked(struct vop_islocked_args *);
int	vop_stdputpages(struct vop_putpages_args *);
int	vop_nopoll(struct vop_poll_args *);
int	vop_stdaccess(struct vop_access_args *ap);
int	vop_stdaccessx(struct vop_accessx_args *ap);
int	vop_stdadvise(struct vop_advise_args *ap);
int	vop_stdadvlock(struct vop_advlock_args *ap);
int	vop_stdadvlockasync(struct vop_advlockasync_args *ap);
int	vop_stdadvlockpurge(struct vop_advlockpurge_args *ap);
int	vop_stdallocate(struct vop_allocate_args *ap);
int	vop_stdset_text(struct vop_set_text_args *ap);
int	vop_stdpathconf(struct vop_pathconf_args *);
int	vop_stdpoll(struct vop_poll_args *);
int	vop_stdvptocnp(struct vop_vptocnp_args *ap);
int	vop_stdvptofh(struct vop_vptofh_args *ap);
int	vop_stdunp_bind(struct vop_unp_bind_args *ap);
int	vop_stdunp_connect(struct vop_unp_connect_args *ap);
int	vop_stdunp_detach(struct vop_unp_detach_args *ap);
int	vop_eopnotsupp(struct vop_generic_args *ap);
int	vop_ebadf(struct vop_generic_args *ap);
int	vop_einval(struct vop_generic_args *ap);
int	vop_enoent(struct vop_generic_args *ap);
int	vop_enotty(struct vop_generic_args *ap);
int	vop_eagain(struct vop_generic_args *ap);
int	vop_null(struct vop_generic_args *ap);
int	vop_panic(struct vop_generic_args *ap);
int	dead_poll(struct vop_poll_args *ap);
int	dead_read(struct vop_read_args *ap);
int	dead_write(struct vop_write_args *ap);

/* These are called from within the actual VOPS. */
void	vop_close_post(void *a, int rc);
void	vop_create_pre(void *a);
void	vop_create_post(void *a, int rc);
void	vop_whiteout_pre(void *a);
void	vop_whiteout_post(void *a, int rc);
void	vop_deleteextattr_pre(void *a);
void	vop_deleteextattr_post(void *a, int rc);
void	vop_link_pre(void *a);
void	vop_link_post(void *a, int rc);
void	vop_lookup_post(void *a, int rc);
void	vop_lookup_pre(void *a);
void	vop_mkdir_pre(void *a);
void	vop_mkdir_post(void *a, int rc);
void	vop_mknod_pre(void *a);
void	vop_mknod_post(void *a, int rc);
void	vop_open_post(void *a, int rc);
void	vop_read_post(void *a, int rc);
void	vop_read_pgcache_post(void *ap, int rc);
void	vop_readdir_post(void *a, int rc);
void	vop_reclaim_post(void *a, int rc);
void	vop_remove_pre(void *a);
void	vop_remove_post(void *a, int rc);
void	vop_rename_post(void *a, int rc);
void	vop_rename_pre(void *a);
void	vop_rmdir_pre(void *a);
void	vop_rmdir_post(void *a, int rc);
void	vop_setattr_pre(void *a);
void	vop_setattr_post(void *a, int rc);
void	vop_setacl_pre(void *a);
void	vop_setacl_post(void *a, int rc);
void	vop_setextattr_pre(void *a);
void	vop_setextattr_post(void *a, int rc);
void	vop_symlink_pre(void *a);
void	vop_symlink_post(void *a, int rc);
int	vop_sigdefer(struct vop_vector *vop, struct vop_generic_args *a);

#ifdef DEBUG_VFS_LOCKS
void	vop_fplookup_vexec_debugpre(void *a);
void	vop_fplookup_vexec_debugpost(void *a, int rc);
void	vop_fplookup_symlink_debugpre(void *a);
void	vop_fplookup_symlink_debugpost(void *a, int rc);
void	vop_strategy_debugpre(void *a);
void	vop_lock_debugpre(void *a);
void	vop_lock_debugpost(void *a, int rc);
void	vop_unlock_debugpre(void *a);
void	vop_need_inactive_debugpre(void *a);
void	vop_need_inactive_debugpost(void *a, int rc);
void	vop_mkdir_debugpost(void *a, int rc);
#else
#define	vop_fplookup_vexec_debugpre(x)		do { } while (0)
#define	vop_fplookup_vexec_debugpost(x, y)	do { } while (0)
#define	vop_fplookup_symlink_debugpre(x)	do { } while (0)
#define	vop_fplookup_symlink_debugpost(x, y)	do { } while (0)
#define	vop_strategy_debugpre(x)		do { } while (0)
#define	vop_lock_debugpre(x)			do { } while (0)
#define	vop_lock_debugpost(x, y)		do { } while (0)
#define	vop_unlock_debugpre(x)			do { } while (0)
#define	vop_need_inactive_debugpre(x)		do { } while (0)
#define	vop_need_inactive_debugpost(x, y)	do { } while (0)
#define	vop_mkdir_debugpost(x, y)		do { } while (0)
#endif

void	vop_rename_fail(struct vop_rename_args *ap);

#define	vop_stat_helper_pre(ap)	({					\
	int _error;							\
	AUDIT_ARG_VNODE1(ap->a_vp);					\
	_error = mac_vnode_check_stat(ap->a_active_cred, ap->a_file_cred, ap->a_vp);\
	if (__predict_true(_error == 0))				\
		bzero(ap->a_sb, sizeof(*ap->a_sb));			\
	_error;								\
})

#define	vop_stat_helper_post(ap, error)	({				\
	int _error = (error);						\
	if (priv_check_cred_vfs_generation(ap->a_td->td_ucred))	\
		ap->a_sb->st_gen = 0;					\
	_error;								\
})

#define	VOP_WRITE_PRE(ap)						\
	struct vattr va;						\
	int error;							\
	off_t osize, ooffset, noffset;					\
									\
	osize = ooffset = noffset = 0;					\
	if (!VN_KNLIST_EMPTY((ap)->a_vp)) {				\
		error = VOP_GETATTR((ap)->a_vp, &va, (ap)->a_cred);	\
		if (error)						\
			return (error);					\
		ooffset = (ap)->a_uio->uio_offset;			\
		osize = (off_t)va.va_size;				\
	}

#define	VOP_WRITE_POST(ap, ret)						\
	noffset = (ap)->a_uio->uio_offset;				\
	if (noffset > ooffset && !VN_KNLIST_EMPTY((ap)->a_vp)) {	\
		VFS_KNOTE_LOCKED((ap)->a_vp, NOTE_WRITE			\
		    | (noffset > osize ? NOTE_EXTEND : 0));		\
	}

#define	VOP_LOCK(vp, flags) VOP_LOCK1(vp, flags, __FILE__, __LINE__)

#ifdef INVARIANTS
#define	VOP_ADD_WRITECOUNT_CHECKED(vp, cnt)				\
do {									\
	int error_;							\
									\
	error_ = VOP_ADD_WRITECOUNT((vp), (cnt));			\
	VNASSERT(error_ == 0, (vp), ("VOP_ADD_WRITECOUNT returned %d",	\
	    error_));							\
} while (0)
#define	VOP_SET_TEXT_CHECKED(vp)					\
do {									\
	int error_;							\
									\
	error_ = VOP_SET_TEXT((vp));					\
	VNASSERT(error_ == 0, (vp), ("VOP_SET_TEXT returned %d",	\
	    error_));							\
} while (0)
#define	VOP_UNSET_TEXT_CHECKED(vp)					\
do {									\
	int error_;							\
									\
	error_ = VOP_UNSET_TEXT((vp));					\
	VNASSERT(error_ == 0, (vp), ("VOP_UNSET_TEXT returned %d",	\
	    error_));							\
} while (0)
#else
#define	VOP_ADD_WRITECOUNT_CHECKED(vp, cnt)	VOP_ADD_WRITECOUNT((vp), (cnt))
#define	VOP_SET_TEXT_CHECKED(vp)		VOP_SET_TEXT((vp))
#define	VOP_UNSET_TEXT_CHECKED(vp)		VOP_UNSET_TEXT((vp))
#endif

#define	VN_IS_DOOMED(vp)	__predict_false((vn_irflag_read(vp) & VIRF_DOOMED) != 0)

void	vput(struct vnode *vp);
void	vrele(struct vnode *vp);
void	vref(struct vnode *vp);
void	vrefact(struct vnode *vp);
void	v_addpollinfo(struct vnode *vp);
static __inline int
vrefcnt(struct vnode *vp)
{

	return (vp->v_usecount);
}

#define	vholdl(vp)	do {						\
	ASSERT_VI_LOCKED(vp, __func__);					\
	vhold(vp);							\
} while (0)

#define	vrefl(vp)	do {						\
	ASSERT_VI_LOCKED(vp, __func__);					\
	vref(vp);							\
} while (0)

int	vnode_create_vobject(struct vnode *vp, off_t size, struct thread *td);
void	vnode_destroy_vobject(struct vnode *vp);

extern struct vop_vector fifo_specops;
extern struct vop_vector dead_vnodeops;
extern struct vop_vector default_vnodeops;

#define	VOP_PANIC	((void*)(uintptr_t)vop_panic)
#define	VOP_NULL	((void*)(uintptr_t)vop_null)
#define	VOP_EBADF	((void*)(uintptr_t)vop_ebadf)
#define	VOP_ENOTTY	((void*)(uintptr_t)vop_enotty)
#define	VOP_EINVAL	((void*)(uintptr_t)vop_einval)
#define	VOP_ENOENT	((void*)(uintptr_t)vop_enoent)
#define	VOP_EOPNOTSUPP	((void*)(uintptr_t)vop_eopnotsupp)
#define	VOP_EAGAIN	((void*)(uintptr_t)vop_eagain)

/* fifo_vnops.c */
int	fifo_printinfo(struct vnode *);
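/*
 * A filesystem provides its operations as a struct vop_vector and registers
 * it at boot via VFS_VOP_VECTOR_REGISTER() (defined below).  A minimal,
 * hedged sketch with hypothetical "foo_*" names; unimplemented operations
 * fall back to vop_default:
 *
 *	struct vop_vector foo_vnodeops = {
 *		.vop_default =	&default_vnodeops,
 *		.vop_lookup =	foo_lookup,
 *		.vop_read =	foo_read,
 *		.vop_reclaim =	foo_reclaim,
 *	};
 *	VFS_VOP_VECTOR_REGISTER(foo_vnodeops);
 */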

/* vfs_hash.c */
typedef int vfs_hash_cmp_t(struct vnode *vp, void *arg);

void	vfs_hash_changesize(u_long newhashsize);
int	vfs_hash_get(const struct mount *mp, u_int hash, int flags,
	    struct thread *td, struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
u_int	vfs_hash_index(struct vnode *vp);
int	vfs_hash_insert(struct vnode *vp, u_int hash, int flags, struct thread *td,
	    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
void	vfs_hash_ref(const struct mount *mp, u_int hash, struct thread *td,
	    struct vnode **vpp, vfs_hash_cmp_t *fn, void *arg);
void	vfs_hash_rehash(struct vnode *vp, u_int hash);
void	vfs_hash_remove(struct vnode *vp);

int	vfs_kqfilter(struct vop_kqfilter_args *);
struct dirent;
int	vfs_read_dirent(struct vop_readdir_args *ap, struct dirent *dp, off_t off);
int	vfs_emptydir(struct vnode *vp);

int	vfs_unixify_accmode(accmode_t *accmode);

void	vfs_unp_reclaim(struct vnode *vp);

int	setfmode(struct thread *td, struct ucred *cred, struct vnode *vp, int mode);
int	setfown(struct thread *td, struct ucred *cred, struct vnode *vp, uid_t uid,
	    gid_t gid);
int	vn_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
	    struct thread *td);
int	vn_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
	    struct thread *td);

void	vn_fsid(struct vnode *vp, struct vattr *va);

int	vn_dir_check_exec(struct vnode *vp, struct componentname *cnp);

#define	VOP_UNLOCK_FLAGS(vp, flags)	({				\
	struct vnode *_vp = (vp);					\
	int _flags = (flags);						\
	int _error;							\
									\
	if ((_flags & ~(LK_INTERLOCK | LK_RELEASE)) != 0)		\
		panic("%s: unsupported flags %x\n", __func__, flags);	\
	_error = VOP_UNLOCK(_vp);					\
	if (_flags & LK_INTERLOCK)					\
		VI_UNLOCK(_vp);						\
	_error;								\
})

#include <sys/kernel.h>

#define	VFS_VOP_VECTOR_REGISTER(vnodeops)				\
	SYSINIT(vfs_vector_##vnodeops##_f, SI_SUB_VFS, SI_ORDER_ANY,	\
	    vfs_vector_op_register, &vnodeops)

#define	VFS_SMR_DECLARE							\
	extern smr_t vfs_smr

#define	VFS_SMR()	vfs_smr
#define	vfs_smr_enter()	smr_enter(VFS_SMR())
#define	vfs_smr_exit()	smr_exit(VFS_SMR())
#define	vfs_smr_entered_load(ptr)	smr_entered_load((ptr), VFS_SMR())
#define	VFS_SMR_ASSERT_ENTERED()	SMR_ASSERT_ENTERED(VFS_SMR())
#define	VFS_SMR_ASSERT_NOT_ENTERED()	SMR_ASSERT_NOT_ENTERED(VFS_SMR())
#define	VFS_SMR_ZONE_SET(zone)	uma_zone_set_smr((zone), VFS_SMR())

#define	vn_load_v_data_smr(vp)	({					\
	struct vnode *_vp = (vp);					\
									\
	VFS_SMR_ASSERT_ENTERED();					\
	atomic_load_consume_ptr(&(_vp)->v_data);			\
})

#endif /* _KERNEL */

#endif /* !_SYS_VNODE_H_ */