/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_quota.h"
#include "opt_ufs.h"
#include "opt_ffs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/gsb_crc32.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/taskqueue.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#include <security/mac/mac_framework.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/gjournal.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

#include <vm/vm.h>
#include <vm/uma.h>
#include <vm/vm_page.h>

#include <geom/geom.h>
#include <geom/geom_vfs.h>

#include <ddb/ddb.h>

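/*
 * UMA zones for inodes and dinodes, shared by all FFS mounts.  As a
 * reading note (not from the original comments): the inode zone is
 * registered with VFS_SMR_DECLARE/VFS_SMR_ZONE_SET below so that the
 * lockless lookup path (MNTK_FPLOOKUP) can inspect inode memory that may
 * be concurrently freed; see smr(9) for the reclamation scheme.
 */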
static uma_zone_t uma_inode, uma_ufs1, uma_ufs2;
VFS_SMR_DECLARE;

static int ffs_mountfs(struct vnode *, struct mount *, struct thread *);
static void ffs_oldfscompat_read(struct fs *, struct ufsmount *,
    ufs2_daddr_t);
static void ffs_ifree(struct ufsmount *ump, struct inode *ip);
static int ffs_sync_lazy(struct mount *mp);
static int ffs_use_bread(void *devfd, off_t loc, void **bufp, int size);
static int ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size);

static vfs_init_t ffs_init;
static vfs_uninit_t ffs_uninit;
static vfs_extattrctl_t ffs_extattrctl;
static vfs_cmount_t ffs_cmount;
static vfs_unmount_t ffs_unmount;
static vfs_mount_t ffs_mount;
static vfs_statfs_t ffs_statfs;
static vfs_fhtovp_t ffs_fhtovp;
static vfs_sync_t ffs_sync;

static struct vfsops ufs_vfsops = {
	.vfs_extattrctl = ffs_extattrctl,
	.vfs_fhtovp = ffs_fhtovp,
	.vfs_init = ffs_init,
	.vfs_mount = ffs_mount,
	.vfs_cmount = ffs_cmount,
	.vfs_quotactl = ufs_quotactl,
	.vfs_root = vfs_cache_root,
	.vfs_cachedroot = ufs_root,
	.vfs_statfs = ffs_statfs,
	.vfs_sync = ffs_sync,
	.vfs_uninit = ffs_uninit,
	.vfs_unmount = ffs_unmount,
	.vfs_vget = ffs_vget,
	.vfs_susp_clean = process_deferred_inactive,
};

VFS_SET(ufs_vfsops, ufs, 0);
MODULE_VERSION(ufs, 1);
static b_strategy_t ffs_geom_strategy;
static b_write_t ffs_bufwrite;

static struct buf_ops ffs_ops = {
	.bop_name = "FFS",
	.bop_write = ffs_bufwrite,
	.bop_strategy = ffs_geom_strategy,
	.bop_sync = bufsync,
#ifdef NO_FFS_SNAPSHOT
	.bop_bdflush = bufbdflush,
#else
	.bop_bdflush = ffs_bdflush,
#endif
};

/*
 * Note that userquota and groupquota options are not currently used
 * by UFS/FFS code and generally mount(8) does not pass those options
 * from userland, but they can be passed by loader(8) via
 * vfs.root.mountfrom.options.
 */
static const char *ffs_opts[] = { "acls", "async", "noatime", "noclusterr",
    "noclusterw", "noexec", "export", "force", "from", "groupquota",
    "multilabel", "nfsv4acls", "snapshot", "nosuid", "suiddir",
    "nosymfollow", "sync", "union", "userquota", "untrusted", NULL };
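
/*
 * For illustration (a sketch, not from the original comments): a typical
 * request such as "mount -t ufs -o noatime,acls /dev/ada0p2 /mnt" arrives
 * through nmount(2) as option pairs like ("from", "/dev/ada0p2"),
 * ("noatime", ""), ("acls", ""), which ffs_mount() validates against the
 * ffs_opts list above via vfs_filteropt().
 */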

static int ffs_enxio_enable = 1;
SYSCTL_DECL(_vfs_ffs);
SYSCTL_INT(_vfs_ffs, OID_AUTO, enxio_enable, CTLFLAG_RWTUN,
    &ffs_enxio_enable, 0,
    "enable mapping of other disk I/O errors to ENXIO");

/*
 * Return buffer with the contents of block "offset" from the beginning of
 * directory "ip".  If "res" is non-zero, fill it in with a pointer to the
 * remaining space in the directory.
 */
static int
ffs_blkatoff(struct vnode *vp, off_t offset, char **res, struct buf **bpp)
{
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	ufs_lbn_t lbn;
	int bsize, error;

	ip = VTOI(vp);
	fs = ITOFS(ip);
	lbn = lblkno(fs, offset);
	bsize = blksize(fs, ip, lbn);

	*bpp = NULL;
	error = bread(vp, lbn, bsize, NOCRED, &bp);
	if (error) {
		return (error);
	}
	if (res)
		*res = (char *)bp->b_data + blkoff(fs, offset);
	*bpp = bp;
	return (0);
}

/*
 * Load up the contents of an inode and copy the appropriate pieces
 * to the incore copy.
 */
static int
ffs_load_inode(struct buf *bp, struct inode *ip, struct fs *fs, ino_t ino)
{
	struct ufs1_dinode *dip1;
	struct ufs2_dinode *dip2;
	int error;

	if (I_IS_UFS1(ip)) {
		dip1 = ip->i_din1;
		*dip1 =
		    *((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
		ip->i_mode = dip1->di_mode;
		ip->i_nlink = dip1->di_nlink;
		ip->i_effnlink = dip1->di_nlink;
		ip->i_size = dip1->di_size;
		ip->i_flags = dip1->di_flags;
		ip->i_gen = dip1->di_gen;
		ip->i_uid = dip1->di_uid;
		ip->i_gid = dip1->di_gid;
		return (0);
	}
	dip2 = ((struct ufs2_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
	if ((error = ffs_verify_dinode_ckhash(fs, dip2)) != 0 &&
	    !ffs_fsfail_cleanup(ITOUMP(ip), error)) {
		printf("%s: inode %jd: check-hash failed\n", fs->fs_fsmnt,
		    (intmax_t)ino);
		return (error);
	}
	*ip->i_din2 = *dip2;
	dip2 = ip->i_din2;
	ip->i_mode = dip2->di_mode;
	ip->i_nlink = dip2->di_nlink;
	ip->i_effnlink = dip2->di_nlink;
	ip->i_size = dip2->di_size;
	ip->i_flags = dip2->di_flags;
	ip->i_gen = dip2->di_gen;
	ip->i_uid = dip2->di_uid;
	ip->i_gid = dip2->di_gid;
	return (0);
}

/*
 * Verify that a filesystem block number is a valid data block.
 * This routine is only called on untrusted filesystems.
 */
static int
ffs_check_blkno(struct mount *mp, ino_t inum, ufs2_daddr_t daddr, int blksize)
{
	struct fs *fs;
	struct ufsmount *ump;
	ufs2_daddr_t end_daddr;
	int cg, havemtx;

	KASSERT((mp->mnt_flag & MNT_UNTRUSTED) != 0,
	    ("ffs_check_blkno called on a trusted file system"));
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	cg = dtog(fs, daddr);
	end_daddr = daddr + numfrags(fs, blksize);
	/*
	 * Verify that the block number is a valid data block. Also check
	 * that it does not point to an inode block or a superblock. Accept
	 * blocks that are unallocated (0) or part of snapshot metadata
	 * (BLK_NOCOPY or BLK_SNAP).
	 *
	 * Thus, the block must be in a valid range for the filesystem and
	 * either in the space before a backup superblock (except the first
	 * cylinder group where that space is used by the bootstrap code) or
	 * after the inode blocks and before the end of the cylinder group.
	 */
	if ((uint64_t)daddr <= BLK_SNAP ||
	    ((uint64_t)end_daddr <= fs->fs_size &&
	    ((cg > 0 && end_daddr <= cgsblock(fs, cg)) ||
	    (daddr >= cgdmin(fs, cg) &&
	    end_daddr <= cgbase(fs, cg) + fs->fs_fpg))))
		return (0);
	if ((havemtx = mtx_owned(UFS_MTX(ump))) == 0)
		UFS_LOCK(ump);
	if (ppsratecheck(&ump->um_last_integritymsg,
	    &ump->um_secs_integritymsg, 1)) {
		UFS_UNLOCK(ump);
		uprintf("\n%s: inode %jd, out-of-range indirect block "
		    "number %jd\n", mp->mnt_stat.f_mntonname, inum, daddr);
		if (havemtx)
			UFS_LOCK(ump);
	} else if (!havemtx)
		UFS_UNLOCK(ump);
	return (EINTEGRITY);
}

/*
 * Initiate a forcible unmount.
 * Used to unmount filesystems whose underlying media has gone away.
 */
static void
ffs_fsfail_unmount(void *v, int pending)
{
	struct fsfail_task *etp;
	struct mount *mp;

	etp = v;

	/*
	 * Find our mount and get a ref on it, then try to unmount.
	 */
	mp = vfs_getvfs(&etp->fsid);
	if (mp != NULL)
		dounmount(mp, MNT_FORCE, curthread);
	free(etp, M_UFSMNT);
}

/*
 * On first ENXIO error, start a task that forcibly unmounts the filesystem.
 *
 * Return true if a cleanup is in progress.
 */
int
ffs_fsfail_cleanup(struct ufsmount *ump, int error)
{
	int retval;

	UFS_LOCK(ump);
	retval = ffs_fsfail_cleanup_locked(ump, error);
	UFS_UNLOCK(ump);
	return (retval);
}

int
ffs_fsfail_cleanup_locked(struct ufsmount *ump, int error)
{
	struct fsfail_task *etp;
	struct task *tp;

	mtx_assert(UFS_MTX(ump), MA_OWNED);
	if (error == ENXIO && (ump->um_flags & UM_FSFAIL_CLEANUP) == 0) {
		ump->um_flags |= UM_FSFAIL_CLEANUP;
		/*
		 * Queue an async forced unmount.
		 */
		etp = ump->um_fsfail_task;
		ump->um_fsfail_task = NULL;
		if (etp != NULL) {
			tp = &etp->task;
			TASK_INIT(tp, 0, ffs_fsfail_unmount, etp);
			taskqueue_enqueue(taskqueue_thread, tp);
			printf("UFS: forcibly unmounting %s from %s\n",
			    ump->um_mountp->mnt_stat.f_mntfromname,
			    ump->um_mountp->mnt_stat.f_mntonname);
		}
	}
	return ((ump->um_flags & UM_FSFAIL_CLEANUP) != 0);
}

/*
 * Wrapper used during ENXIO cleanup to allocate empty buffers when
 * the kernel is unable to read the real one. They are needed so that
 * the soft updates code can use them to unwind its dependencies.
 */
int
ffs_breadz(struct ufsmount *ump, struct vnode *vp, daddr_t lblkno,
    daddr_t dblkno, int size, daddr_t *rablkno, int *rabsize, int cnt,
    struct ucred *cred, int flags, void (*ckhashfunc)(struct buf *),
    struct buf **bpp)
{
	int error;

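	/*
	 * Explanatory note (an assumption from the flag's name and the
	 * sysctl above, not from the original comments): GB_CVTENXIO asks
	 * the buffer layer to map I/O errors from a failing device to
	 * ENXIO (subject to vfs.ffs.enxio_enable), which
	 * ffs_fsfail_cleanup() recognizes as the trigger for a forced
	 * unmount; in that case a zero-filled buffer is handed back.
	 */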
	flags |= GB_CVTENXIO;
	error = breadn_flags(vp, lblkno, dblkno, size, rablkno, rabsize, cnt,
	    cred, flags, ckhashfunc, bpp);
	if (error != 0 && ffs_fsfail_cleanup(ump, error)) {
		error = getblkx(vp, lblkno, dblkno, size, 0, 0, flags, bpp);
		KASSERT(error == 0, ("getblkx failed"));
		vfs_bio_bzero_buf(*bpp, 0, size);
	}
	return (error);
}

static int
ffs_mount(struct mount *mp)
{
	struct vnode *devvp, *odevvp;
	struct thread *td;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error, flags;
	int error1 __diagused;
	uint64_t mntorflags, saved_mnt_flag;
	accmode_t accmode;
	struct nameidata ndp;
	char *fspec;
	bool mounted_softdep;

	td = curthread;
	if (vfs_filteropt(mp->mnt_optnew, ffs_opts))
		return (EINVAL);
	if (uma_inode == NULL) {
		uma_inode = uma_zcreate("FFS inode",
		    sizeof(struct inode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs1 = uma_zcreate("FFS1 dinode",
		    sizeof(struct ufs1_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		uma_ufs2 = uma_zcreate("FFS2 dinode",
		    sizeof(struct ufs2_dinode), NULL, NULL, NULL, NULL,
		    UMA_ALIGN_PTR, 0);
		VFS_SMR_ZONE_SET(uma_inode);
	}

	vfs_deleteopt(mp->mnt_optnew, "groupquota");
	vfs_deleteopt(mp->mnt_optnew, "userquota");

	fspec = vfs_getopts(mp->mnt_optnew, "from", &error);
	if (error)
		return (error);

	mntorflags = 0;
	if (vfs_getopt(mp->mnt_optnew, "untrusted", NULL, NULL) == 0)
		mntorflags |= MNT_UNTRUSTED;

	if (vfs_getopt(mp->mnt_optnew, "acls", NULL, NULL) == 0)
		mntorflags |= MNT_ACLS;

	if (vfs_getopt(mp->mnt_optnew, "snapshot", NULL, NULL) == 0) {
		mntorflags |= MNT_SNAPSHOT;
		/*
		 * Once we have set the MNT_SNAPSHOT flag, do not
		 * persist "snapshot" in the options list.
		 */
		vfs_deleteopt(mp->mnt_optnew, "snapshot");
		vfs_deleteopt(mp->mnt_opt, "snapshot");
	}

	if (vfs_getopt(mp->mnt_optnew, "nfsv4acls", NULL, NULL) == 0) {
		if (mntorflags & MNT_ACLS) {
			vfs_mount_error(mp,
			    "\"acls\" and \"nfsv4acls\" options "
			    "are mutually exclusive");
			return (EINVAL);
		}
		mntorflags |= MNT_NFS4ACLS;
	}

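	/*
	 * Reading note (not in the original comments): lockless lookup is
	 * disabled while the mount flags are in flux; it is re-enabled at
	 * the end of ffs_mount() once the final flags are known to be
	 * compatible (see the MNTK_FPLOOKUP check below).
	 */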
	MNT_ILOCK(mp);
	mp->mnt_kern_flag &= ~MNTK_FPLOOKUP;
	mp->mnt_flag |= mntorflags;
	MNT_IUNLOCK(mp);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		odevvp = ump->um_odevvp;
		devvp = ump->um_devvp;
		if (fs->fs_ronly == 0 &&
		    vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * Flush any dirty data and suspend filesystem.
			 */
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);

			fs->fs_ronly = 1;
			if (MOUNTEDSOFTDEP(mp)) {
				MNT_ILOCK(mp);
				mp->mnt_flag &= ~MNT_SOFTDEP;
				MNT_IUNLOCK(mp);
				mounted_softdep = true;
			} else
				mounted_softdep = false;

			/*
			 * Check for and optionally get rid of files open
			 * for writing.
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mounted_softdep) {
				error = softdep_flushfiles(mp, flags, td);
			} else {
				error = ffs_flushfiles(mp, flags, td);
			}
			if (error) {
				fs->fs_ronly = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}

			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("WARNING: %s Update error: blocks %jd "
				    "files %d\n", fs->fs_fsmnt,
				    (intmax_t)fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if ((fs->fs_flags & (FS_UNCLEAN | FS_NEEDSFSCK)) == 0)
				fs->fs_clean = 1;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 0;
				fs->fs_clean = 0;
				if (mounted_softdep) {
					MNT_ILOCK(mp);
					mp->mnt_flag |= MNT_SOFTDEP;
					MNT_IUNLOCK(mp);
				}
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (mounted_softdep)
				softdep_unmount(mp);
			g_topology_lock();
			/*
			 * Drop our write and exclusive access.
			 */
			g_access(ump->um_cp, 0, -1, -1);
			g_topology_unlock();
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_RDONLY;
			MNT_IUNLOCK(mp);
			/*
			 * Allow the writers to note that filesystem
			 * is ro now.
			 */
			vfs_write_resume(mp, 0);
		}
		if ((mp->mnt_flag & MNT_RELOAD) &&
		    (error = ffs_reload(mp, 0)) != 0)
			return (error);
		if (fs->fs_ronly &&
		    !vfs_flagopt(mp->mnt_optnew, "ro", NULL, 0)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			vn_lock(odevvp, LK_EXCLUSIVE | LK_RETRY);
			error = VOP_ACCESS(odevvp, VREAD | VWRITE,
			    td->td_ucred, td);
			if (error)
				error = priv_check(td, PRIV_VFS_MOUNT_PERM);
			VOP_UNLOCK(odevvp);
			if (error) {
				return (error);
			}
			fs->fs_flags &= ~FS_UNCLEAN;
			if (fs->fs_clean == 0) {
				fs->fs_flags |= FS_UNCLEAN;
				if ((mp->mnt_flag & MNT_FORCE) ||
				    ((fs->fs_flags &
				    (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
				    (fs->fs_flags & FS_DOSOFTDEP))) {
					printf("WARNING: %s was not properly "
					    "dismounted\n", fs->fs_fsmnt);
				} else {
					vfs_mount_error(mp,
					    "R/W mount of %s denied. %s.%s",
					    fs->fs_fsmnt,
					    "Filesystem is not clean - run fsck",
					    (fs->fs_flags & FS_SUJ) == 0 ? "" :
					    " Forced mount will invalidate"
					    " journal contents");
					return (EPERM);
				}
			}
			g_topology_lock();
			/*
			 * Request exclusive write access.
			 */
			error = g_access(ump->um_cp, 0, 1, 1);
			g_topology_unlock();
			if (error)
				return (error);
			if ((error = vn_start_write(NULL, &mp, V_WAIT)) != 0)
				return (error);
			error = vfs_write_suspend_umnt(mp);
			if (error != 0)
				return (error);
			fs->fs_ronly = 0;
			MNT_ILOCK(mp);
			saved_mnt_flag = MNT_RDONLY;
			if (MOUNTEDSOFTDEP(mp) && (mp->mnt_flag &
			    MNT_ASYNC) != 0)
				saved_mnt_flag |= MNT_ASYNC;
			mp->mnt_flag &= ~saved_mnt_flag;
			MNT_IUNLOCK(mp);
			fs->fs_mtime = time_second;
			/* check to see if we need to start softdep */
			if ((fs->fs_flags & FS_DOSOFTDEP) &&
			    (error = softdep_mount(devvp, mp, fs, td->td_ucred))) {
				fs->fs_ronly = 1;
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			fs->fs_clean = 0;
			if ((error = ffs_sbupdate(ump, MNT_WAIT, 0)) != 0) {
				fs->fs_ronly = 1;
				if ((fs->fs_flags & FS_DOSOFTDEP) != 0)
					softdep_unmount(mp);
				MNT_ILOCK(mp);
				mp->mnt_flag |= saved_mnt_flag;
				MNT_IUNLOCK(mp);
				vfs_write_resume(mp, 0);
				return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
			vfs_write_resume(mp, 0);
		}
		/*
		 * Soft updates is incompatible with "async",
		 * so if we are doing softupdates stop the user
		 * from setting the async flag in an update.
		 * Softdep_mount() clears it in an initial mount
		 * or ro->rw remount.
		 */
		if (MOUNTEDSOFTDEP(mp)) {
			/* XXX: Reset too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag &= ~MNT_ASYNC;
			MNT_IUNLOCK(mp);
		}
		/*
		 * Keep MNT_ACLS flag if it is stored in superblock.
		 */
		if ((fs->fs_flags & FS_ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_ACLS;
			MNT_IUNLOCK(mp);
		}

		if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
			/* XXX: Set too late ? */
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_NFS4ACLS;
			MNT_IUNLOCK(mp);
		}

		/*
		 * If this is a snapshot request, take the snapshot.
		 */
		if (mp->mnt_flag & MNT_SNAPSHOT)
			return (ffs_snapshot(mp, fspec));

		/*
		 * Must not call namei() while owning busy ref.
		 */
		vfs_unbusy(mp);
	}

	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible disk device.
	 */
	NDINIT(&ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, fspec, td);
	error = namei(&ndp);
	if ((mp->mnt_flag & MNT_UPDATE) != 0) {
		/*
		 * Unmount does not start if MNT_UPDATE is set. Mount
		 * update busies mp before setting MNT_UPDATE. We
		 * must be able to retain our busy ref successfully,
		 * without sleep.
		 */
		error1 = vfs_busy(mp, MBF_NOWAIT);
		MPASS(error1 == 0);
	}
	if (error != 0)
		return (error);
	NDFREE(&ndp, NDF_ONLY_PNBUF);
	devvp = ndp.ni_vp;
	if (!vn_isdisk_error(devvp, &error)) {
		vput(devvp);
		return (error);
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	accmode = VREAD;
	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		accmode |= VWRITE;
	error = VOP_ACCESS(devvp, accmode, td->td_ucred, td);
	if (error)
		error = priv_check(td, PRIV_VFS_MOUNT_PERM);
	if (error) {
		vput(devvp);
		return (error);
	}

	if (mp->mnt_flag & MNT_UPDATE) {
		/*
		 * Update only
		 *
		 * If it's not the same vnode, or at least the same device
		 * then it's not correct.
		 */

		if (devvp->v_rdev != ump->um_devvp->v_rdev)
			error = EINVAL;	/* needs translation */
		vput(devvp);
		if (error)
			return (error);
	} else {
		/*
		 * New mount
		 *
		 * We need the name for the mount point (also used for
		 * "last mounted on") copied in. If an error occurs,
		 * the mount point is discarded by the upper level code.
		 * Note that vfs_mount_alloc() populates f_mntonname for us.
		 */
		if ((error = ffs_mountfs(devvp, mp, td)) != 0) {
			vrele(devvp);
			return (error);
		}
	}

	MNT_ILOCK(mp);
	/*
	 * This is racy versus lookup, see ufs_fplookup_vexec for details.
	 */
	if ((mp->mnt_kern_flag & MNTK_FPLOOKUP) != 0)
		panic("MNTK_FPLOOKUP set on mount %p when it should not be", mp);
	if ((mp->mnt_flag & (MNT_ACLS | MNT_NFS4ACLS | MNT_UNION)) == 0)
		mp->mnt_kern_flag |= MNTK_FPLOOKUP;
	MNT_IUNLOCK(mp);

	vfs_mountedfrom(mp, fspec);
	return (0);
}

/*
 * Compatibility with old mount system call.
 */

static int
ffs_cmount(struct mntarg *ma, void *data, uint64_t flags)
{
	struct ufs_args args;
	int error;

	if (data == NULL)
		return (EINVAL);
	error = copyin(data, &args, sizeof args);
	if (error)
		return (error);

	ma = mount_argsu(ma, "from", args.fspec, MAXPATHLEN);
	ma = mount_arg(ma, "export", &args.export, sizeof(args.export));
	error = kernel_mount(ma, flags);

	return (error);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). If the 'force' flag
 * is 0, the filesystem must be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) clear MNTK_SUSPEND2 and MNTK_SUSPENDED flags, allowing secondary
 *	   writers, if requested.
 *	6) invalidate all cached file data.
 *	7) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, int flags)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct ufsmount *ump;
	ufs2_daddr_t sblockloc;
	int i, blks, error;
	u_long size;
	int32_t *lp;

	ump = VFSTOUFS(mp);

	MNT_ILOCK(mp);
	if ((mp->mnt_flag & MNT_RDONLY) == 0 && (flags & FFSR_FORCE) == 0) {
		MNT_IUNLOCK(mp);
		return (EINVAL);
	}
	MNT_IUNLOCK(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	if (vinvalbuf(devvp, 0, 0, 0) != 0)
		panic("ffs_reload: dirty1");
	VOP_UNLOCK(devvp);

	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = VFSTOUFS(mp)->um_fs;
	if ((error = bread(devvp, btodb(fs->fs_sblockloc), fs->fs_sbsize,
	    NOCRED, &bp)) != 0)
		return (error);
	newfs = (struct fs *)bp->b_data;
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	    newfs->fs_magic != FS_UFS2_MAGIC) ||
	    newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		return (EIO);	/* XXX needs translation */
	}
	/*
	 * Preserve the summary information, read-only status, and
	 * superblock location by copying these fields into our new
	 * superblock before using it to update the existing superblock.
	 */
	newfs->fs_si = fs->fs_si;
	newfs->fs_ronly = fs->fs_ronly;
	sblockloc = fs->fs_sblockloc;
	bcopy(newfs, fs, (u_int)fs->fs_sbsize);
	brelse(bp);
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat_read(fs, VFSTOUFS(mp), sblockloc);
	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: reload pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(u_int8_t);
	free(fs->fs_csp, M_UFSMNT);
	space = malloc(size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error)
			return (error);
		bcopy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(u_int8_t);
	fs->fs_contigdirs = (u_int8_t *)space;
	bzero(fs->fs_contigdirs, size);
	if ((flags & FFSR_UNSUSPEND) != 0) {
		MNT_ILOCK(mp);
		mp->mnt_kern_flag &= ~(MNTK_SUSPENDED | MNTK_SUSPEND2);
		wakeup(&mp->mnt_flag);
		MNT_IUNLOCK(mp);
	}

loop:
	MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
		/*
		 * Skip syncer vnode.
		 */
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		/*
		 * Step 6: invalidate all cached file data.
		 */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 7: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error =
		    bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		if ((error = ffs_load_inode(bp, ip, fs, ip->i_number)) != 0) {
			brelse(bp);
			vput(vp);
			MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
			return (error);
		}
		ip->i_effnlink = ip->i_nlink;
		brelse(bp);
		vput(vp);
	}
	return (0);
}

/*
 * Common code for mount and mountroot
 */
static int
ffs_mountfs(struct vnode *odevvp, struct mount *mp, struct thread *td)
{
	struct ufsmount *ump;
	struct fs *fs;
	struct cdev *dev;
	int error, i, len, ronly;
	struct ucred *cred;
	struct g_consumer *cp;
	struct mount *nmp;
	struct vnode *devvp;
	struct fsfail_task *etp;
	int candelete, canspeedup;
	off_t loc;

	fs = NULL;
	ump = NULL;
	cred = td ? td->td_ucred : NOCRED;
	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;

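	/*
	 * Reading note (not in the original comments): mntfs_allocvp()
	 * gives this mount a private clone of the device vnode.  All FFS
	 * buffer traffic goes through the clone, while the original odevvp
	 * is marked BO_NOBUFS below so stray buffers cannot be created on
	 * it.
	 */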
	devvp = mntfs_allocvp(mp, odevvp);
	VOP_UNLOCK(odevvp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	KASSERT(devvp->v_type == VCHR, ("reclaimed devvp"));
	dev = devvp->v_rdev;
	KASSERT(dev->si_snapdata == NULL, ("non-NULL snapshot data"));
	if (atomic_cmpset_acq_ptr((uintptr_t *)&dev->si_mountpt, 0,
	    (uintptr_t)mp) == 0) {
		mntfs_freevp(devvp);
		return (EBUSY);
	}
	g_topology_lock();
	error = g_vfs_open(devvp, &cp, "ffs", ronly ? 0 : 1);
	g_topology_unlock();
	if (error != 0) {
		atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
		mntfs_freevp(devvp);
		return (error);
	}
	dev_ref(dev);
	devvp->v_bufobj.bo_ops = &ffs_ops;
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag |= BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	VOP_UNLOCK(devvp);
	if (dev->si_iosize_max != 0)
		mp->mnt_iosize_max = dev->si_iosize_max;
	if (mp->mnt_iosize_max > maxphys)
		mp->mnt_iosize_max = maxphys;
	if ((SBLOCKSIZE % cp->provider->sectorsize) != 0) {
		error = EINVAL;
		vfs_mount_error(mp,
		    "Invalid sectorsize %d for superblock size %d",
		    cp->provider->sectorsize, SBLOCKSIZE);
		goto out;
	}
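	/*
	 * Reading note (an interpretation, not from the original comments):
	 * for the root filesystem, STDSB_NOHASHFAIL tolerates a superblock
	 * check-hash failure so that the system still has a chance to come
	 * up and be repaired with fsck(8).
	 */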
	/* fetch the superblock and summary information */
	loc = STDSB;
	if ((mp->mnt_flag & MNT_ROOTFS) != 0)
		loc = STDSB_NOHASHFAIL;
	if ((error = ffs_sbget(devvp, &fs, loc, M_UFSMNT, ffs_use_bread)) != 0)
		goto out;
	fs->fs_flags &= ~FS_UNCLEAN;
	if (fs->fs_clean == 0) {
		fs->fs_flags |= FS_UNCLEAN;
		if (ronly || (mp->mnt_flag & MNT_FORCE) ||
		    ((fs->fs_flags & (FS_SUJ | FS_NEEDSFSCK)) == 0 &&
		    (fs->fs_flags & FS_DOSOFTDEP))) {
			printf("WARNING: %s was not properly dismounted\n",
			    fs->fs_fsmnt);
		} else {
			vfs_mount_error(mp, "R/W mount of %s denied. %s%s",
			    fs->fs_fsmnt, "Filesystem is not clean - run fsck.",
			    (fs->fs_flags & FS_SUJ) == 0 ? "" :
			    " Forced mount will invalidate journal contents");
			error = EPERM;
			goto out;
		}
		if ((fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) &&
		    (mp->mnt_flag & MNT_FORCE)) {
			printf("WARNING: %s: lost blocks %jd files %d\n",
			    fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
			fs->fs_pendingblocks = 0;
			fs->fs_pendinginodes = 0;
		}
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: %s: mount pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	if ((fs->fs_flags & FS_GJOURNAL) != 0) {
#ifdef UFS_GJOURNAL
		/*
		 * Get journal provider name.
		 */
		len = 1024;
		mp->mnt_gjprovider = malloc((u_long)len, M_UFSMNT, M_WAITOK);
		if (g_io_getattr("GJOURNAL::provider", cp, &len,
		    mp->mnt_gjprovider) == 0) {
			mp->mnt_gjprovider = realloc(mp->mnt_gjprovider, len,
			    M_UFSMNT, M_WAITOK);
			MNT_ILOCK(mp);
			mp->mnt_flag |= MNT_GJOURNAL;
			MNT_IUNLOCK(mp);
		} else {
			if ((mp->mnt_flag & MNT_RDONLY) == 0)
				printf("WARNING: %s: GJOURNAL flag on fs "
				    "but no gjournal provider below\n",
				    mp->mnt_stat.f_mntonname);
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
#else
		printf("WARNING: %s: GJOURNAL flag on fs but no "
		    "UFS_GJOURNAL support\n", mp->mnt_stat.f_mntonname);
#endif
	} else {
		mp->mnt_gjprovider = NULL;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
	ump->um_cp = cp;
	ump->um_bo = &devvp->v_bufobj;
	ump->um_fs = fs;
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_fstype = UFS1;
		ump->um_balloc = ffs_balloc_ufs1;
	} else {
		ump->um_fstype = UFS2;
		ump->um_balloc = ffs_balloc_ufs2;
	}
	ump->um_blkatoff = ffs_blkatoff;
	ump->um_truncate = ffs_truncate;
	ump->um_update = ffs_update;
	ump->um_valloc = ffs_valloc;
	ump->um_vfree = ffs_vfree;
	ump->um_ifree = ffs_ifree;
	ump->um_rdonly = ffs_rdonly;
	ump->um_snapgone = ffs_snapgone;
	if ((mp->mnt_flag & MNT_UNTRUSTED) != 0)
		ump->um_check_blkno = ffs_check_blkno;
	else
		ump->um_check_blkno = NULL;
	mtx_init(UFS_MTX(ump), "FFS", "FFS Lock", MTX_DEF);
	sx_init(&ump->um_checkpath_lock, "uchpth");
	ffs_oldfscompat_read(fs, ump, fs->fs_sblockloc);
	fs->fs_ronly = ronly;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = fs->fs_id[0];
	mp->mnt_stat.f_fsid.val[1] = fs->fs_id[1];
	nmp = NULL;
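	/*
	 * If the superblock carries no filesystem id, or the id collides
	 * with an already-mounted filesystem, generate a fresh one.
	 */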
	if (fs->fs_id[0] == 0 || fs->fs_id[1] == 0 ||
	    (nmp = vfs_getvfs(&mp->mnt_stat.f_fsid))) {
		if (nmp)
			vfs_rel(nmp);
		vfs_getnewfsid(mp);
	}
	ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if ((fs->fs_flags & FS_MULTILABEL) != 0) {
#ifdef MAC
		MNT_ILOCK(mp);
		mp->mnt_flag |= MNT_MULTILABEL;
		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: multilabel flag on fs but "
		    "no MAC support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_NFS4ACLS)
			printf("WARNING: %s: ACLs flag on fs conflicts with "
			    "\"nfsv4acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_NFS4ACLS;
		mp->mnt_flag |= MNT_ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: ACLs flag on fs but no ACLs support\n",
		    mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_NFS4ACLS) != 0) {
#ifdef UFS_ACL
		MNT_ILOCK(mp);

		if (mp->mnt_flag & MNT_ACLS)
			printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
			    "with \"acls\" mount option; option ignored\n",
			    mp->mnt_stat.f_mntonname);
		mp->mnt_flag &= ~MNT_ACLS;
		mp->mnt_flag |= MNT_NFS4ACLS;

		MNT_IUNLOCK(mp);
#else
		printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
		    "ACLs support\n", mp->mnt_stat.f_mntonname);
#endif
	}
	if ((fs->fs_flags & FS_TRIM) != 0) {
		len = sizeof(int);
		if (g_io_getattr("GEOM::candelete", cp, &len,
		    &candelete) == 0) {
			if (candelete)
				ump->um_flags |= UM_CANDELETE;
			else
				printf("WARNING: %s: TRIM flag on fs but disk "
				    "does not support TRIM\n",
				    mp->mnt_stat.f_mntonname);
		} else {
			printf("WARNING: %s: TRIM flag on fs but disk does "
			    "not confirm that it supports TRIM\n",
			    mp->mnt_stat.f_mntonname);
		}
		if (((ump->um_flags) & UM_CANDELETE) != 0) {
			ump->um_trim_tq = taskqueue_create("trim", M_WAITOK,
			    taskqueue_thread_enqueue, &ump->um_trim_tq);
			taskqueue_start_threads(&ump->um_trim_tq, 1, PVFS,
			    "%s trim", mp->mnt_stat.f_mntonname);
			ump->um_trimhash = hashinit(MAXTRIMIO, M_TRIM,
			    &ump->um_trimlisthashsize);
		}
	}

	len = sizeof(int);
	if (g_io_getattr("GEOM::canspeedup", cp, &len, &canspeedup) == 0) {
		if (canspeedup)
			ump->um_flags |= UM_CANSPEEDUP;
	}

	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_odevvp = odevvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
#ifdef UFS_EXTATTR
	ufs_extattr_uepm_init(&ump->um_extattr);
#endif
	/*
	 * Set FS local "last mounted on" information (NULL pad)
	 */
	bzero(fs->fs_fsmnt, MAXMNTLEN);
	strlcpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname, MAXMNTLEN);
	mp->mnt_stat.f_iosize = fs->fs_bsize;

	if (mp->mnt_flag & MNT_ROOTFS) {
		/*
		 * Root mount; update timestamp in mount structure.
		 * This will be used by the common root mount code
		 * to update the system clock.
		 */
		mp->mnt_time = fs->fs_time;
	}

	if (ronly == 0) {
		fs->fs_mtime = time_second;
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    (error = softdep_mount(devvp, mp, fs, cred)) != 0) {
			ffs_flushfiles(mp, FORCECLOSE, td);
			goto out;
		}
		if (fs->fs_snapinum[0] != 0)
			ffs_snapshot_mount(mp);
		fs->fs_fmod = 1;
		fs->fs_clean = 0;
		(void) ffs_sbupdate(ump, MNT_WAIT, 0);
	}
	/*
	 * Initialize filesystem state information in mount struct.
	 */
	MNT_ILOCK(mp);
	mp->mnt_kern_flag |= MNTK_LOOKUP_SHARED | MNTK_EXTENDED_SHARED |
	    MNTK_NO_IOPF | MNTK_UNMAPPED_BUFS | MNTK_USES_BCACHE;
	MNT_IUNLOCK(mp);
#ifdef UFS_EXTATTR
#ifdef UFS_EXTATTR_AUTOSTART
	/*
	 *
	 * Auto-starting does the following:
	 *	- check for /.attribute in the fs, and extattr_start if so
	 *	- for each file in .attribute, enable that file with
	 *	  an attribute of the same name.
	 * Not clear how to report errors -- probably eat them.
	 * This would all happen while the filesystem was busy/not
	 * available, so would effectively be "atomic".
	 */
	(void) ufs_extattr_autostart(mp, td);
#endif /* !UFS_EXTATTR_AUTOSTART */
#endif /* !UFS_EXTATTR */
	etp = malloc(sizeof *ump->um_fsfail_task, M_UFSMNT, M_WAITOK | M_ZERO);
	etp->fsid = mp->mnt_stat.f_fsid;
	ump->um_fsfail_task = etp;
	return (0);
out:
	if (fs != NULL) {
		free(fs->fs_csp, M_UFSMNT);
		free(fs->fs_si, M_UFSMNT);
		free(fs, M_UFSMNT);
	}
	if (cp != NULL) {
		g_topology_lock();
		g_vfs_close(cp);
		g_topology_unlock();
	}
	if (ump != NULL) {
		mtx_destroy(UFS_MTX(ump));
		sx_destroy(&ump->um_checkpath_lock);
		if (mp->mnt_gjprovider != NULL) {
			free(mp->mnt_gjprovider, M_UFSMNT);
			mp->mnt_gjprovider = NULL;
		}
		MPASS(ump->um_softdep == NULL);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	BO_LOCK(&odevvp->v_bufobj);
	odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&dev->si_mountpt, 0);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	mntfs_freevp(devvp);
	dev_rel(dev);
	return (error);
}

/*
 * A read function for use by filesystem-layer routines.
 */
static int
ffs_use_bread(void *devfd, off_t loc, void **bufp, int size)
{
	struct buf *bp;
	int error;

	KASSERT(*bufp == NULL, ("ffs_use_bread: non-NULL *bufp %p\n", *bufp));
	*bufp = malloc(size, M_UFSMNT, M_WAITOK);
	if ((error = bread((struct vnode *)devfd, btodb(loc), size, NOCRED,
	    &bp)) != 0)
		return (error);
	bcopy(bp->b_data, *bufp, size);
	bp->b_flags |= B_INVAL | B_NOCACHE;
	brelse(bp);
	return (0);
}

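/*
 * Debugging knob (description inferred from its use below, not from the
 * original comments): when debug.bigcgs is set, ffs_oldfscompat_read()
 * substitutes fs_bsize for fs_cgsize, apparently to cope with old
 * filesystems whose cylinder groups are oversized, and
 * ffs_oldfscompat_write() restores the saved value before the superblock
 * goes back to disk.
 */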
static int bigcgs = 0;
SYSCTL_INT(_debug, OID_AUTO, bigcgs, CTLFLAG_RW, &bigcgs, 0, "");

/*
 * Sanity checks for loading old filesystem superblocks.
 * See ffs_oldfscompat_write below for unwound actions.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
static void
ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump,
    ufs2_daddr_t sblockloc)
{
	off_t maxfilesize;

	/*
	 * If not yet done, update fs_flags location and value of fs_sblockloc.
	 */
	if ((fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		fs->fs_flags = fs->fs_old_flags;
		fs->fs_old_flags |= FS_FLAGS_UPDATED;
		fs->fs_sblockloc = sblockloc;
	}
	/*
	 * If not yet done, update UFS1 superblock with new wider fields.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_time = fs->fs_old_time;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_csaddr = fs->fs_old_csaddr;
		fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC &&
	    fs->fs_old_inodefmt < FS_44INODEFMT) {
		fs->fs_maxfilesize = ((uint64_t)1 << 31) - 1;
		fs->fs_qbmask = ~fs->fs_bmask;
		fs->fs_qfmask = ~fs->fs_fmask;
	}
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		ump->um_savedmaxfilesize = fs->fs_maxfilesize;
		maxfilesize = (uint64_t)0x80000000 * fs->fs_bsize - 1;
		if (fs->fs_maxfilesize > maxfilesize)
			fs->fs_maxfilesize = maxfilesize;
	}
	/* Compatibility for old filesystems */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (bigcgs) {
		fs->fs_save_cgsize = fs->fs_cgsize;
		fs->fs_cgsize = fs->fs_bsize;
	}
}

/*
 * Unwinding superblock updates for old filesystems.
 * See ffs_oldfscompat_read above for details.
 *
 * XXX - Parts get retired eventually.
 * Unfortunately new bits get added.
 */
void
ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
{

	/*
	 * Copy back UFS2 updated fields that UFS1 inspects.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC) {
		fs->fs_old_time = fs->fs_time;
		fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
		fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
		fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
		fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
		fs->fs_maxfilesize = ump->um_savedmaxfilesize;
	}
	if (bigcgs) {
		fs->fs_cgsize = fs->fs_save_cgsize;
		fs->fs_save_cgsize = 0;
	}
}

/*
 * unmount system call
 */
static int
ffs_unmount(struct mount *mp, int mntflags)
{
	struct thread *td;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, flags, susp;
#ifdef UFS_EXTATTR
	int e_restart;
#endif

	flags = 0;
	td = curthread;
	fs = ump->um_fs;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	susp = fs->fs_ronly == 0;
#ifdef UFS_EXTATTR
	if ((error = ufs_extattr_stop(mp, td))) {
		if (error != EOPNOTSUPP)
			printf("WARNING: unmount %s: ufs_extattr_stop "
			    "returned errno %d\n", mp->mnt_stat.f_mntonname,
			    error);
		e_restart = 0;
	} else {
		ufs_extattr_uepm_destroy(&ump->um_extattr);
		e_restart = 1;
	}
#endif
	if (susp) {
		error = vfs_write_suspend_umnt(mp);
		if (error != 0)
			goto fail1;
	}
	if (MOUNTEDSOFTDEP(mp))
		error = softdep_flushfiles(mp, flags, td);
	else
		error = ffs_flushfiles(mp, flags, td);
	if (error != 0 && !ffs_fsfail_cleanup(ump, error))
		goto fail;

	UFS_LOCK(ump);
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("WARNING: unmount %s: pending error: blocks %jd "
		    "files %d\n", fs->fs_fsmnt, (intmax_t)fs->fs_pendingblocks,
		    fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	UFS_UNLOCK(ump);
	if (MOUNTEDSOFTDEP(mp))
		softdep_unmount(mp);
	MPASS(ump->um_softdep == NULL);
	if (fs->fs_ronly == 0) {
		fs->fs_clean = fs->fs_flags & (FS_UNCLEAN|FS_NEEDSFSCK) ? 0 : 1;
		error = ffs_sbupdate(ump, MNT_WAIT, 0);
		if (ffs_fsfail_cleanup(ump, error))
			error = 0;
		if (error != 0 && !ffs_fsfail_cleanup(ump, error)) {
			fs->fs_clean = 0;
			goto fail;
		}
	}
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
	if (ump->um_trim_tq != NULL) {
		while (ump->um_trim_inflight != 0)
			pause("ufsutr", hz);
		taskqueue_drain_all(ump->um_trim_tq);
		taskqueue_free(ump->um_trim_tq);
		free(ump->um_trimhash, M_TRIM);
	}
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	g_topology_lock();
	g_vfs_close(ump->um_cp);
	g_topology_unlock();
	BO_LOCK(&ump->um_odevvp->v_bufobj);
	ump->um_odevvp->v_bufobj.bo_flag &= ~BO_NOBUFS;
	BO_UNLOCK(&ump->um_odevvp->v_bufobj);
	atomic_store_rel_ptr((uintptr_t *)&ump->um_dev->si_mountpt, 0);
	mntfs_freevp(ump->um_devvp);
	vrele(ump->um_odevvp);
	dev_rel(ump->um_dev);
	mtx_destroy(UFS_MTX(ump));
	sx_destroy(&ump->um_checkpath_lock);
	if (mp->mnt_gjprovider != NULL) {
		free(mp->mnt_gjprovider, M_UFSMNT);
		mp->mnt_gjprovider = NULL;
	}
	free(fs->fs_csp, M_UFSMNT);
	free(fs->fs_si, M_UFSMNT);
	free(fs, M_UFSMNT);
	if (ump->um_fsfail_task != NULL)
		free(ump->um_fsfail_task, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	MNT_ILOCK(mp);
	mp->mnt_flag &= ~MNT_LOCAL;
	MNT_IUNLOCK(mp);
	if (td->td_su == mp) {
		td->td_su = NULL;
		vfs_rel(mp);
	}
	return (error);

fail:
	if (susp)
		vfs_write_resume(mp, VR_START_WRITE);
fail1:
#ifdef UFS_EXTATTR
	if (e_restart) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		(void) ufs_extattr_autostart(mp, td);
#endif
	}
#endif

	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(struct mount *mp, int flags, struct thread *td)
{
	struct ufsmount *ump;
	int qerror, error;

	ump = VFSTOUFS(mp);
	qerror = 0;
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		error = vflush(mp, 0, SKIPSYSTEM|flags, td);
		if (error)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			error = quotaoff(td, mp, i);
			if (error != 0) {
				if ((flags & EARLYFLUSH) == 0)
					return (error);
				else
					qerror = error;
			}
		}

		/*
		 * Here we fall through to vflush again to ensure that
		 * we have gotten rid of all the system vnodes, unless
		 * quotas must not be closed.
		 */
	}
#endif
	/* devvp is not locked there */
	if (ump->um_devvp->v_vflag & VV_COPYONWRITE) {
		if ((error = vflush(mp, 0, SKIPSYSTEM | flags, td)) != 0)
			return (error);
		ffs_snapshot_unmount(mp);
		flags |= FORCECLOSE;
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}

	/*
	 * Do not close system files if quotas were not closed, to be
	 * able to sync the remaining dquots. The freeblks softupdate
	 * workitems might hold a reference on a dquot, preventing
	 * quotaoff() from completing. Next round of
	 * softdep_flushworklist() iteration should process the
	 * blockers, allowing the next run of quotaoff() to finally
	 * flush held dquots.
	 *
	 * Otherwise, flush all the files.
	 */
	if (qerror == 0 && (error = vflush(mp, 0, flags, td)) != 0)
		return (error);

	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, MNT_WAIT, td);
	VOP_UNLOCK(ump->um_devvp);
	return (error);
}

/*
 * Get filesystem statistics.
 */
static int
ffs_statfs(struct mount *mp, struct statfs *sbp)
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_UFS1_MAGIC && fs->fs_magic != FS_UFS2_MAGIC)
		panic("ffs_statfs");
	sbp->f_version = STATFS_VERSION;
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	UFS_LOCK(ump);
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_bavail = freespace(fs, fs->fs_minfree) +
	    dbtofsb(fs, fs->fs_pendingblocks);
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	UFS_UNLOCK(ump);
	sbp->f_namemax = UFS_MAXNAMLEN;
	return (0);
}

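/*
 * Does the inode carry timestamp or modification flags pending an inode
 * block update?  Used by the lazy sync path below to decide whether an
 * update is worth scheduling.
 */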
static bool
sync_doupdate(struct inode *ip)
{

	return ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED |
	    IN_UPDATE)) != 0);
}

static int
ffs_sync_lazy_filter(struct vnode *vp, void *arg __unused)
{
	struct inode *ip;

	/*
	 * Flags are safe to access because ->v_data invalidation
	 * is held off by listmtx.
	 */
	if (vp->v_type == VNON)
		return (false);
	ip = VTOI(vp);
	if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0)
		return (false);
	return (true);
}

/*
 * For a lazy sync, we only care about access times, quotas and the
 * superblock.  Other filesystem changes are already converted to
 * cylinder group blocks or inode blocks updates and are written to
 * disk by syncer.
 */
static int
ffs_sync_lazy(struct mount *mp)
{
	struct vnode *mvp, *vp;
	struct inode *ip;
	int allerror, error;

	allerror = 0;
	if ((mp->mnt_flag & MNT_NOATIME) != 0) {
#ifdef QUOTA
		qsync(mp);
#endif
		goto sbupdate;
	}
	MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, ffs_sync_lazy_filter, NULL) {
		if (vp->v_type == VNON) {
			VI_UNLOCK(vp);
			continue;
		}
		ip = VTOI(vp);

		/*
		 * The IN_ACCESS flag is converted to IN_MODIFIED by
		 * ufs_close() and ufs_getattr() by the calls to
		 * ufs_itimes_locked(), without subsequent UFS_UPDATE().
		 * Test also all the other timestamp flags too, to pick up
		 * any other cases that could be missed.
		 */
		if (!sync_doupdate(ip) && (vp->v_iflag & VI_OWEINACT) == 0) {
			VI_UNLOCK(vp);
			continue;
		}
		if ((error = vget(vp,
		    LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK)) != 0)
			continue;
#ifdef QUOTA
		qsyncvp(vp);
#endif
		if (sync_doupdate(ip))
			error = ffs_update(vp, 0);
		if (error != 0)
			allerror = error;
		vput(vp);
	}
sbupdate:
	if (VFSTOUFS(mp)->um_fs->fs_fmod != 0 &&
	    (error = ffs_sbupdate(VFSTOUFS(mp), MNT_LAZY, 0)) != 0)
		allerror = error;
	return (allerror);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked busy using
 * vfs_busy().
 */
static int
ffs_sync(struct mount *mp, int waitfor)
{
	struct vnode *mvp, *vp, *devvp;
	struct thread *td;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, count, lockreq, allerror = 0;
	int suspend;
	int suspended;
	int secondary_writes;
	int secondary_accwrites;
	int softdep_deps;
	int softdep_accdeps;
	struct bufobj *bo;

	suspend = 0;
	suspended = 0;
	td = curthread;
	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0)
		panic("%s: ffs_sync: modification on read-only filesystem",
		    fs->fs_fsmnt);
	if (waitfor == MNT_LAZY) {
		if (!rebooting)
			return (ffs_sync_lazy(mp));
		waitfor = MNT_NOWAIT;
	}

	/*
	 * Write back each (modified) inode.
	 */
	lockreq = LK_EXCLUSIVE | LK_NOWAIT;
	if (waitfor == MNT_SUSPEND) {
		suspend = 1;
		waitfor = MNT_WAIT;
	}
	if (waitfor == MNT_WAIT)
		lockreq = LK_EXCLUSIVE;
	lockreq |= LK_INTERLOCK | LK_SLEEPFAIL;
loop:
	/* Grab snapshot of secondary write counts */
	MNT_ILOCK(mp);
	secondary_writes = mp->mnt_secondary_writes;
	secondary_accwrites = mp->mnt_secondary_accwrites;
	MNT_IUNLOCK(mp);

	/* Grab snapshot of softdep dependency counts */
	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
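	/*
	 * Reading note (not in the original comments): the baseline counts
	 * taken above are handed to softdep_check_suspend() below; if
	 * writers or dependencies advanced while we were flushing, more
	 * work arrived and the suspension attempt loops to flush again.
	 */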
1760
1761 MNT_VNODE_FOREACH_ALL(vp, mp, mvp) {
1762 /*
1763 * Depend on the vnode interlock to keep things stable enough
1764 * for a quick test. Since there might be hundreds of
1765 * thousands of vnodes, we cannot afford even a subroutine
1766 * call unless there's a good chance that we have work to do.
1767 */
1768 if (vp->v_type == VNON) {
1769 VI_UNLOCK(vp);
1770 continue;
1771 }
1772 ip = VTOI(vp);
1773 if ((ip->i_flag &
1774 (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
1775 vp->v_bufobj.bo_dirty.bv_cnt == 0) {
1776 VI_UNLOCK(vp);
1777 continue;
1778 }
1779 if ((error = vget(vp, lockreq)) != 0) {
1780 if (error == ENOENT || error == ENOLCK) {
1781 MNT_VNODE_FOREACH_ALL_ABORT(mp, mvp);
1782 goto loop;
1783 }
1784 continue;
1785 }
1786 #ifdef QUOTA
1787 qsyncvp(vp);
1788 #endif
1789 for (;;) {
1790 error = ffs_syncvnode(vp, waitfor, 0);
1791 if (error == ERELOOKUP)
1792 continue;
1793 if (error != 0)
1794 allerror = error;
1795 break;
1796 }
1797 vput(vp);
1798 }
1799 /*
1800 * Force stale filesystem control information to be flushed.
1801 */
1802 if (waitfor == MNT_WAIT || rebooting) {
1803 if ((error = softdep_flushworklist(ump->um_mountp, &count, td)))
1804 allerror = error;
1805 if (ffs_fsfail_cleanup(ump, allerror))
1806 allerror = 0;
1807 /* Flushed work items may create new vnodes to clean */
1808 if (allerror == 0 && count)
1809 goto loop;
1810 }
1811
1812 devvp = ump->um_devvp;
1813 bo = &devvp->v_bufobj;
1814 BO_LOCK(bo);
1815 if (bo->bo_numoutput > 0 || bo->bo_dirty.bv_cnt > 0) {
1816 BO_UNLOCK(bo);
1817 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1818 error = VOP_FSYNC(devvp, waitfor, td);
1819 VOP_UNLOCK(devvp);
1820 if (MOUNTEDSOFTDEP(mp) && (error == 0 || error == EAGAIN))
1821 error = ffs_sbupdate(ump, waitfor, 0);
1822 if (error != 0)
1823 allerror = error;
1824 if (ffs_fsfail_cleanup(ump, allerror))
1825 allerror = 0;
1826 if (allerror == 0 && waitfor == MNT_WAIT)
1827 goto loop;
1828 } else if (suspend != 0) {
1829 if (softdep_check_suspend(mp,
1830 devvp,
1831 softdep_deps,
1832 softdep_accdeps,
1833 secondary_writes,
1834 secondary_accwrites) != 0) {
1835 MNT_IUNLOCK(mp);
1836 goto loop; /* More work needed */
1837 }
1838 mtx_assert(MNT_MTX(mp), MA_OWNED);
1839 mp->mnt_kern_flag |= MNTK_SUSPEND2 | MNTK_SUSPENDED;
1840 MNT_IUNLOCK(mp);
1841 suspended = 1;
1842 } else
1843 BO_UNLOCK(bo);
1844 /*
1845 * Write back modified superblock.
1846 */
1847 if (fs->fs_fmod != 0 &&
1848 (error = ffs_sbupdate(ump, waitfor, suspended)) != 0)
1849 allerror = error;
1850 if (ffs_fsfail_cleanup(ump, allerror))
1851 allerror = 0;
1852 return (allerror);
1853 }
1854
1855 int
ffs_vget(mp,ino,flags,vpp)1856 ffs_vget(mp, ino, flags, vpp)
1857 struct mount *mp;
1858 ino_t ino;
1859 int flags;
1860 struct vnode **vpp;
1861 {
1862 return (ffs_vgetf(mp, ino, flags, vpp, 0));
1863 }
1864
1865 int
ffs_vgetf(mp,ino,flags,vpp,ffs_flags)1866 ffs_vgetf(mp, ino, flags, vpp, ffs_flags)
1867 struct mount *mp;
1868 ino_t ino;
1869 int flags;
1870 struct vnode **vpp;
1871 int ffs_flags;
1872 {
1873 struct fs *fs;
1874 struct inode *ip;
1875 struct ufsmount *ump;
1876 struct buf *bp;
1877 struct vnode *vp;
1878 daddr_t dbn;
1879 int error;
1880
1881 MPASS((ffs_flags & (FFSV_REPLACE | FFSV_REPLACE_DOOMED)) == 0 ||
1882 (flags & LK_EXCLUSIVE) != 0);
1883
1884 error = vfs_hash_get(mp, ino, flags, curthread, vpp, NULL, NULL);
1885 if (error != 0)
1886 return (error);
1887 if (*vpp != NULL) {
1888 if ((ffs_flags & FFSV_REPLACE) == 0 ||
1889 ((ffs_flags & FFSV_REPLACE_DOOMED) == 0 ||
1890 !VN_IS_DOOMED(*vpp)))
1891 return (0);
1892 vgone(*vpp);
1893 vput(*vpp);
1894 }
1895
1896 /*
1897 * We must promote to an exclusive lock for vnode creation. This
1898 * can happen if lookup is passed LOCKSHARED.
1899 */
1900 if ((flags & LK_TYPE_MASK) == LK_SHARED) {
1901 flags &= ~LK_TYPE_MASK;
1902 flags |= LK_EXCLUSIVE;
1903 }
1904
1905 /*
1906 * We do not lock vnode creation as it is believed to be too
1907 * expensive for such rare case as simultaneous creation of vnode
1908 * for same ino by different processes. We just allow them to race
1909 * and check later to decide who wins. Let the race begin!
1910 */

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	ip = uma_zalloc_smr(uma_inode, M_WAITOK | M_ZERO);

	/* Allocate a new vnode/inode. */
	error = getnewvnode("ufs", mp, fs->fs_magic == FS_UFS1_MAGIC ?
	    &ffs_vnodeops1 : &ffs_vnodeops2, &vp);
	if (error) {
		*vpp = NULL;
		uma_zfree_smr(uma_inode, ip);
		return (error);
	}
	/*
	 * FFS supports recursive locking.
	 */
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE | LK_NOWITNESS, NULL);
	VN_LOCK_AREC(vp);
	vp->v_data = ip;
	vp->v_bufobj.bo_bsize = fs->fs_bsize;
	ip->i_vnode = vp;
	ip->i_ump = ump;
	ip->i_number = ino;
	ip->i_ea_refs = 0;
	ip->i_nextclustercg = -1;
	ip->i_flag = fs->fs_magic == FS_UFS1_MAGIC ? 0 : IN_UFS2;
	ip->i_mode = 0; /* ensure error cases below throw away vnode */
#ifdef DIAGNOSTIC
	ufs_init_trackers(ip);
#endif
#ifdef QUOTA
	{
		int i;
		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

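	/*
	 * Hook the vnode into the mount point's vnode list.
	 * FFSV_FORCEINSMQ lets insmntque() succeed even while the
	 * mount is going away (used, e.g., by the snapshot code).
	 */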
	if (ffs_flags & FFSV_FORCEINSMQ)
		vp->v_vflag |= VV_FORCEINSMQ;
	error = insmntque(vp, mp);
	if (error != 0) {
		uma_zfree_smr(uma_inode, ip);
		*vpp = NULL;
		return (error);
	}
	vp->v_vflag &= ~VV_FORCEINSMQ;
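	/*
	 * Publish the vnode in the inode hash. If another thread won
	 * the race, vfs_hash_insert() disposes of our vnode and hands
	 * back the winner in *vpp.
	 */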
	error = vfs_hash_insert(vp, ino, flags, curthread, vpp, NULL, NULL);
	if (error != 0)
		return (error);
	if (*vpp != NULL) {
		/*
		 * Calls from ffs_valloc() (i.e. FFSV_REPLACE set)
		 * operate on an empty inode, which must not be found
		 * by other threads until it has been fully filled in.
		 * The vnode for such an inode must not be re-inserted
		 * on the hash by another thread after we removed it at
		 * the beginning.
		 */
		MPASS((ffs_flags & FFSV_REPLACE) == 0);
		return (0);
	}

	/* Read in the disk contents for the inode, copy into the inode. */
	dbn = fsbtodb(fs, ino_to_fsba(fs, ino));
	error = ffs_breadz(ump, ump->um_devvp, dbn, dbn, (int)fs->fs_bsize,
	    NULL, NULL, 0, NOCRED, 0, NULL, &bp);
	if (error != 0) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	if (I_IS_UFS1(ip))
		ip->i_din1 = uma_zalloc(uma_ufs1, M_WAITOK);
	else
		ip->i_din2 = uma_zalloc(uma_ufs2, M_WAITOK);
	if ((error = ffs_load_inode(bp, ip, fs, ino)) != 0) {
		bqrelse(bp);
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}
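	/*
	 * Set up i_effnlink. With soft updates the on-disk link count
	 * may run ahead of the effective one, so let the softdep code
	 * reconstruct it; otherwise the two are identical.
	 */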
	if (DOINGSOFTDEP(vp) && (!fs->fs_ronly ||
	    (ffs_flags & FFSV_FORCEINODEDEP) != 0))
		softdep_load_inodeblock(ip);
	else
		ip->i_effnlink = ip->i_nlink;
	bqrelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, I_IS_UFS1(ip) ? &ffs_fifoops1 : &ffs_fifoops2,
	    &vp);
	if (error) {
		vgone(vp);
		vput(vp);
		*vpp = NULL;
		return (error);
	}

	/*
	 * Finish inode initialization.
	 */
	if (vp->v_type != VFIFO) {
		/* FFS supports shared locking for all files except fifos. */
		VN_LOCK_ASHARE(vp);
	}

	/*
	 * Set up a generation number for this inode if it does not
	 * already have one. This should only happen on old filesystems.
	 */
	if (ip->i_gen == 0) {
		while (ip->i_gen == 0)
			ip->i_gen = arc4random();
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
			UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
			DIP_SET(ip, i_gen, ip->i_gen);
		}
	}
#ifdef MAC
	if ((mp->mnt_flag & MNT_MULTILABEL) && ip->i_mode) {
		/*
		 * If this vnode is already allocated, and we're running
		 * multi-label, attempt to perform a label association
		 * from the extended attributes on the inode.
		 */
		error = mac_vnode_associate_extattr(mp, vp);
		if (error) {
			/* ufs_inactive will release ip->i_devvp ref. */
			vgone(vp);
			vput(vp);
			*vpp = NULL;
			return (error);
		}
	}
#endif

	*vpp = vp;
	return (0);
}
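
/*
 * A minimal usage sketch for ffs_vgetf() above (hypothetical caller,
 * error handling trimmed to the essentials): fetch the vnode for
 * inode 'ino' on mount 'mp' with an exclusive lock, use it, then
 * unlock and release it with vput().
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp, 0);
 *	if (error == 0) {
 *		... operate on VTOI(vp) ...
 *		vput(vp);
 *	}
 */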

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - for UFS2 check that the inode number is initialized
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
static int
ffs_fhtovp(mp, fhp, flags, vpp)
	struct mount *mp;
	struct fid *fhp;
	int flags;
	struct vnode **vpp;
{
	struct ufid *ufhp;

	ufhp = (struct ufid *)fhp;
	return (ffs_inotovp(mp, ufhp->ufid_ino, ufhp->ufid_gen, flags,
	    vpp, 0));
}

int
ffs_inotovp(mp, ino, gen, lflags, vpp, ffs_flags)
	struct mount *mp;
	ino_t ino;
	u_int64_t gen;
	int lflags;
	struct vnode **vpp;
	int ffs_flags;
{
	struct ufsmount *ump;
	struct vnode *nvp;
	struct inode *ip;
	struct fs *fs;
	struct cg *cgp;
	struct buf *bp;
	u_int cg;
	int error;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	*vpp = NULL;

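	/* Reject inode numbers outside the valid range for this volume. */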
	if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);

	/*
	 * Need to check if inode is initialized because UFS2 does lazy
	 * initialization and nfs_fhtovp can offer arbitrary inode numbers.
	 */
	if (fs->fs_magic == FS_UFS2_MAGIC) {
		cg = ino_to_cg(fs, ino);
		error = ffs_getcg(fs, ump->um_devvp, cg, 0, &bp, &cgp);
		if (error != 0)
			return (error);
		if (ino >= cg * fs->fs_ipg + cgp->cg_initediblk) {
			brelse(bp);
			return (ESTALE);
		}
		brelse(bp);
	}

	error = ffs_vgetf(mp, ino, lflags, &nvp, ffs_flags);
	if (error != 0)
		return (error);

	ip = VTOI(nvp);
	if (ip->i_mode == 0 || ip->i_gen != gen || ip->i_effnlink <= 0) {
		if (ip->i_mode == 0)
			vgone(nvp);
		vput(nvp);
		return (ESTALE);
	}

	vnode_create_vobject(nvp, DIP(ip, i_size), curthread);
	*vpp = nvp;
	return (0);
}

/*
 * Initialize the filesystem.
 */
static int
ffs_init(vfsp)
	struct vfsconf *vfsp;
{

	ffs_susp_initialize();
	softdep_initialize();
	return (ufs_init(vfsp));
}

/*
 * Undo the work of ffs_init().
 */
static int
ffs_uninit(vfsp)
	struct vfsconf *vfsp;
{
	int ret;

	ret = ufs_uninit(vfsp);
	softdep_uninitialize();
	ffs_susp_uninitialize();
	taskqueue_drain_all(taskqueue_thread);
	return (ret);
}

/*
 * Structure used to pass information from ffs_sbupdate to its
 * helper routine ffs_use_bwrite.
 */
struct devfd {
	struct ufsmount *ump;
	struct buf *sbbp;
	int waitfor;
	int suspended;
	int error;
};

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(ump, waitfor, suspended)
	struct ufsmount *ump;
	int waitfor;
	int suspended;
{
	struct fs *fs;
	struct buf *sbbp;
	struct devfd devfd;

	fs = ump->um_fs;
	if (fs->fs_ronly == 1 &&
	    (ump->um_mountp->mnt_flag & (MNT_RDONLY | MNT_UPDATE)) !=
	    (MNT_RDONLY | MNT_UPDATE))
		panic("ffs_sbupdate: write read-only filesystem");
	/*
	 * We use the superblock's buf to serialize calls to ffs_sbupdate().
	 */
	sbbp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
	    (int)fs->fs_sbsize, 0, 0, 0);
	/*
	 * Initialize info needed for write function.
	 */
	devfd.ump = ump;
	devfd.sbbp = sbbp;
	devfd.waitfor = waitfor;
	devfd.suspended = suspended;
	devfd.error = 0;
	return (ffs_sbput(&devfd, fs, fs->fs_sblockloc, ffs_use_bwrite));
}

/*
 * Write function for use by filesystem-layer routines.
 */
static int
ffs_use_bwrite(void *devfd, off_t loc, void *buf, int size)
{
	struct devfd *devfdp;
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	int error;

	devfdp = devfd;
	ump = devfdp->ump;
	fs = ump->um_fs;
	/*
	 * Writing the superblock summary information.
	 */
	if (loc != fs->fs_sblockloc) {
		bp = getblk(ump->um_devvp, btodb(loc), size, 0, 0, 0);
		bcopy(buf, bp->b_data, (u_int)size);
		if (devfdp->suspended)
			bp->b_flags |= B_VALIDSUSPWRT;
		if (devfdp->waitfor != MNT_WAIT)
			bawrite(bp);
		else if ((error = bwrite(bp)) != 0)
			devfdp->error = error;
		return (0);
	}
	/*
	 * Writing the superblock itself. We need to do special checks for it.
	 */
	bp = devfdp->sbbp;
	if (ffs_fsfail_cleanup(ump, devfdp->error))
		devfdp->error = 0;
	if (devfdp->error != 0) {
		brelse(bp);
		return (devfdp->error);
	}
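	/*
	 * Very old filesystems may record a non-standard superblock
	 * location; rewrite it to the canonical location for the
	 * superblock format before the block goes to disk.
	 */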
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_sblockloc != SBLOCK_UFS1 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS1);
		fs->fs_sblockloc = SBLOCK_UFS1;
	}
	if (fs->fs_magic == FS_UFS2_MAGIC && fs->fs_sblockloc != SBLOCK_UFS2 &&
	    (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0) {
		printf("WARNING: %s: correcting fs_sblockloc from %jd to %d\n",
		    fs->fs_fsmnt, fs->fs_sblockloc, SBLOCK_UFS2);
		fs->fs_sblockloc = SBLOCK_UFS2;
	}
	if (MOUNTEDSOFTDEP(ump->um_mountp))
		softdep_setup_sbupdate(ump, (struct fs *)bp->b_data, bp);
	bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
	fs = (struct fs *)bp->b_data;
	ffs_oldfscompat_write(fs, ump);
	fs->fs_si = NULL;
	/* Recalculate the superblock hash */
	fs->fs_ckhash = ffs_calc_sbhash(fs);
	if (devfdp->suspended)
		bp->b_flags |= B_VALIDSUSPWRT;
	if (devfdp->waitfor != MNT_WAIT)
		bawrite(bp);
	else if ((error = bwrite(bp)) != 0)
		devfdp->error = error;
	return (devfdp->error);
}

static int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *filename_vp,
    int attrnamespace, const char *attrname)
{

#ifdef UFS_EXTATTR
	return (ufs_extattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#else
	return (vfs_stdextattrctl(mp, cmd, filename_vp, attrnamespace,
	    attrname));
#endif
}

static void
ffs_ifree(struct ufsmount *ump, struct inode *ip)
{

	if (ump->um_fstype == UFS1 && ip->i_din1 != NULL)
		uma_zfree(uma_ufs1, ip->i_din1);
	else if (ip->i_din2 != NULL)
		uma_zfree(uma_ufs2, ip->i_din2);
	uma_zfree_smr(uma_inode, ip);
}

static int dobkgrdwrite = 1;
SYSCTL_INT(_debug, OID_AUTO, dobkgrdwrite, CTLFLAG_RW, &dobkgrdwrite, 0,
    "Do background writes (honoring the BV_BKGRDWRITE flag)?");

/*
 * Complete a background write started from bwrite.
 */
static void
ffs_backgroundwritedone(struct buf *bp)
{
	struct bufobj *bufobj;
	struct buf *origbp;

#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) != 0)
		softdep_handle_error(bp);
#endif

	/*
	 * Find the original buffer that we are writing.
	 */
	bufobj = bp->b_bufobj;
	BO_LOCK(bufobj);
	if ((origbp = gbincore(bp->b_bufobj, bp->b_lblkno)) == NULL)
		panic("backgroundwritedone: lost buffer");

	/*
	 * If the write failed, flag the original cylinder group
	 * buffer origbp so that it is redirtied and the failed
	 * write is not lost.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		origbp->b_vflags |= BV_BKGRDERR;
	BO_UNLOCK(bufobj);
	/*
	 * Process dependencies then return any unfinished ones.
	 */
	if (!LIST_EMPTY(&bp->b_dep) && (bp->b_ioflags & BIO_ERROR) == 0)
		buf_complete(bp);
#ifdef SOFTUPDATES
	if (!LIST_EMPTY(&bp->b_dep))
		softdep_move_dependencies(bp, origbp);
#endif
	/*
	 * This buffer is marked B_NOCACHE so when it is released
	 * by biodone it will be tossed. Clear B_IOSTARTED in case of error.
	 */
	bp->b_flags |= B_NOCACHE;
	bp->b_flags &= ~(B_CACHE | B_IOSTARTED);
	pbrelvp(bp);

	/*
	 * Prevent brelse() from trying to keep and re-dirtying bp on
	 * errors. It causes b_bufobj dereference in
	 * bdirty()/reassignbuf(), and b_bufobj was cleared in
	 * pbrelvp() above.
	 */
	if ((bp->b_ioflags & BIO_ERROR) != 0)
		bp->b_flags |= B_INVAL;
	bufdone(bp);
	BO_LOCK(bufobj);
	/*
	 * Clear the BV_BKGRDINPROG flag in the original buffer
	 * and awaken it if it is waiting for the write to complete.
	 * If BV_BKGRDINPROG is not set in the original buffer it must
	 * have been released and re-instantiated - which is not legal.
	 */
	KASSERT((origbp->b_vflags & BV_BKGRDINPROG),
	    ("backgroundwritedone: lost buffer2"));
	origbp->b_vflags &= ~BV_BKGRDINPROG;
	if (origbp->b_vflags & BV_BKGRDWAIT) {
		origbp->b_vflags &= ~BV_BKGRDWAIT;
		wakeup(&origbp->b_xflags);
	}
	BO_UNLOCK(bufobj);
}

/*
 * Write, release buffer on completion. (Done by iodone
 * if async). Do not bother writing anything if the buffer
 * is invalid.
 *
 * Note that we set B_CACHE here, indicating that buffer is
 * fully valid and thus cacheable. This is true even of NFS
 * now so we set it generally. This could be set either here
 * or in biodone() since the I/O is synchronous. We put it
 * here.
 */
static int
ffs_bufwrite(struct buf *bp)
{
	struct buf *newbp;
	struct cg *cgp;

	CTR3(KTR_BUF, "bufwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	}

	if (!BUF_ISLOCKED(bp))
		panic("bufwrite: buffer is not busy???");
	/*
	 * If a background write is already in progress, delay
	 * writing this block if it is asynchronous. Otherwise
	 * wait for the background write to complete.
	 */
	BO_LOCK(bp->b_bufobj);
	if (bp->b_vflags & BV_BKGRDINPROG) {
		if (bp->b_flags & B_ASYNC) {
			BO_UNLOCK(bp->b_bufobj);
			bdwrite(bp);
			return (0);
		}
		bp->b_vflags |= BV_BKGRDWAIT;
		msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj), PRIBIO,
		    "bwrbg", 0);
		if (bp->b_vflags & BV_BKGRDINPROG)
			panic("bufwrite: still writing");
	}
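	/*
	 * A previous background write may have failed and left
	 * BV_BKGRDERR set; clear it, since the buffer is now being
	 * written for real.
	 */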
	bp->b_vflags &= ~BV_BKGRDERR;
	BO_UNLOCK(bp->b_bufobj);

	/*
	 * If this buffer is marked for background writing and we
	 * do not have to wait for it, make a copy and write the
	 * copy so as to leave this buffer ready for further use.
	 *
	 * This optimization eats a lot of memory. If we have a page
	 * or buffer shortfall we can't do it.
	 */
	if (dobkgrdwrite && (bp->b_xflags & BX_BKGRDWRITE) &&
	    (bp->b_flags & B_ASYNC) &&
	    !vm_page_count_severe() &&
	    !buf_dirty_count_severe()) {
		KASSERT(bp->b_iodone == NULL,
		    ("bufwrite: needs chained iodone (%p)", bp->b_iodone));

		/* get a new block */
		newbp = geteblk(bp->b_bufsize, GB_NOWAIT_BD);
		if (newbp == NULL)
			goto normal_write;

		KASSERT(buf_mapped(bp), ("Unmapped cg"));
		memcpy(newbp->b_data, bp->b_data, bp->b_bufsize);
		BO_LOCK(bp->b_bufobj);
		bp->b_vflags |= BV_BKGRDINPROG;
		BO_UNLOCK(bp->b_bufobj);
		newbp->b_xflags |=
		    (bp->b_xflags & BX_FSPRIV) | BX_BKGRDMARKER;
		newbp->b_lblkno = bp->b_lblkno;
		newbp->b_blkno = bp->b_blkno;
		newbp->b_offset = bp->b_offset;
		newbp->b_iodone = ffs_backgroundwritedone;
		newbp->b_flags |= B_ASYNC;
		newbp->b_flags &= ~B_INVAL;
		pbgetvp(bp->b_vp, newbp);

#ifdef SOFTUPDATES
		/*
		 * Move over the dependencies. If there are rollbacks,
		 * leave the parent buffer dirtied as it will need to
		 * be written again.
		 */
		if (LIST_EMPTY(&bp->b_dep) ||
		    softdep_move_dependencies(bp, newbp) == 0)
			bundirty(bp);
#else
		bundirty(bp);
#endif

		/*
		 * Initiate write on the copy, release the original. The
		 * BKGRDINPROG flag prevents it from going away until
		 * the background write completes. We have to recalculate
		 * its check hash in case the buffer gets freed and then
		 * reconstituted from the buffer cache during a later read.
		 */
		if ((bp->b_xflags & BX_CYLGRP) != 0) {
			cgp = (struct cg *)bp->b_data;
			cgp->cg_ckhash = 0;
			cgp->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
		}
		bqrelse(bp);
		bp = newbp;
	} else
		/* Mark the buffer clean */
		bundirty(bp);

	/* Let the normal bufwrite do the rest for us */
normal_write:
	/*
	 * If we are writing a cylinder group, update its time.
	 */
	if ((bp->b_xflags & BX_CYLGRP) != 0) {
		cgp = (struct cg *)bp->b_data;
		cgp->cg_old_time = cgp->cg_time = time_second;
	}
	return (bufwrite(bp));
}

static void
ffs_geom_strategy(struct bufobj *bo, struct buf *bp)
{
	struct vnode *vp;
	struct buf *tbp;
	int error, nocopy;

	/*
	 * This is the bufobj strategy for the private VCHR vnodes
	 * used by FFS to access the underlying storage device.
	 * We override the default bufobj strategy and thus bypass
	 * VOP_STRATEGY() for these vnodes.
	 */
	vp = bo2vnode(bo);
	KASSERT(bp->b_vp == NULL || bp->b_vp->v_type != VCHR ||
	    bp->b_vp->v_rdev == NULL ||
	    bp->b_vp->v_rdev->si_mountpt == NULL ||
	    VFSTOUFS(bp->b_vp->v_rdev->si_mountpt) == NULL ||
	    vp == VFSTOUFS(bp->b_vp->v_rdev->si_mountpt)->um_devvp,
	    ("ffs_geom_strategy() with wrong vp"));
	if (bp->b_iocmd == BIO_WRITE) {
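		/*
		 * While the filesystem is suspended, the only legal
		 * writes are those explicitly marked B_VALIDSUSPWRT
		 * (e.g. by the snapshot code).
		 */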
		if ((bp->b_flags & B_VALIDSUSPWRT) == 0 &&
		    bp->b_vp != NULL && bp->b_vp->v_mount != NULL &&
		    (bp->b_vp->v_mount->mnt_kern_flag & MNTK_SUSPENDED) != 0)
			panic("ffs_geom_strategy: bad I/O");
		nocopy = bp->b_flags & B_NOCOPY;
		bp->b_flags &= ~(B_VALIDSUSPWRT | B_NOCOPY);
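		/*
		 * If the device is under snapshot, give the
		 * copy-on-write code a chance to copy the blocks
		 * being overwritten. B_NOCOPY writes originate from
		 * the snapshot code itself and must not be copied
		 * again.
		 */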
		if ((vp->v_vflag & VV_COPYONWRITE) && nocopy == 0 &&
		    vp->v_rdev->si_snapdata != NULL) {
			if ((bp->b_flags & B_CLUSTER) != 0) {
				runningbufwakeup(bp);
				TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
				    b_cluster.cluster_entry) {
					error = ffs_copyonwrite(vp, tbp);
					if (error != 0 &&
					    error != EOPNOTSUPP) {
						bp->b_error = error;
						bp->b_ioflags |= BIO_ERROR;
						bp->b_flags &= ~B_BARRIER;
						bufdone(bp);
						return;
					}
				}
				bp->b_runningbufspace = bp->b_bufsize;
				atomic_add_long(&runningbufspace,
				    bp->b_runningbufspace);
			} else {
				error = ffs_copyonwrite(vp, bp);
				if (error != 0 && error != EOPNOTSUPP) {
					bp->b_error = error;
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags &= ~B_BARRIER;
					bufdone(bp);
					return;
				}
			}
		}
#ifdef SOFTUPDATES
		if ((bp->b_flags & B_CLUSTER) != 0) {
			TAILQ_FOREACH(tbp, &bp->b_cluster.cluster_head,
			    b_cluster.cluster_entry) {
				if (!LIST_EMPTY(&tbp->b_dep))
					buf_start(tbp);
			}
		} else {
			if (!LIST_EMPTY(&bp->b_dep))
				buf_start(bp);
		}

#endif
		/*
		 * Check for metadata that needs check-hashes and update them.
		 */
		switch (bp->b_xflags & BX_FSPRIV) {
		case BX_CYLGRP:
			((struct cg *)bp->b_data)->cg_ckhash = 0;
			((struct cg *)bp->b_data)->cg_ckhash =
			    calculate_crc32c(~0L, bp->b_data, bp->b_bcount);
			break;

		case BX_SUPERBLOCK:
		case BX_INODE:
		case BX_INDIR:
		case BX_DIR:
			printf("Check-hash write is unimplemented!!!\n");
			break;

		case 0:
			break;

		default:
			printf("multiple buffer types 0x%b\n",
			    (u_int)(bp->b_xflags & BX_FSPRIV),
			    PRINT_UFS_BUF_XFLAGS);
			break;
		}
	}
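	/*
	 * When enabled, ask the lower layer to map other write errors
	 * to ENXIO, so that a failing volume is handled as a lost
	 * device and cleaned up by the fsfail machinery.
	 */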
	if (bp->b_iocmd != BIO_READ && ffs_enxio_enable)
		bp->b_xflags |= BX_CVTENXIO;
	g_vfs_strategy(bo, bp);
}

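/*
 * Report whether the given mount point belongs to this filesystem,
 * i.e. whether mp is an FFS (UFS) mount.
 */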
int
ffs_own_mount(const struct mount *mp)
{

	if (mp->mnt_op == &ufs_vfsops)
		return (1);
	return (0);
}

#ifdef DDB
#ifdef SOFTUPDATES

/* defined in ffs_softdep.c */
extern void db_print_ffs(struct ufsmount *ump);

DB_SHOW_COMMAND(ffs, db_show_ffs)
{
	struct mount *mp;
	struct ufsmount *ump;

	if (have_addr) {
		ump = VFSTOUFS((struct mount *)addr);
		db_print_ffs(ump);
		return;
	}

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (!strcmp(mp->mnt_stat.f_fstypename, ufs_vfsconf.vfc_name))
			db_print_ffs(VFSTOUFS(mp));
	}
}

#endif /* SOFTUPDATES */
#endif /* DDB */