/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstatsv1 nfsstatsv1;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

uma_zone_t ncl_pbuf_zone;

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
SYSCTL_DECL(_vfs_nfs);
static int use_buf_pager = 1;
SYSCTL_INT(_vfs_nfs, OID_AUTO, use_buf_pager, CTLFLAG_RWTUN,
    &use_buf_pager, 0,
    "Use buffer pager instead of direct readrpc call");

static daddr_t
ncl_gbp_getblkno(struct vnode *vp, vm_ooffset_t off)
{

	return (off / vp->v_bufobj.bo_bsize);
}

static int
ncl_gbp_getblksz(struct vnode *vp, daddr_t lbn, long *sz)
{
	struct nfsnode *np;
	u_quad_t nsize;
	int biosize, bcount;

	np = VTONFS(vp);
	NFSLOCKNODE(np);
	nsize = np->n_size;
	NFSUNLOCKNODE(np);

	biosize = vp->v_bufobj.bo_bsize;
	bcount = biosize;
	if ((off_t)lbn * biosize >= nsize)
		bcount = 0;
	else if ((off_t)(lbn + 1) * biosize > nsize)
		bcount = nsize - (off_t)lbn * biosize;
	*sz = bcount;
	return (0);
}
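/*
 * Worked example (illustrative): with biosize == 32768 and file size
 * nsize == 40000, lbn 0 yields a full block (bcount == 32768), lbn 1
 * straddles EOF (40000 - 32768 == 7232, so bcount == 7232), and lbn 2
 * starts at or beyond EOF, so bcount == 0.
 */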

int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;
	cred = curthread->td_ucred;
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("ncl_getpages: called with non-merged cache vnode\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		NFSLOCKNODE(np);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			NFSUNLOCKNODE(np);
			printf("ncl_getpages: called on non-cacheable vnode\n");
			return (VM_PAGER_ERROR);
		} else
			NFSUNLOCKNODE(np);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	if (use_buf_pager)
		return (vfs_bio_getpages(vp, pages, npages, ap->a_rbehind,
		    ap->a_rahead, ncl_gbp_getblkno, ncl_gbp_getblksz));

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for NFS, where short read can occur???
	 */
	VM_OBJECT_WLOCK(object);
	if (!vm_page_none_valid(pages[npages - 1]) && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = uma_zalloc(ncl_pbuf_zone, M_WAITOK);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	uma_zfree(ncl_pbuf_zone, bp);

	if (error && (uio.uio_resid == count)) {
		printf("ncl_getpages: error %d\n", error);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */
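	/*
	 * Worked example (illustrative, assuming PAGE_SIZE == 4096): for
	 * npages == 4 and a short read leaving size == 10000, pages 0 and 1
	 * (nextoff 4096 and 8192) become fully valid, page 2 is valid only
	 * for bytes [0, 10000 - 8192) == [0, 1808), and page 3 is left
	 * invalid for the loop's final case below to deal with.
	 */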

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			vm_page_valid(m);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			vm_page_invalid(m);
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	int i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	NFSLOCKNODE(np);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		NFSUNLOCKNODE(np);
307 printf("ncl_putpages: called on noncache-able vnode\n");
		NFSLOCKNODE(np);
	}
	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	NFSUNLOCKNODE(np);

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	VM_CNT_INC(v_vnodeout);
	VM_CNT_ADD(v_vnodepgsout, count);

	iov.iov_base = unmapped_buf;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_NOCOPY;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	error = VOP_WRITE(vp, &uio, vnode_pager_putpages_ioflags(ap->a_sync),
	    cred);
	crfree(cred);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid,
		    np->n_size - offset, npages * PAGE_SIZE);
	}
	return (rtvals[0]);
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 *	If the file's modify time on the server has changed since the
 *	last read rpc or you have written to the file,
 *	you may have lost data cache consistency with the
 *	server, so flush all of the file's data out of the cache.
 *	Then force a getattr rpc to ensure that you have up to date
 *	attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date.  If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	bool old_lock;

	/*
	 * Ensure the exclusive access to the node before checking
	 * whether the cache is consistent.
	 */
	old_lock = ncl_excl_start(vp);
	NFSLOCKNODE(np);
	if (np->n_flag & NMODIFIED) {
		NFSUNLOCKNODE(np);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1);
			if (error != 0)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		NFSLOCKNODE(np);
		np->n_mtime = vattr.va_mtime;
		NFSUNLOCKNODE(np);
	} else {
		NFSUNLOCKNODE(np);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		NFSLOCKNODE(np);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			NFSUNLOCKNODE(np);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE | V_ALLOWCLEAN, td, 1);
			if (error != 0)
				goto out;
			NFSLOCKNODE(np);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		NFSUNLOCKNODE(np);
	}
out:
	ncl_excl_finish(vp, old_lock);
	return (error);
}
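/*
 * Minimal sketch (illustrative, not from the original source) of the NB
 * note above: a caller that must see current attributes can invalidate
 * the attribute cache first, forcing the next VOP_GETATTR() over the wire:
 *
 *	NFSLOCKNODE(np);
 *	np->n_attrstamp = 0;
 *	NFSUNLOCKNODE(np);
 *	error = VOP_GETATTR(vp, &vattr, cred);
 */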

static bool
ncl_bioread_dora(struct vnode *vp)
{
	vm_object_t obj;

	obj = vp->v_object;
	if (obj == NULL)
		return (true);
	return (!vm_object_mightbedirty(vp->v_object) &&
	    vp->v_object->un_pager.vnp.writemappings == 0);
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int biosize, bcount, error, i, n, nra, on, save2, seqcount;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching / no readaheads.  Just read data into the user buffer. */
		return ncl_readrpc(vp, uio, cred);

	n = 0;
	on = 0;
	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	save2 = curthread_pflags2_set(TDP2_SBPAGES);
	do {
		u_quad_t nsize;

		NFSLOCKNODE(np);
		nsize = np->n_size;
		NFSUNLOCKNODE(np);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(nfsstatsv1.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset - (lbn * biosize);
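			/*
			 * E.g. (illustrative): with biosize == 32768 and
			 * uio_offset == 100000, lbn == 3 and
			 * on == 100000 - 3 * 32768 == 1696.
			 */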

			/*
			 * Start the read ahead(s), as required.  Do not do
			 * read-ahead if there are writeable mappings, since
			 * unlocked read by nfsiod could obliterate changes
			 * done by userspace.
			 */
			if (nmp->nm_readahead > 0 && ncl_bioread_dora(vp)) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							if (error == 0)
								error = EINTR;
							goto out;
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				if (error == 0)
					error = EINTR;
				goto out;
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					goto out;
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */
			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				if (error == 0)
					error = EINTR;
				goto out;
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					goto out;
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs);
			NFSLOCKNODE(np);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				NFSUNLOCKNODE(np);
				error = 0;
				goto out;
			}
			NFSUNLOCKNODE(np);
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				if (error == 0)
					error = EINTR;
				goto out;
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);

					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuch!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						NFSLOCKNODE(np);
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset) {
							NFSUNLOCKNODE(np);
							error = 0;
							goto out;
						}
						NFSUNLOCKNODE(np);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							if (error == 0)
								error = EINTR;
							goto out;
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					goto out;
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			NFSLOCKNODE(np);
			if (nmp->nm_readahead > 0 && ncl_bioread_dora(vp) &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				NFSUNLOCKNODE(np);
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
				NFSLOCKNODE(np);
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			NFSUNLOCKNODE(np);
			break;
		default:
			printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
out:
	curthread_pflags2_restore(save2);
	if ((curthread->td_pflags2 & TDP2_SBPAGES) == 0) {
		NFSLOCKNODE(np);
		ncl_pager_setsize(vp, NULL);
	}
	return (error);
}

/*
 * The NFS write path cannot handle iovecs with len > 1.  So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed.  The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	struct uio uio;
	struct iovec iov;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int error, iomode, must_commit, size, wsize;

	KASSERT((ioflag & IO_SYNC) != 0, ("nfs_directio_write: not sync"));
	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	while (uiop->uio_resid > 0) {
		size = MIN(uiop->uio_resid, wsize);
		size = MIN(uiop->uio_iov->iov_len, size);
		iov.iov_base = uiop->uio_iov->iov_base;
		iov.iov_len = size;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = uiop->uio_offset;
		uio.uio_resid = size;
		uio.uio_segflg = uiop->uio_segflg;
		uio.uio_rw = UIO_WRITE;
		uio.uio_td = td;
		iomode = NFSWRITE_FILESYNC;
		/*
		 * When doing direct I/O we do not care if the
		 * server's write verifier has changed, but we
		 * do not want to update the verifier if it has
		 * changed, since that hides the change from
		 * writes being done through the buffer cache.
		 * By passing in must_commit set to two, the code
		 * in nfsrpc_writerpc() will not update the
		 * verifier on the mount point.
		 */
		must_commit = 2;
		error = ncl_writerpc(vp, &uio, cred, &iomode,
		    &must_commit, 0, ioflag);
		KASSERT(must_commit == 2,
		    ("ncl_directio_write: Updated write verifier"));
		if (error != 0)
			return (error);
		if (iomode != NFSWRITE_FILESYNC)
			printf("nfs_directio_write: Broken server "
			    "did not reply FILE_SYNC\n");
		uiop->uio_offset += size;
		uiop->uio_resid -= size;
		if (uiop->uio_iov->iov_len <= size) {
			uiop->uio_iovcnt--;
			uiop->uio_iov++;
		} else {
			uiop->uio_iov->iov_base =
			    (char *)uiop->uio_iov->iov_base + size;
			uiop->uio_iov->iov_len -= size;
		}
	}
	return (0);
}
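/*
 * Worked example (illustrative): with wsize == 32768 and a single 100000
 * byte iovec, the loop above issues FILESYNC write RPCs of 32768, 32768,
 * 32768 and 1696 bytes, advancing iov_base/iov_len and uio_offset after
 * each RPC.
 */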

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount, noncontig_write, obcount;
	int bp_cached, n, on, error = 0, error1, save2, wouldcommit;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;
	struct timespec ts;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	NFSLOCKNODE(np);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		NFSUNLOCKNODE(np);
		return (np->n_error);
	} else
		NFSUNLOCKNODE(np);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if ((ioflag & IO_APPEND) || ((ioflag & IO_SYNC) && (np->n_flag &
	    NMODIFIED))) {
		/*
		 * For the case where IO_APPEND is being done using a
		 * direct output (to the NFS server) RPC and
		 * newnfs_directio_enable is 0, all buffer cache buffers,
		 * including ones not modified, must be invalidated.
		 * This ensures that stale data is not read out of the
		 * buffer cache.  The call also invalidates all mapped
		 * pages and, since the exclusive lock is held on the vnode,
		 * new pages cannot be faulted in.
		 *
		 * For the case where newnfs_directio_enable is set
		 * (which is not the default), it is not obvious that
		 * stale data should be left in the buffer cache, but
		 * the code has been this way for over a decade without
		 * complaints.  Note that, unlike doing IO_APPEND via
		 * a direct write RPC when newnfs_directio_enable is not set,
		 * when newnfs_directio_enable is set, reading is done via
		 * direct to NFS server RPCs as well.
		 */
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag &
		    IO_VMIO) != 0 ? V_VMIO : 0), td, 1);
		if (error != 0)
			return (error);
	}

	orig_resid = uio->uio_resid;
	NFSLOCKNODE(np);
	orig_size = np->n_size;
	NFSUNLOCKNODE(np);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		/*
		 * For NFSv4, the AppendWrite will Verify the size against
		 * the file's size on the server.  If not the same, the
		 * write will then be retried, using the file size returned
		 * by the AppendWrite.  However, for NFSv2 and NFSv3, the
		 * size must be acquired here via a Getattr RPC.
		 * The AppendWrite is not done for a pNFS mount.
		 */
		if (!NFSHASNFSV4(nmp) || NFSHASPNFS(nmp)) {
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = VOP_GETATTR(vp, &vattr, cred);
			if (error)
				return (error);
		}
		NFSLOCKNODE(np);
		uio->uio_offset = np->n_size;
		NFSUNLOCKNODE(np);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	/*
	 * Do IO_APPEND writing via a synchronous direct write.
	 * This can result in a significant performance improvement.
	 */
	if ((newnfs_directio_enable && (ioflag & IO_DIRECT)) ||
	    (ioflag & IO_APPEND)) {
		/*
		 * Direct writes to the server must be done NFSWRITE_FILESYNC,
		 * because the write data is not cached and, therefore, the
		 * write cannot be redone after a server reboot.
		 * Set IO_SYNC to make this happen.
		 */
		ioflag |= IO_SYNC;
		return (nfs_directio_write(vp, uio, cred, ioflag));
	}

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	error = vn_rlimit_fsize(vp, uio, td);
	if (error != 0)
		return (error);

	save2 = curthread_pflags2_set(TDP2_SBPAGES);
	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	wouldcommit = 0;
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		NFSLOCKNODE(np);
		nflag = np->n_flag;
		NFSUNLOCKNODE(np);
		if (nflag & NMODIFIED) {
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
		}
	}

	do {
		if (!(ioflag & IO_SYNC)) {
			wouldcommit += biosize;
			if (wouldcommit > nmp->nm_wcommitsize) {
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
				error = ncl_vinvalbuf(vp, V_SAVE | ((ioflag &
				    IO_VMIO) != 0 ? V_VMIO : 0), td, 1);
				if (error != 0)
					goto out;
				wouldcommit = biosize;
			}
		}

		NFSINCRGLOBAL(nfsstatsv1.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		NFSLOCKNODE(np);
		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
			noncontig_write = 1;
		else
			noncontig_write = 0;
		if ((uio->uio_offset == np->n_size ||
		    (noncontig_write != 0 &&
		    lbn == (np->n_size / biosize) &&
		    uio->uio_offset + n > np->n_size)) && n) {
			NFSUNLOCKNODE(np);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			obcount = np->n_size - (lbn * biosize);
			bp = nfs_getcacheblk(vp, lbn, obcount, td);

			if (bp != NULL) {
				long save;

				NFSLOCKNODE(np);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				np->n_flag &= ~NVNSETSZSKIP;
				vnode_pager_setsize(vp, np->n_size);
				NFSUNLOCKNODE(np);

				save = bp->b_flags & B_CACHE;
				bcount = on + n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				if (noncontig_write != 0 && on > obcount)
					vfs_bio_bzero_buf(bp, obcount, on -
					    obcount);
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			NFSUNLOCKNODE(np);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			NFSLOCKNODE(np);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				np->n_flag &= ~NVNSETSZSKIP;
				vnode_pager_setsize(vp, np->n_size);
			}
			NFSUNLOCKNODE(np);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */
		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		NFSLOCKNODE(np);
		np->n_flag |= NMODIFIED;
		NFSUNLOCKNODE(np);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * If there has been a file lock applied to this file
		 * or vfs.nfs.old_noncontig_writing is set, do the following:
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * If vfs.nfs.old_noncontig_writing is not set and there has
		 * not been file locking done on this file:
		 * Relax coherency a bit for the sake of performance and
		 * expand the current dirty region to contain the new
		 * write even if it means we mark some non-dirty data as
		 * dirty.
		 */
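		/*
		 * E.g. (illustrative): with an existing dirty region
		 * [0, 512) and a new write at on == 1024, n == 1024, the
		 * contiguous check below fails (on > b_dirtyend), so the
		 * old region is flushed first.  With noncontig_write != 0
		 * the merge further below instead expands the dirty region
		 * to [0, 2048), marking the untouched gap [512, 1024)
		 * dirty as well.
		 */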

		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * possibly was written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error1 = bwrite(bp);
			if (error1 != 0) {
				if (error == 0)
					error = error1;
				break;
			}
		} else if ((n + on) == biosize || (ioflag & IO_ASYNC) != 0) {
			bp->b_flags |= B_ASYNC;
			(void) bwrite(bp);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error == 0) {
		nanouptime(&ts);
		NFSLOCKNODE(np);
		np->n_localmodtime = ts;
		NFSUNLOCKNODE(np);
	} else {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

out:
	curthread_pflags2_restore(save2);
	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}
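/*
 * E.g. (illustrative): the b_blkno assignment above maps logical block
 * numbers to DEV_BSIZE units, so with bo_bsize == 32768 and
 * DEV_BSIZE == 512, logical block 3 gets b_blkno == 3 * 64 == 192.
 */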

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;
	bool old_lock;
	struct timespec ts;

	ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (NFSCL_FORCEDISM(nmp->nm_mountp))
		intrflg = 1;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}

	old_lock = ncl_excl_start(vp);
	if (old_lock)
		flags |= V_ALLOWCLEAN;

	/*
	 * Now, flush as required.
	 */
	if ((flags & (V_SAVE | V_VMIO)) == V_SAVE) {
		vnode_pager_clean_sync(vp);

		/*
		 * If the page clean was interrupted, fail the invalidation.
		 * Not doing so, we run the risk of losing dirty pages in the
		 * vinvalbuf() call below.
		 */
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
	}

	error = vinvalbuf(vp, flags, slpflag, 0);
	while (error) {
		if (intrflg && (error = newnfs_sigintr(nmp, td)))
			goto out;
		error = vinvalbuf(vp, flags, 0, slptimeo);
	}
	if (NFSHASPNFS(nmp)) {
		nfscl_layoutcommit(vp, td);
		nanouptime(&ts);
		/*
		 * Invalidate the attribute cache, since writes to a DS
		 * won't update the size attribute.
		 */
		NFSLOCKNODE(np);
		np->n_attrstamp = 0;
	} else {
		nanouptime(&ts);
		NFSLOCKNODE(np);
	}
	if ((np->n_flag & NMODIFIED) != 0) {
		np->n_localmodtime = ts;
		np->n_flag &= ~NMODIFIED;
	}
	NFSUNLOCKNODE(np);
out:
	ncl_excl_finish(vp, old_lock);
	return error;
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 *
 * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
 * is eventually dequeued by the async daemon, ncl_doio() *will*.
 */
int
ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
{
	int iod;
	int gotiod;
	int slpflag = 0;
	int slptimeo = 0;
	int error, error2;

	/*
	 * Commits are usually short and sweet so lets save some cpu and
	 * leave the async daemons for more important rpc's (such as reads
	 * and writes).
	 *
	 * Readdirplus RPCs do vget()s to acquire the vnodes for entries
	 * in the directory in order to update attributes.  This can deadlock
	 * with another thread that is waiting for async I/O to be done by
	 * an nfsiod thread while holding a lock on one of these vnodes.
	 * To avoid this deadlock, don't allow the async nfsiod threads to
	 * perform Readdirplus RPCs.
	 */
	NFSLOCKIOD();
	if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
	    (nmp->nm_bufqiods > ncl_numasync / 2)) ||
	    (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
		NFSUNLOCKIOD();
		return(EIO);
	}
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */
	for (iod = 0; iod < ncl_numasync; iod++)
		if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
			gotiod = TRUE;
			break;
		}

	/*
	 * Try to create one if none are free.
	 */
	if (!gotiod)
		ncl_nfsiodnew();
	else {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
		    iod, nmp));
		ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
		ncl_iodmount[iod] = nmp;
		nmp->nm_bufqiods++;
		wakeup(&ncl_iodwant[iod]);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod) {
		if (nmp->nm_bufqiods > 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: %d iods are already processing mount %p\n",
			    nmp->nm_bufqiods, nmp));
			gotiod = TRUE;
		}
	}

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.  We still want
		 * to asynchronize so we block rather than return EIO.
		 */
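		/*
		 * Illustrative: with ncl_numasync == 20 iod threads running,
		 * a mount's async queue is capped at 40 buffers; producers
		 * sleep below until the nfsiods drain it.
		 */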
		while (nmp->nm_bufqlen >= 2*ncl_numasync) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
			nmp->nm_bufqwant = TRUE;
			error = newnfs_msleep(td, &nmp->nm_bufq,
			    &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
			    slptimeo);
			if (error) {
				error2 = newnfs_sigintr(nmp, td);
				if (error2) {
					NFSUNLOCKIOD();
					return (error2);
				}
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			goto again;
		}

		/* We might have lost our nfsiod */
		if (nmp->nm_bufqiods == 0) {
			NFS_DPF(ASYNCIO,
			    ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
			goto again;
		}

		if (bp->b_iocmd == BIO_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED)
				bp->b_rcred = crhold(cred);
		} else {
			if (bp->b_wcred == NOCRED && cred != NOCRED)
				bp->b_wcred = crhold(cred);
		}

		if (bp->b_flags & B_REMFREE)
			bremfreef(bp);
		BUF_KERNPROC(bp);
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		KASSERT((bp->b_flags & B_DIRECT) == 0,
		    ("ncl_asyncio: B_DIRECT set"));
		NFSUNLOCKIOD();
		return (0);
	}

	NFSUNLOCKIOD();

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
    int called_from_strategy)
{
	struct uio *uiop;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;
	struct proc *p = td ? td->td_proc : NULL;
	uint8_t	iocmd;

	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_td = td;

	/*
	 * clear BIO_ERROR and B_INVAL state prior to initiating the I/O.  We
	 * do this here so we do not have to do it in all the code that
	 * calls us.
	 */
	bp->b_flags &= ~B_INVAL;
	bp->b_ioflags &= ~BIO_ERROR;

	KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
	iocmd = bp->b_iocmd;
	if (iocmd == BIO_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;

		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			NFSINCRGLOBAL(nfsstatsv1.read_bios);
			error = ncl_readrpc(vp, uiop, cr);

			if (!error) {
				if (uiop->uio_resid) {
					/*
					 * If we had a short read with no error, we must have
					 * hit a file hole.  We should zero-fill the remainder.
					 * This can also occur if the server hits the file EOF.
					 *
					 * Holes used to be able to occur due to pending
					 * writes, but that is not possible any longer.
					 */
					int nread = bp->b_bcount - uiop->uio_resid;
					ssize_t left = uiop->uio_resid;

					if (left > 0)
						bzero((char *)bp->b_data + nread, left);
					uiop->uio_resid = 0;
				}
			}
			/* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
			if (p && vp->v_writecount <= -1) {
				NFSLOCKNODE(np);
				if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
					NFSUNLOCKNODE(np);
					PROC_LOCK(p);
					killproc(p, "text file modification");
					PROC_UNLOCK(p);
				} else
					NFSUNLOCKNODE(np);
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			NFSINCRGLOBAL(nfsstatsv1.readlink_bios);
			error = ncl_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			NFSINCRGLOBAL(nfsstatsv1.readdir_bios);
			uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
				error = ncl_readdirplusrpc(vp, uiop, cr, td);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = ncl_readdirrpc(vp, uiop, cr, td);
			/*
			 * end-of-directory sets B_INVAL but does not generate an
			 * error.
			 */
			if (error == 0 && uiop->uio_resid == bp->b_bcount)
				bp->b_flags |= B_INVAL;
			break;
		default:
			printf("ncl_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_ioflags |= BIO_ERROR;
			bp->b_error = error;
		}
	} else {
		/*
		 * If we only need to commit, try to commit
		 */
		if (bp->b_flags & B_NEEDCOMMIT) {
			int retv;
			off_t off;

			off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
			retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
			    bp->b_wcred, td);
			if (NFSCL_FORCEDISM(vp->v_mount) || retv == 0) {
				bp->b_dirtyoff = bp->b_dirtyend = 0;
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
				bp->b_resid = 0;
				bufdone(bp);
				return (0);
			}
			if (retv == NFSERR_STALEWRITEVERF) {
				ncl_clearcommit(vp->v_mount);
			}
		}

		/*
		 * Setup for actual write
		 */
		NFSLOCKNODE(np);
		if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
			bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
		NFSUNLOCKNODE(np);

		if (bp->b_dirtyend > bp->b_dirtyoff) {
			io.iov_len = uiop->uio_resid = bp->b_dirtyend
			    - bp->b_dirtyoff;
			uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
			    + bp->b_dirtyoff;
			io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
			uiop->uio_rw = UIO_WRITE;
			NFSINCRGLOBAL(nfsstatsv1.write_bios);

			if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
				iomode = NFSWRITE_UNSTABLE;
			else
				iomode = NFSWRITE_FILESYNC;

			error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
			    called_from_strategy, 0);

			/*
			 * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
			 * to cluster the buffers needing commit.  This will allow
			 * the system to submit a single commit rpc for the whole
			 * cluster.  We can do this even if the buffer is not 100%
			 * dirty (relative to the NFS blocksize), so we optimize the
			 * append-to-file-case.
			 *
			 * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
			 * cleared because write clustering only works for commit
			 * rpc's, not for the data portion of the write).
			 */
			if (!error && iomode == NFSWRITE_UNSTABLE) {
				bp->b_flags |= B_NEEDCOMMIT;
				if (bp->b_dirtyoff == 0
				    && bp->b_dirtyend == bp->b_bcount)
					bp->b_flags |= B_CLUSTEROK;
			} else {
				bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
			}

			/*
			 * For an interrupted write, the buffer is still valid
			 * and the write hasn't been pushed to the server yet,
			 * so we can't set BIO_ERROR and report the interruption
			 * by setting B_EINTR.  For the B_ASYNC case, B_EINTR
			 * is not relevant, so the rpc attempt is essentially
			 * a noop.  For the case of a V3 write rpc not being
			 * committed to stable storage, the block is still
			 * dirty and requires either a commit rpc or another
			 * write rpc with iomode == NFSV3WRITE_FILESYNC before
			 * the block is reused.  This is indicated by setting
			 * the B_DELWRI and B_NEEDCOMMIT flags.
			 *
			 * EIO is returned by ncl_writerpc() to indicate a recoverable
			 * write error and is handled as above, except that
			 * B_EINTR isn't set.  One cause of this is a stale stateid
			 * error for the RPC that indicates recovery is required,
			 * when called with called_from_strategy != 0.
			 *
			 * If the buffer is marked B_PAGING, it does not reside on
			 * the vp's paging queues so we cannot call bdirty().  The
			 * bp in this case is not an NFS cache block so we should
			 * be safe. XXX
			 *
			 * The logic below breaks up errors into recoverable and
			 * unrecoverable.  For the former, we clear B_INVAL|B_NOCACHE
			 * and keep the buffer around for potential write retries.
			 * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
			 * and save the error in the nfsnode.  This is less than ideal
			 * but necessary.  Keeping such buffers around could potentially
			 * cause buffer exhaustion eventually (they can never be written
			 * out, so they will constantly be re-dirtied).  It also causes
			 * all sorts of vfs panics.  For non-recoverable write errors,
			 * also invalidate the attrcache, so we'll be forced to go over
			 * the wire for this object, returning an error to user on next
			 * call (most of the time).
			 */
			if (error == EINTR || error == EIO || error == ETIMEDOUT
			    || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
				bp->b_flags &= ~(B_INVAL|B_NOCACHE);
				if ((bp->b_flags & B_PAGING) == 0) {
					bdirty(bp);
					bp->b_flags &= ~B_DONE;
				}
				if ((error == EINTR || error == ETIMEDOUT) &&
				    (bp->b_flags & B_ASYNC) == 0)
					bp->b_flags |= B_EINTR;
			} else {
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					bp->b_flags |= B_INVAL;
					bp->b_error = np->n_error = error;
					NFSLOCKNODE(np);
					np->n_flag |= NWRITEERR;
					np->n_attrstamp = 0;
					KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
					NFSUNLOCKNODE(np);
				}
				bp->b_dirtyoff = bp->b_dirtyend = 0;
			}
		} else {
			bp->b_resid = 0;
			bufdone(bp);
			return (0);
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit == 1)
		ncl_clearcommit(vp->v_mount);
	bufdone(bp);
	return (error);
}

/*
 * Used to aid in handling ftruncate() operations on the NFS client side.
 * Truncation creates a number of special problems for NFS.  We have to
 * throw away VM pages and buffer cache buffers that are beyond EOF, and
 * we have to properly handle VM pages or (potentially dirty) buffers
 * that straddle the truncation point.
 */

int
ncl_meta_setsize(struct vnode *vp, struct thread *td, u_quad_t nsize)
{
	struct nfsnode *np = VTONFS(vp);
	u_quad_t tsize;
	int biosize = vp->v_bufobj.bo_bsize;
	int error = 0;

	NFSLOCKNODE(np);
	tsize = np->n_size;
	np->n_size = nsize;
	NFSUNLOCKNODE(np);

	if (nsize < tsize) {
		struct buf *bp;
		daddr_t lbn;
		int bufsize;

		/*
		 * vtruncbuf() doesn't get the buffer overlapping the
		 * truncation point.  We may have a B_DELWRI and/or B_CACHE
		 * buffer that now needs to be truncated.
		 */
		error = vtruncbuf(vp, nsize, biosize);
		lbn = nsize / biosize;
		bufsize = nsize - (lbn * biosize);
		bp = nfs_getcacheblk(vp, lbn, bufsize, td);
		if (!bp)
			return EINTR;
		if (bp->b_dirtyoff > bp->b_bcount)
			bp->b_dirtyoff = bp->b_bcount;
		if (bp->b_dirtyend > bp->b_bcount)
			bp->b_dirtyend = bp->b_bcount;
		bp->b_flags |= B_RELBUF;  /* don't leave garbage around */
		brelse(bp);
	} else {
		vnode_pager_setsize(vp, nsize);
	}
	return(error);
}
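/*
 * Worked example (illustrative): truncating a file from 100000 to 50000
 * bytes with biosize == 32768: vtruncbuf() drops the buffers wholly past
 * the new EOF, then lbn == 50000 / 32768 == 1 and bufsize == 50000 -
 * 32768 == 17232, so the buffer straddling the truncation point is
 * re-fetched at its clipped size and its dirty region is trimmed to fit.
 */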