/*-
 * SPDX-License-Identifier: BSD-4-Clause
 *
 * Copyright (c) 1990 University of Utah.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 * Copyright (c) 1993, 1994 John S. Dyson
 * Copyright (c) 1995, David Greenman
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vnode_pager.c	7.5 (Berkeley) 4/20/91
 */

/*
 * Page to/from files (vnodes).
 */

/*
 * TODO:
 *	Implement VOP_GETPAGES/PUTPAGES interface for filesystems. Will
 *	greatly re-simplify the vnode_pager.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/vmmeter.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/conf.h>
#include <sys/refcount.h>
#include <sys/rwlock.h>
#include <sys/sf_buf.h>
#include <sys/domainset.h>

#include <machine/atomic.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_map.h>
#include <vm/vnode_pager.h>
#include <vm/vm_extern.h>
#include <vm/uma.h>

static int vnode_pager_addr(struct vnode *vp, vm_ooffset_t address,
    daddr_t *rtaddress, int *run);
static int vnode_pager_input_smlfs(vm_object_t object, vm_page_t m);
static int vnode_pager_input_old(vm_object_t object, vm_page_t m);
static void vnode_pager_dealloc(vm_object_t);
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int *, int *);
static int vnode_pager_getpages_async(vm_object_t, vm_page_t *, int, int *,
    int *, vop_getpages_iodone_t, void *);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
    vm_ooffset_t, struct ucred *cred);
static int vnode_pager_generic_getpages_done(struct buf *);
static void vnode_pager_generic_getpages_done_async(struct buf *);
static void vnode_pager_update_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);
static void vnode_pager_release_writecount(vm_object_t, vm_offset_t,
    vm_offset_t);

struct pagerops vnodepagerops = {
	.pgo_alloc =	vnode_pager_alloc,
	.pgo_dealloc =	vnode_pager_dealloc,
	.pgo_getpages =	vnode_pager_getpages,
	.pgo_getpages_async = vnode_pager_getpages_async,
	.pgo_putpages =	vnode_pager_putpages,
	.pgo_haspage =	vnode_pager_haspage,
	.pgo_update_writecount = vnode_pager_update_writecount,
	.pgo_release_writecount = vnode_pager_release_writecount,
};

static struct domainset *vnode_domainset = NULL;

SYSCTL_PROC(_debug, OID_AUTO, vnode_domainset,
    CTLTYPE_STRING | CTLFLAG_MPSAFE | CTLFLAG_RW, &vnode_domainset, 0,
    sysctl_handle_domainset, "A", "Default vnode NUMA policy");
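
/*
 * The default NUMA policy applied to newly allocated vnode-backed
 * objects can be inspected or changed at run time through the sysctl
 * declared above; illustrative invocation (the accepted policy-string
 * syntax is whatever sysctl_handle_domainset() parses):
 *
 *	sysctl debug.vnode_domainset
 */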

static int nvnpbufs;
SYSCTL_INT(_vm, OID_AUTO, vnode_pbufs, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &nvnpbufs, 0, "number of physical buffers allocated for vnode pager");

static uma_zone_t vnode_pbuf_zone;

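/*
 * Size the pbuf zone used for vnode pager I/O.  The default scales with
 * nswbuf (doubled on 64-bit kernels, halved on 32-bit ones to conserve
 * KVA) and can be overridden with the vm.vnode_pbufs tunable.
 */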
static void
vnode_pager_init(void *dummy)
{

#ifdef __LP64__
	nvnpbufs = nswbuf * 2;
#else
	nvnpbufs = nswbuf / 2;
#endif
	TUNABLE_INT_FETCH("vm.vnode_pbufs", &nvnpbufs);
	vnode_pbuf_zone = pbuf_zsecond_create("vnpbuf", nvnpbufs);
}
SYSINIT(vnode_pager, SI_SUB_CPU, SI_ORDER_ANY, vnode_pager_init, NULL);
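
/*
 * Illustrative usage: because vm.vnode_pbufs is a boot-time tunable
 * (CTLFLAG_RDTUN), it is set from loader.conf rather than with
 * sysctl(8), e.g. (assumed value):
 *
 *	vm.vnode_pbufs="512"
 */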

/* Create the VM system backing object for this vnode */
int
vnode_create_vobject(struct vnode *vp, off_t isize, struct thread *td)
{
	vm_object_t object;
	vm_ooffset_t size = isize;
	struct vattr va;
	bool last;

	if (!vn_isdisk(vp) && vn_canvmio(vp) == FALSE)
		return (0);

	object = vp->v_object;
	if (object != NULL)
		return (0);

	if (size == 0) {
		if (vn_isdisk(vp)) {
			size = IDX_TO_OFF(INT_MAX);
		} else {
			if (VOP_GETATTR(vp, &va, td->td_ucred))
				return (0);
			size = va.va_size;
		}
	}

	object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
	/*
	 * Dereference the reference we just created.  This assumes
	 * that the object is associated with the vp.  We still have
	 * to serialize with vnode_pager_dealloc() for the last
	 * potential reference.
	 */
	VM_OBJECT_RLOCK(object);
	last = refcount_release(&object->ref_count);
	VM_OBJECT_RUNLOCK(object);
	if (last)
		vrele(vp);

	KASSERT(vp->v_object != NULL, ("vnode_create_vobject: NULL object"));

	return (0);
}

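/*
 * Destroy the VM object backing the vnode.  If the object has no
 * remaining references it is cleaned and terminated here; otherwise it
 * is only disassociated from the pager and will be terminated when its
 * last reference is dropped.
 */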
void
vnode_destroy_vobject(struct vnode *vp)
{
	struct vm_object *obj;

	obj = vp->v_object;
	if (obj == NULL || obj->handle != vp)
		return;
	ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
	VM_OBJECT_WLOCK(obj);
	MPASS(obj->type == OBJT_VNODE);
	umtx_shm_object_terminated(obj);
	if (obj->ref_count == 0) {
		KASSERT((obj->flags & OBJ_DEAD) == 0,
		   ("vnode_destroy_vobject: Terminating dead object"));
		vm_object_set_flag(obj, OBJ_DEAD);

		/*
		 * Clean pages and flush buffers.
		 */
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(obj);

		vinvalbuf(vp, V_SAVE, 0, 0);

		BO_LOCK(&vp->v_bufobj);
		vp->v_bufobj.bo_flag |= BO_DEAD;
		BO_UNLOCK(&vp->v_bufobj);

		VM_OBJECT_WLOCK(obj);
		vm_object_terminate(obj);
	} else {
		/*
		 * Woe to the process that tries to page now :-).
		 */
		vm_pager_deallocate(obj);
		VM_OBJECT_WUNLOCK(obj);
	}
	KASSERT(vp->v_object == NULL, ("vp %p obj %p", vp, vp->v_object));
}

/*
 * Allocate (or lookup) pager for a vnode.
 * Handle is a vnode pointer.
 */
vm_object_t
vnode_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t offset, struct ucred *cred)
{
	vm_object_t object;
	struct vnode *vp;

	/*
	 * Pageout to vnode, no can do yet.
	 */
	if (handle == NULL)
		return (NULL);

	vp = (struct vnode *)handle;
	ASSERT_VOP_LOCKED(vp, "vnode_pager_alloc");
	VNPASS(vp->v_usecount > 0, vp);
retry:
	object = vp->v_object;

	if (object == NULL) {
		/*
		 * Add an object of the appropriate size
		 */
		object = vm_object_allocate(OBJT_VNODE,
		    OFF_TO_IDX(round_page(size)));

		object->un_pager.vnp.vnp_size = size;
		object->un_pager.vnp.writemappings = 0;
		object->domain.dr_policy = vnode_domainset;
		object->handle = handle;
		if ((vp->v_vflag & VV_VMSIZEVNLOCK) != 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_set_flag(object, OBJ_SIZEVNLOCK);
			VM_OBJECT_WUNLOCK(object);
		}
		VI_LOCK(vp);
		if (vp->v_object != NULL) {
			/*
			 * Object has been created while we were allocating.
			 */
			VI_UNLOCK(vp);
			VM_OBJECT_WLOCK(object);
			KASSERT(object->ref_count == 1,
			    ("leaked ref %p %d", object, object->ref_count));
			object->type = OBJT_DEAD;
			refcount_init(&object->ref_count, 0);
			VM_OBJECT_WUNLOCK(object);
			vm_object_destroy(object);
			goto retry;
		}
		vp->v_object = object;
		VI_UNLOCK(vp);
		vrefact(vp);
	} else {
		vm_object_reference(object);
#if VM_NRESERVLEVEL > 0
		if ((object->flags & OBJ_COLORED) == 0) {
			VM_OBJECT_WLOCK(object);
			vm_object_color(object, 0);
			VM_OBJECT_WUNLOCK(object);
		}
#endif
	}
	return (object);
}

/*
 *	The object must be locked.
 */
static void
vnode_pager_dealloc(vm_object_t object)
{
	struct vnode *vp;
	int refs;

	vp = object->handle;
	if (vp == NULL)
		panic("vnode_pager_dealloc: pager already dealloced");

	VM_OBJECT_ASSERT_WLOCKED(object);
	vm_object_pip_wait(object, "vnpdea");
	refs = object->ref_count;

	object->handle = NULL;
	object->type = OBJT_DEAD;
	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
	if (object->un_pager.vnp.writemappings > 0) {
		object->un_pager.vnp.writemappings = 0;
		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
		    __func__, vp, vp->v_writecount);
	}
	vp->v_object = NULL;
	VI_LOCK(vp);

	/*
	 * vm_map_entry_set_vnode_text() cannot reach this vnode by
	 * following object->handle.  Clear all text references now.
	 * This also clears the transient references from
	 * kern_execve(), which is fine because dead_vnodeops uses nop
	 * for VOP_UNSET_TEXT().
	 */
	if (vp->v_writecount < 0)
		vp->v_writecount = 0;
	VI_UNLOCK(vp);
	VM_OBJECT_WUNLOCK(object);
	if (refs > 0)
		vunref(vp);
	VM_OBJECT_WLOCK(object);
}

static boolean_t
vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
    int *after)
{
	struct vnode *vp = object->handle;
	daddr_t bn;
	uintptr_t lockstate;
	int err;
	daddr_t reqblock;
	int poff;
	int bsize;
	int pagesperblock, blocksperpage;

	VM_OBJECT_ASSERT_LOCKED(object);
	/*
	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
	if (vp == NULL || VN_IS_DOOMED(vp))
		return FALSE;
	/*
	 * If the offset is beyond end of file we do
	 * not have the page.
	 */
	if (IDX_TO_OFF(pindex) >= object->un_pager.vnp.vnp_size)
		return FALSE;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;
	blocksperpage = 0;
	if (pagesperblock > 0) {
		reqblock = pindex / pagesperblock;
	} else {
		blocksperpage = (PAGE_SIZE / bsize);
		reqblock = pindex * blocksperpage;
	}
	lockstate = VM_OBJECT_DROP(object);
	err = VOP_BMAP(vp, reqblock, NULL, &bn, after, before);
	VM_OBJECT_PICKUP(object, lockstate);
	if (err)
		return TRUE;
	if (bn == -1)
		return FALSE;
	if (pagesperblock > 0) {
		poff = pindex - (reqblock * pagesperblock);
		if (before) {
			*before *= pagesperblock;
			*before += poff;
		}
		if (after) {
			/*
			 * The BMAP vop can report a partial block in the
			 * 'after', but must not report blocks after EOF.
			 * Assert the latter, and truncate 'after' in case
			 * of the former.
			 */
			KASSERT((reqblock + *after) * pagesperblock <
			    roundup2(object->size, pagesperblock),
			    ("%s: reqblock %jd after %d size %ju", __func__,
			    (intmax_t )reqblock, *after,
			    (uintmax_t )object->size));
			*after *= pagesperblock;
			*after += pagesperblock - (poff + 1);
			if (pindex + *after >= object->size)
				*after = object->size - 1 - pindex;
		}
	} else {
		if (before) {
			*before /= blocksperpage;
		}

		if (after) {
			*after /= blocksperpage;
		}
	}
	return TRUE;
}
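
/*
 * Worked example of the block/page conversion above (illustrative,
 * assuming PAGE_SIZE = 4K and an 8K filesystem block, so
 * pagesperblock = 2): for pindex = 5, reqblock = 2 and poff = 1.
 * If VOP_BMAP() reports one contiguous block before and one after,
 * then *before = 1 * 2 + 1 = 3 pages and, prior to the EOF clamp,
 * *after = 1 * 2 + (2 - 2) = 2 pages.
 */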

/*
 * Lets the VM system know about a change in size for a file.
 * We adjust our own internal size and flush any cached pages in
 * the associated object that are affected by the size change.
 *
 * Note: this routine may be invoked as a result of a pager put
 * operation (possibly at object termination time), so we must be careful.
 */
void
vnode_pager_setsize(struct vnode *vp, vm_ooffset_t nsize)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t nobjsize;

	if ((object = vp->v_object) == NULL)
		return;
#ifdef DEBUG_VFS_LOCKS
	{
		struct mount *mp;

		mp = vp->v_mount;
		if (mp != NULL && (mp->mnt_kern_flag & MNTK_VMSETSIZE_BUG) == 0)
			assert_vop_elocked(vp,
			    "vnode_pager_setsize and not locked vnode");
	}
#endif
	VM_OBJECT_WLOCK(object);
	if (object->type == OBJT_DEAD) {
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	KASSERT(object->type == OBJT_VNODE,
	    ("not vnode-backed object %p", object));
	if (nsize == object->un_pager.vnp.vnp_size) {
		/*
		 * Hasn't changed size
		 */
		VM_OBJECT_WUNLOCK(object);
		return;
	}
	nobjsize = OFF_TO_IDX(nsize + PAGE_MASK);
	if (nsize < object->un_pager.vnp.vnp_size) {
		/*
		 * File has shrunk. Toss any cached pages beyond the new EOF.
		 */
		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);
		/*
		 * this gets rid of garbage at the end of a page that is now
		 * only partially backed by the vnode.
		 *
		 * XXX for some reason (I don't know yet), if we take a
		 * completely invalid page and mark it partially valid
		 * it can screw up NFS reads, so we don't allow the case.
		 */
		if (!(nsize & PAGE_MASK))
			goto out;
		m = vm_page_grab(object, OFF_TO_IDX(nsize), VM_ALLOC_NOCREAT);
		if (m == NULL)
			goto out;
		if (!vm_page_none_valid(m)) {
			int base = (int)nsize & PAGE_MASK;
			int size = PAGE_SIZE - base;

			/*
			 * Clear out partial-page garbage in case
			 * the page has been mapped.
			 */
			pmap_zero_page_area(m, base, size);

			/*
			 * Update the valid bits to reflect the blocks that
			 * have been zeroed.  Some of these valid bits may
			 * have already been set.
			 */
			vm_page_set_valid_range(m, base, size);

			/*
			 * Round "base" to the next block boundary so that the
			 * dirty bit for a partially zeroed block is not
			 * cleared.
			 */
			base = roundup2(base, DEV_BSIZE);

			/*
			 * Clear out partial-page dirty bits.
			 *
			 * note that we do not clear out the valid
			 * bits.  This would prevent bogus_page
			 * replacement from working properly.
			 */
			vm_page_clear_dirty(m, base, PAGE_SIZE - base);
		}
		vm_page_xunbusy(m);
	}
out:
#if defined(__powerpc__) && !defined(__powerpc64__)
	object->un_pager.vnp.vnp_size = nsize;
#else
	atomic_store_64(&object->un_pager.vnp.vnp_size, nsize);
#endif
	object->size = nobjsize;
	VM_OBJECT_WUNLOCK(object);
}
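
/*
 * Filesystems call vnode_pager_setsize() whenever the file size changes,
 * typically from their truncate and write-extend paths, after updating
 * the in-core inode size.  Illustrative sketch (assumed field names, not
 * from this file):
 *
 *	ip->i_size = newsize;
 *	vnode_pager_setsize(vp, newsize);
 */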

/*
 * calculate the linear (byte) disk address of specified virtual
 * file address
 */
static int
vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
    int *run)
{
	int bsize;
	int err;
	daddr_t vblock;
	daddr_t voffset;

	if (VN_IS_DOOMED(vp))
		return -1;

	bsize = vp->v_mount->mnt_stat.f_iosize;
	vblock = address / bsize;
	voffset = address % bsize;

	err = VOP_BMAP(vp, vblock, NULL, rtaddress, run, NULL);
	if (err == 0) {
		if (*rtaddress != -1)
			*rtaddress += voffset / DEV_BSIZE;
		if (run) {
			*run += 1;
			*run *= bsize / PAGE_SIZE;
			*run -= voffset / PAGE_SIZE;
		}
	}

	return (err);
}
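
/*
 * Worked example (illustrative, PAGE_SIZE = 4K, DEV_BSIZE = 512,
 * bsize = 8K): for address = 20480, vblock = 2 and voffset = 4096.
 * If VOP_BMAP() maps block 2 to device block 160 with *run = 1 extra
 * contiguous block, then *rtaddress = 160 + 4096/512 = 168 and
 * *run = (1 + 1) * (8192/4096) - 4096/4096 = 3 pages.
 */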

/*
 * small block filesystem vnode pager input
 */
static int
vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
{
	struct vnode *vp;
	struct bufobj *bo;
	struct buf *bp;
	struct sf_buf *sf;
	daddr_t fileaddr;
	vm_offset_t bsize;
	vm_page_bits_t bits;
	int error, i;

	error = 0;
	vp = object->handle;
	if (VN_IS_DOOMED(vp))
		return VM_PAGER_BAD;

	bsize = vp->v_mount->mnt_stat.f_iosize;

	VOP_BMAP(vp, 0, &bo, 0, NULL, NULL);

	sf = sf_buf_alloc(m, 0);

	for (i = 0; i < PAGE_SIZE / bsize; i++) {
		vm_ooffset_t address;

		bits = vm_page_bits(i * bsize, bsize);
		if (m->valid & bits)
			continue;

		address = IDX_TO_OFF(m->pindex) + i * bsize;
		if (address >= object->un_pager.vnp.vnp_size) {
			fileaddr = -1;
		} else {
			error = vnode_pager_addr(vp, address, &fileaddr, NULL);
			if (error)
				break;
		}
		if (fileaddr != -1) {
			bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);

			/* build a minimal buffer header */
			bp->b_iocmd = BIO_READ;
			bp->b_iodone = bdone;
			KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
			KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
			bp->b_rcred = crhold(curthread->td_ucred);
			bp->b_wcred = crhold(curthread->td_ucred);
			bp->b_data = (caddr_t)sf_buf_kva(sf) + i * bsize;
			bp->b_blkno = fileaddr;
			pbgetbo(bo, bp);
			bp->b_vp = vp;
			bp->b_bcount = bsize;
			bp->b_bufsize = bsize;
			bp->b_runningbufspace = bp->b_bufsize;
			atomic_add_long(&runningbufspace, bp->b_runningbufspace);

			/* do the input */
			bp->b_iooffset = dbtob(bp->b_blkno);
			bstrategy(bp);

			bwait(bp, PVM, "vnsrd");

			if ((bp->b_ioflags & BIO_ERROR) != 0) {
				KASSERT(bp->b_error != 0,
				    ("%s: buf error but b_error == 0\n", __func__));
				error = bp->b_error;
			}

			/*
			 * free the buffer header back to the swap buffer pool
			 */
			bp->b_vp = NULL;
			pbrelbo(bp);
			uma_zfree(vnode_pbuf_zone, bp);
			if (error)
				break;
		} else
			bzero((caddr_t)sf_buf_kva(sf) + i * bsize, bsize);
		KASSERT((m->dirty & bits) == 0,
		    ("vnode_pager_input_smlfs: page %p is dirty", m));
		vm_page_bits_set(m, &m->valid, bits);
	}
	sf_buf_free(sf);
	if (error) {
		return VM_PAGER_ERROR;
	}
	return VM_PAGER_OK;
}

/*
 * old style vnode pager input routine
 */
static int
vnode_pager_input_old(vm_object_t object, vm_page_t m)
{
	struct uio auio;
	struct iovec aiov;
	int error;
	int size;
	struct sf_buf *sf;
	struct vnode *vp;

	VM_OBJECT_ASSERT_WLOCKED(object);
	error = 0;

	/*
	 * Return failure if beyond current EOF
	 */
	if (IDX_TO_OFF(m->pindex) >= object->un_pager.vnp.vnp_size) {
		return VM_PAGER_BAD;
	} else {
		size = PAGE_SIZE;
		if (IDX_TO_OFF(m->pindex) + size > object->un_pager.vnp.vnp_size)
			size = object->un_pager.vnp.vnp_size - IDX_TO_OFF(m->pindex);
		vp = object->handle;
		VM_OBJECT_WUNLOCK(object);

		/*
		 * Allocate a kernel virtual address and initialize so that
		 * we can use VOP_READ/WRITE routines.
		 */
		sf = sf_buf_alloc(m, 0);

		aiov.iov_base = (caddr_t)sf_buf_kva(sf);
		aiov.iov_len = size;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = IDX_TO_OFF(m->pindex);
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = size;
		auio.uio_td = curthread;

		error = VOP_READ(vp, &auio, 0, curthread->td_ucred);
		if (!error) {
			int count = size - auio.uio_resid;

			if (count == 0)
				error = EINVAL;
			else if (count != PAGE_SIZE)
				bzero((caddr_t)sf_buf_kva(sf) + count,
				    PAGE_SIZE - count);
		}
		sf_buf_free(sf);

		VM_OBJECT_WLOCK(object);
	}
	KASSERT(m->dirty == 0, ("vnode_pager_input_old: page %p is dirty", m));
	if (!error)
		vm_page_valid(m);
	return error ? VM_PAGER_ERROR : VM_PAGER_OK;
}

/*
 * generic vnode pager input routine
 */

/*
 * Local media VFS's that do not implement their own VOP_GETPAGES
 * should have their VOP_GETPAGES call vnode_pager_generic_getpages()
 * to implement the previous behaviour.
 *
 * All other FS's should use the bypass to get to the local media
 * backing vp's VOP_GETPAGES.
 */
static int
vnode_pager_getpages(vm_object_t object, vm_page_t *m, int count, int *rbehind,
    int *rahead)
{
	struct vnode *vp;
	int rtval;

	/* Handle is stable with paging in progress. */
	vp = object->handle;
	rtval = VOP_GETPAGES(vp, m, count, rbehind, rahead);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages not implemented\n"));
	return rtval;
}
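
/*
 * Illustrative sketch (assumed names, not from this file): a local
 * media filesystem opts in by pointing its vop_vector at the local
 * wrappers defined below, so page-ins are serviced by
 * vnode_pager_generic_getpages():
 *
 *	struct vop_vector examplefs_vnodeops = {
 *		.vop_default = &default_vnodeops,
 *		.vop_getpages = vnode_pager_local_getpages,
 *		.vop_getpages_async = vnode_pager_local_getpages_async,
 *	};
 */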

static int
vnode_pager_getpages_async(vm_object_t object, vm_page_t *m, int count,
    int *rbehind, int *rahead, vop_getpages_iodone_t iodone, void *arg)
{
	struct vnode *vp;
	int rtval;

	vp = object->handle;
	rtval = VOP_GETPAGES_ASYNC(vp, m, count, rbehind, rahead, iodone, arg);
	KASSERT(rtval != EOPNOTSUPP,
	    ("vnode_pager: FS getpages_async not implemented\n"));
	return (rtval);
}

/*
 * The implementation of VOP_GETPAGES() and VOP_GETPAGES_ASYNC() for
 * local filesystems, where partially valid pages can only occur at
 * the end of file.
 */
int
vnode_pager_local_getpages(struct vop_getpages_args *ap)
{

	return (vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, NULL, NULL));
}

int
vnode_pager_local_getpages_async(struct vop_getpages_async_args *ap)
{
	int error;

	error = vnode_pager_generic_getpages(ap->a_vp, ap->a_m, ap->a_count,
	    ap->a_rbehind, ap->a_rahead, ap->a_iodone, ap->a_arg);
	if (error != 0 && ap->a_iodone != NULL)
		ap->a_iodone(ap->a_arg, ap->a_m, ap->a_count, error);
	return (error);
}

/*
 * This is now called from local media FS's to operate against their
 * own vnodes if they fail to implement VOP_GETPAGES.
 */
int
vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
    int *a_rbehind, int *a_rahead, vop_getpages_iodone_t iodone, void *arg)
{
	vm_object_t object;
	struct bufobj *bo;
	struct buf *bp;
	off_t foff;
#ifdef INVARIANTS
	off_t blkno0;
#endif
	int bsize, pagesperblock;
	int error, before, after, rbehind, rahead, poff, i;
	int bytecount, secmask;

	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
	    ("%s does not support devices", __func__));

	if (VN_IS_DOOMED(vp))
		return (VM_PAGER_BAD);

	object = vp->v_object;
	foff = IDX_TO_OFF(m[0]->pindex);
	bsize = vp->v_mount->mnt_stat.f_iosize;
	pagesperblock = bsize / PAGE_SIZE;

	KASSERT(foff < object->un_pager.vnp.vnp_size,
	    ("%s: page %p offset beyond vp %p size", __func__, m[0], vp));
	KASSERT(count <= atop(maxphys),
	    ("%s: requested %d pages", __func__, count));

	/*
	 * The last page has valid blocks.  Invalid part can only
	 * exist at the end of file, and the page is made fully valid
	 * by zeroing in vm_pager_get_pages().
	 */
	if (!vm_page_none_valid(m[count - 1]) && --count == 0) {
		if (iodone != NULL)
			iodone(arg, m, 1, 0);
		return (VM_PAGER_OK);
	}

	bp = uma_zalloc(vnode_pbuf_zone, M_WAITOK);
	MPASS((bp->b_flags & B_MAXPHYS) != 0);

	/*
	 * Get the underlying device blocks for the file with VOP_BMAP().
	 * If the file system doesn't support VOP_BMAP, use old way of
	 * getting pages via VOP_READ.
	 */
	error = VOP_BMAP(vp, foff / bsize, &bo, &bp->b_blkno, &after, &before);
	if (error == EOPNOTSUPP) {
		uma_zfree(vnode_pbuf_zone, bp);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_old(object, m[i]);
			if (error)
				break;
		}
		VM_OBJECT_WUNLOCK(object);
		return (error);
	} else if (error != 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		return (VM_PAGER_ERROR);
	}

	/*
	 * If the file system supports BMAP, but blocksize is smaller
	 * than a page size, then use special small filesystem code.
	 */
	if (pagesperblock == 0) {
		uma_zfree(vnode_pbuf_zone, bp);
		for (i = 0; i < count; i++) {
			VM_CNT_INC(v_vnodein);
			VM_CNT_INC(v_vnodepgsin);
			error = vnode_pager_input_smlfs(object, m[i]);
			if (error)
				break;
		}
		return (error);
	}

	/*
	 * A sparse file can be encountered only for a single page request,
	 * which may not be preceded by call to vm_pager_haspage().
	 */
	if (bp->b_blkno == -1) {
		KASSERT(count == 1,
		    ("%s: array[%d] request to a sparse file %p", __func__,
		    count, vp));
		uma_zfree(vnode_pbuf_zone, bp);
		pmap_zero_page(m[0]);
		KASSERT(m[0]->dirty == 0, ("%s: page %p is dirty",
		    __func__, m[0]));
		vm_page_valid(m[0]);
		return (VM_PAGER_OK);
	}

#ifdef INVARIANTS
	blkno0 = bp->b_blkno;
#endif
	bp->b_blkno += (foff % bsize) / DEV_BSIZE;

	/* Recalculate blocks available after/before to pages. */
	poff = (foff % bsize) / PAGE_SIZE;
	before *= pagesperblock;
	before += poff;
	after *= pagesperblock;
	after += pagesperblock - (poff + 1);
	if (m[0]->pindex + after >= object->size)
		after = object->size - 1 - m[0]->pindex;
	KASSERT(count <= after + 1, ("%s: %d pages asked, can do only %d",
	    __func__, count, after + 1));
	after -= count - 1;

	/* Trim requested rbehind/rahead to possible values. */
	rbehind = a_rbehind ? *a_rbehind : 0;
	rahead = a_rahead ? *a_rahead : 0;
	rbehind = min(rbehind, before);
	rbehind = min(rbehind, m[0]->pindex);
	rahead = min(rahead, after);
	rahead = min(rahead, object->size - m[count - 1]->pindex);
	/*
	 * Check that total amount of pages fit into buf.  Trim rbehind and
	 * rahead evenly if not.
	 */
	if (rbehind + rahead + count > atop(maxphys)) {
		int trim, sum;

		trim = rbehind + rahead + count - atop(maxphys) + 1;
		sum = rbehind + rahead;
		if (rbehind == before) {
			/* Roundup rbehind trim to block size. */
			rbehind -= roundup(trim * rbehind / sum, pagesperblock);
			if (rbehind < 0)
				rbehind = 0;
		} else
			rbehind -= trim * rbehind / sum;
		rahead -= trim * rahead / sum;
	}
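	/*
	 * Worked example of the trim above (illustrative): with 4K pages
	 * and maxphys = 128K, atop(maxphys) = 32.  For rbehind = 10,
	 * rahead = 30, count = 8: trim = 48 - 32 + 1 = 17 and sum = 40,
	 * so (when rbehind != before) rbehind becomes 10 - 17*10/40 = 6
	 * and rahead becomes 30 - 17*30/40 = 18, leaving 6 + 18 + 8 = 32
	 * pages, which satisfies the assertion below.
	 */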
	KASSERT(rbehind + rahead + count <= atop(maxphys),
	    ("%s: behind %d ahead %d count %d maxphys %lu", __func__,
	    rbehind, rahead, count, maxphys));

	/*
	 * Fill in the bp->b_pages[] array with requested and optional
	 * read behind or read ahead pages.  Read behind pages are looked
	 * up in a backward direction, down to a first cached page.  Same
	 * for read ahead pages, but there is no need to shift the array
	 * in case of encountering a cached page.
	 */
	i = bp->b_npages = 0;
	if (rbehind) {
		vm_pindex_t startpindex, tpindex;
		vm_page_t p;

		VM_OBJECT_WLOCK(object);
		startpindex = m[0]->pindex - rbehind;
		if ((p = TAILQ_PREV(m[0], pglist, listq)) != NULL &&
		    p->pindex >= startpindex)
			startpindex = p->pindex + 1;

		/* tpindex is unsigned; beware of numeric underflow. */
		for (tpindex = m[0]->pindex - 1;
		    tpindex >= startpindex && tpindex < m[0]->pindex;
		    tpindex--, i++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL) {
				/* Shift the array. */
				for (int j = 0; j < i; j++)
					bp->b_pages[j] = bp->b_pages[j +
					    tpindex + 1 - startpindex];
				break;
			}
			bp->b_pages[tpindex - startpindex] = p;
		}

		bp->b_pgbefore = i;
		bp->b_npages += i;
		bp->b_blkno -= IDX_TO_OFF(i) / DEV_BSIZE;
	} else
		bp->b_pgbefore = 0;

	/* Requested pages. */
	for (int j = 0; j < count; j++, i++)
		bp->b_pages[i] = m[j];
	bp->b_npages += count;

	if (rahead) {
		vm_pindex_t endpindex, tpindex;
		vm_page_t p;

		if (!VM_OBJECT_WOWNED(object))
			VM_OBJECT_WLOCK(object);
		endpindex = m[count - 1]->pindex + rahead + 1;
		if ((p = TAILQ_NEXT(m[count - 1], listq)) != NULL &&
		    p->pindex < endpindex)
			endpindex = p->pindex;
		if (endpindex > object->size)
			endpindex = object->size;

		for (tpindex = m[count - 1]->pindex + 1;
		    tpindex < endpindex; i++, tpindex++) {
			p = vm_page_alloc(object, tpindex, VM_ALLOC_NORMAL);
			if (p == NULL)
				break;
			bp->b_pages[i] = p;
		}

		bp->b_pgafter = i - bp->b_npages;
		bp->b_npages = i;
	} else
		bp->b_pgafter = 0;

	if (VM_OBJECT_WOWNED(object))
		VM_OBJECT_WUNLOCK(object);

	/* Report back actual behind/ahead read. */
	if (a_rbehind)
		*a_rbehind = bp->b_pgbefore;
	if (a_rahead)
		*a_rahead = bp->b_pgafter;

#ifdef INVARIANTS
	KASSERT(bp->b_npages <= atop(maxphys),
	    ("%s: buf %p overflowed", __func__, bp));
	for (int j = 1, prev = 0; j < bp->b_npages; j++) {
		if (bp->b_pages[j] == bogus_page)
			continue;
		KASSERT(bp->b_pages[j]->pindex - bp->b_pages[prev]->pindex ==
		    j - prev, ("%s: pages array not consecutive, bp %p",
		     __func__, bp));
		prev = j;
	}
#endif

	/*
	 * Recalculate first offset and bytecount with regards to read behind.
	 * Truncate bytecount to vnode real size and round up physical size
	 * for real devices.
	 */
	foff = IDX_TO_OFF(bp->b_pages[0]->pindex);
	bytecount = bp->b_npages << PAGE_SHIFT;
	if ((foff + bytecount) > object->un_pager.vnp.vnp_size)
		bytecount = object->un_pager.vnp.vnp_size - foff;
	secmask = bo->bo_bsize - 1;
	KASSERT(secmask < PAGE_SIZE && secmask > 0,
	    ("%s: sector size %d too large", __func__, secmask + 1));
	bytecount = (bytecount + secmask) & ~secmask;

	/*
	 * And map the pages to be read into the kva, if the filesystem
	 * requires mapped buffers.
	 */
	if ((vp->v_mount->mnt_kern_flag & MNTK_UNMAPPED_BUFS) != 0 &&
	    unmapped_buf_allowed) {
		bp->b_data = unmapped_buf;
		bp->b_offset = 0;
	} else {
		bp->b_data = bp->b_kvabase;
		pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages, bp->b_npages);
	}

	/* Build a minimal buffer header. */
	bp->b_iocmd = BIO_READ;
	KASSERT(bp->b_rcred == NOCRED, ("leaking read ucred"));
	KASSERT(bp->b_wcred == NOCRED, ("leaking write ucred"));
	bp->b_rcred = crhold(curthread->td_ucred);
	bp->b_wcred = crhold(curthread->td_ucred);
	pbgetbo(bo, bp);
	bp->b_vp = vp;
	bp->b_bcount = bp->b_bufsize = bp->b_runningbufspace = bytecount;
	bp->b_iooffset = dbtob(bp->b_blkno);
	KASSERT(IDX_TO_OFF(m[0]->pindex - bp->b_pages[0]->pindex) ==
	    (blkno0 - bp->b_blkno) * DEV_BSIZE +
	    IDX_TO_OFF(m[0]->pindex) % bsize,
	    ("wrong offsets bsize %d m[0] %ju b_pages[0] %ju "
	    "blkno0 %ju b_blkno %ju", bsize,
	    (uintmax_t)m[0]->pindex, (uintmax_t)bp->b_pages[0]->pindex,
	    (uintmax_t)blkno0, (uintmax_t)bp->b_blkno));

	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
	VM_CNT_INC(v_vnodein);
	VM_CNT_ADD(v_vnodepgsin, bp->b_npages);

	if (iodone != NULL) { /* async */
		bp->b_pgiodone = iodone;
		bp->b_caller1 = arg;
		bp->b_iodone = vnode_pager_generic_getpages_done_async;
		bp->b_flags |= B_ASYNC;
		BUF_KERNPROC(bp);
		bstrategy(bp);
		return (VM_PAGER_OK);
	} else {
		bp->b_iodone = bdone;
		bstrategy(bp);
		bwait(bp, PVM, "vnread");
		error = vnode_pager_generic_getpages_done(bp);
		for (i = 0; i < bp->b_npages; i++)
			bp->b_pages[i] = NULL;
		bp->b_vp = NULL;
		pbrelbo(bp);
		uma_zfree(vnode_pbuf_zone, bp);
		return (error != 0 ? VM_PAGER_ERROR : VM_PAGER_OK);
	}
}

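/*
 * Completion handler for asynchronous reads: finish the common getpages
 * work, deliver the result to the caller's iodone callback for the
 * originally requested range (excluding read behind/ahead pages), and
 * release the pbuf.
 */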
static void
vnode_pager_generic_getpages_done_async(struct buf *bp)
{
	int error;

	error = vnode_pager_generic_getpages_done(bp);
	/* Run the iodone upon the requested range. */
	bp->b_pgiodone(bp->b_caller1, bp->b_pages + bp->b_pgbefore,
	    bp->b_npages - bp->b_pgbefore - bp->b_pgafter, error);
	for (int i = 0; i < bp->b_npages; i++)
		bp->b_pages[i] = NULL;
	bp->b_vp = NULL;
	pbrelbo(bp);
	uma_zfree(vnode_pbuf_zone, bp);
}

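/*
 * Common completion work for both the sync and async paths: zero any
 * tail of the buffer that the read did not fill, unmap the buffer KVA,
 * and either free the read behind/ahead pages on error or mark the
 * pages valid according to how much of each page was read.
 */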
1117a9643ea8Slogwang static int
vnode_pager_generic_getpages_done(struct buf * bp)1118a9643ea8Slogwang vnode_pager_generic_getpages_done(struct buf *bp)
1119a9643ea8Slogwang {
1120a9643ea8Slogwang 	vm_object_t object;
1121a9643ea8Slogwang 	off_t tfoff, nextoff;
1122a9643ea8Slogwang 	int i, error;
1123a9643ea8Slogwang 
1124*22ce4affSfengbojiang 	KASSERT((bp->b_ioflags & BIO_ERROR) == 0 || bp->b_error != 0,
1125*22ce4affSfengbojiang 	    ("%s: buf error but b_error == 0\n", __func__));
1126*22ce4affSfengbojiang 	error = (bp->b_ioflags & BIO_ERROR) != 0 ? bp->b_error : 0;
1127a9643ea8Slogwang 	object = bp->b_vp->v_object;
1128a9643ea8Slogwang 
1129a9643ea8Slogwang 	if (error == 0 && bp->b_bcount != bp->b_npages * PAGE_SIZE) {
1130a9643ea8Slogwang 		if (!buf_mapped(bp)) {
1131a9643ea8Slogwang 			bp->b_data = bp->b_kvabase;
1132a9643ea8Slogwang 			pmap_qenter((vm_offset_t)bp->b_data, bp->b_pages,
1133a9643ea8Slogwang 			    bp->b_npages);
1134a9643ea8Slogwang 		}
1135a9643ea8Slogwang 		bzero(bp->b_data + bp->b_bcount,
1136a9643ea8Slogwang 		    PAGE_SIZE * bp->b_npages - bp->b_bcount);
1137a9643ea8Slogwang 	}
1138a9643ea8Slogwang 	if (buf_mapped(bp)) {
1139a9643ea8Slogwang 		pmap_qremove((vm_offset_t)bp->b_data, bp->b_npages);
1140a9643ea8Slogwang 		bp->b_data = unmapped_buf;
1141a9643ea8Slogwang 	}
1142a9643ea8Slogwang 
1143*22ce4affSfengbojiang 	/*
1144*22ce4affSfengbojiang 	 * If the read failed, we must free any read ahead/behind pages here.
1145*22ce4affSfengbojiang 	 * The requested pages are freed by the caller (for sync requests)
1146*22ce4affSfengbojiang 	 * or by the bp->b_pgiodone callback (for async requests).
1147*22ce4affSfengbojiang 	 */
1148*22ce4affSfengbojiang 	if (error != 0) {
1149a9643ea8Slogwang 		VM_OBJECT_WLOCK(object);
1150*22ce4affSfengbojiang 		for (i = 0; i < bp->b_pgbefore; i++)
1151*22ce4affSfengbojiang 			vm_page_free_invalid(bp->b_pages[i]);
1152*22ce4affSfengbojiang 		for (i = bp->b_npages - bp->b_pgafter; i < bp->b_npages; i++)
1153*22ce4affSfengbojiang 			vm_page_free_invalid(bp->b_pages[i]);
1154*22ce4affSfengbojiang 		VM_OBJECT_WUNLOCK(object);
1155*22ce4affSfengbojiang 		return (error);
1156*22ce4affSfengbojiang 	}
1157*22ce4affSfengbojiang 
1158*22ce4affSfengbojiang 	/* Read lock to protect size. */
1159*22ce4affSfengbojiang 	VM_OBJECT_RLOCK(object);
1160a9643ea8Slogwang 	for (i = 0, tfoff = IDX_TO_OFF(bp->b_pages[0]->pindex);
1161a9643ea8Slogwang 	    i < bp->b_npages; i++, tfoff = nextoff) {
1162a9643ea8Slogwang 		vm_page_t mt;
1163a9643ea8Slogwang 
1164a9643ea8Slogwang 		nextoff = tfoff + PAGE_SIZE;
1165a9643ea8Slogwang 		mt = bp->b_pages[i];
1166*22ce4affSfengbojiang 		if (mt == bogus_page)
1167*22ce4affSfengbojiang 			continue;
1168a9643ea8Slogwang 
1169a9643ea8Slogwang 		if (nextoff <= object->un_pager.vnp.vnp_size) {
1170a9643ea8Slogwang 			/*
1171a9643ea8Slogwang 			 * Read filled up entire page.
1172a9643ea8Slogwang 			 */
1173*22ce4affSfengbojiang 			vm_page_valid(mt);
1174a9643ea8Slogwang 			KASSERT(mt->dirty == 0,
1175a9643ea8Slogwang 			    ("%s: page %p is dirty", __func__, mt));
1176a9643ea8Slogwang 			KASSERT(!pmap_page_is_mapped(mt),
1177a9643ea8Slogwang 			    ("%s: page %p is mapped", __func__, mt));
1178a9643ea8Slogwang 		} else {
1179a9643ea8Slogwang 			/*
1180a9643ea8Slogwang 			 * Read did not fill up entire page.
1181a9643ea8Slogwang 			 *
1182a9643ea8Slogwang 			 * Currently we do not set the entire page valid;
1183a9643ea8Slogwang 			 * we just try to clear the piece that we couldn't
1184a9643ea8Slogwang 			 * read.
1185a9643ea8Slogwang 			 */
1186a9643ea8Slogwang 			vm_page_set_valid_range(mt, 0,
1187a9643ea8Slogwang 			    object->un_pager.vnp.vnp_size - tfoff);
1188a9643ea8Slogwang 			KASSERT((mt->dirty & vm_page_bits(0,
1189a9643ea8Slogwang 			    object->un_pager.vnp.vnp_size - tfoff)) == 0,
1190a9643ea8Slogwang 			    ("%s: page %p is dirty", __func__, mt));
1191a9643ea8Slogwang 		}
1192a9643ea8Slogwang 
1193a9643ea8Slogwang 		if (i < bp->b_pgbefore || i >= bp->b_npages - bp->b_pgafter)
1194a9643ea8Slogwang 			vm_page_readahead_finish(mt);
1195a9643ea8Slogwang 	}
1196*22ce4affSfengbojiang 	VM_OBJECT_RUNLOCK(object);
1197a9643ea8Slogwang 
1198a9643ea8Slogwang 	return (error);
1199a9643ea8Slogwang }
1200a9643ea8Slogwang 
1201a9643ea8Slogwang /*
1202a9643ea8Slogwang  * EOPNOTSUPP is no longer legal.  For local media VFS's that do not
1203a9643ea8Slogwang  * implement their own VOP_PUTPAGES, their VOP_PUTPAGES should call
1204a9643ea8Slogwang  * vnode_pager_generic_putpages() to implement the previous behaviour.
1205a9643ea8Slogwang  *
1206a9643ea8Slogwang  * All other FS's should use the bypass to get to the local media
1207a9643ea8Slogwang  * backing vp's VOP_PUTPAGES.
1208a9643ea8Slogwang  */
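/*
 * A minimal sketch (not part of this file) of how a local media FS
 * might forward VOP_PUTPAGES here.  fs_putpages() and its vop table
 * entry are hypothetical names; the vop_putpages_args fields are
 * assumed to follow the standard VOP_PUTPAGES signature:
 *
 *	static int
 *	fs_putpages(struct vop_putpages_args *ap)
 *	{
 *
 *		return (vnode_pager_generic_putpages(ap->a_vp, ap->a_m,
 *		    ap->a_count, ap->a_sync, ap->a_rtvals));
 *	}
 */
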
1209a9643ea8Slogwang static void
1210a9643ea8Slogwang vnode_pager_putpages(vm_object_t object, vm_page_t *m, int count,
1211a9643ea8Slogwang     int flags, int *rtvals)
1212a9643ea8Slogwang {
1213a9643ea8Slogwang 	int rtval;
1214a9643ea8Slogwang 	struct vnode *vp;
1215a9643ea8Slogwang 	int bytes = count * PAGE_SIZE;
1216a9643ea8Slogwang 
1217a9643ea8Slogwang 	/*
1218a9643ea8Slogwang 	 * Force synchronous operation if we are extremely low on memory
1219a9643ea8Slogwang 	 * to prevent a low-memory deadlock.  VOP operations often need to
1220a9643ea8Slogwang 	 * allocate more memory to initiate the I/O (i.e., do a BMAP
1221a9643ea8Slogwang 	 * operation).  The swapper handles the case by limiting the amount
1222a9643ea8Slogwang 	 * of asynchronous I/O, but that sort of solution doesn't scale well
1223a9643ea8Slogwang 	 * for the vnode pager without a lot of work.
1224a9643ea8Slogwang 	 *
1225a9643ea8Slogwang 	 * Also, the backing vnode's iodone routine may not wake the pageout
1226a9643ea8Slogwang 	 * daemon up.  This should probably be addressed XXX.
1227a9643ea8Slogwang 	 */
1228a9643ea8Slogwang 
1229*22ce4affSfengbojiang 	if (vm_page_count_min())
1230a9643ea8Slogwang 		flags |= VM_PAGER_PUT_SYNC;
1231a9643ea8Slogwang 
1232a9643ea8Slogwang 	/*
1233a9643ea8Slogwang 	 * Call device-specific putpages function
1234a9643ea8Slogwang 	 */
1235a9643ea8Slogwang 	vp = object->handle;
1236a9643ea8Slogwang 	VM_OBJECT_WUNLOCK(object);
1237a9643ea8Slogwang 	rtval = VOP_PUTPAGES(vp, m, bytes, flags, rtvals);
1238a9643ea8Slogwang 	KASSERT(rtval != EOPNOTSUPP,
1239a9643ea8Slogwang 	    ("vnode_pager: stale FS putpages\n"));
1240a9643ea8Slogwang 	VM_OBJECT_WLOCK(object);
1241a9643ea8Slogwang }
1242a9643ea8Slogwang 
1243*22ce4affSfengbojiang static int
1244*22ce4affSfengbojiang vn_off2bidx(vm_ooffset_t offset)
1245*22ce4affSfengbojiang {
1246*22ce4affSfengbojiang 
1247*22ce4affSfengbojiang 	return ((offset & PAGE_MASK) / DEV_BSIZE);
1248*22ce4affSfengbojiang }
1249*22ce4affSfengbojiang 
1250*22ce4affSfengbojiang static bool
1251*22ce4affSfengbojiang vn_dirty_blk(vm_page_t m, vm_ooffset_t offset)
1252*22ce4affSfengbojiang {
1253*22ce4affSfengbojiang 
1254*22ce4affSfengbojiang 	KASSERT(IDX_TO_OFF(m->pindex) <= offset &&
1255*22ce4affSfengbojiang 	    offset < IDX_TO_OFF(m->pindex + 1),
1256*22ce4affSfengbojiang 	    ("page %p pidx %ju offset %ju", m, (uintmax_t)m->pindex,
1257*22ce4affSfengbojiang 	    (uintmax_t)offset));
1258*22ce4affSfengbojiang 	return ((m->dirty & ((vm_page_bits_t)1 << vn_off2bidx(offset))) != 0);
1259*22ce4affSfengbojiang }
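
/*
 * Worked example, assuming 4K pages and DEV_BSIZE == 512: for offset
 * 0x1600, offset & PAGE_MASK == 0x600, so vn_off2bidx() returns
 * 0x600 / 512 == 3, and vn_dirty_blk() then tests bit 3 of m->dirty,
 * i.e. the fourth 512-byte block within the page.
 */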
1260a9643ea8Slogwang 
1261a9643ea8Slogwang /*
1262a9643ea8Slogwang  * This is now called from local media FS's to operate against their
1263a9643ea8Slogwang  * own vnodes if they fail to implement VOP_PUTPAGES.
1264a9643ea8Slogwang  *
1265a9643ea8Slogwang  * This is typically called indirectly via the pageout daemon, and
1266a9643ea8Slogwang  * clustering has usually already occurred, so in general we ask the
1267a9643ea8Slogwang  * underlying filesystem to write the data out asynchronously rather
1268a9643ea8Slogwang  * than delayed.
1269a9643ea8Slogwang  */
1270a9643ea8Slogwang int
1271a9643ea8Slogwang vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
1272a9643ea8Slogwang     int flags, int *rtvals)
1273a9643ea8Slogwang {
1274a9643ea8Slogwang 	vm_object_t object;
1275a9643ea8Slogwang 	vm_page_t m;
1276*22ce4affSfengbojiang 	vm_ooffset_t maxblksz, next_offset, poffset, prev_offset;
1277a9643ea8Slogwang 	struct uio auio;
1278a9643ea8Slogwang 	struct iovec aiov;
1279*22ce4affSfengbojiang 	off_t prev_resid, wrsz;
1280*22ce4affSfengbojiang 	int count, error, i, maxsize, ncount, pgoff, ppscheck;
1281*22ce4affSfengbojiang 	bool in_hole;
1282a9643ea8Slogwang 	static struct timeval lastfail;
1283a9643ea8Slogwang 	static int curfail;
1284a9643ea8Slogwang 
1285a9643ea8Slogwang 	object = vp->v_object;
1286a9643ea8Slogwang 	count = bytecount / PAGE_SIZE;
1287a9643ea8Slogwang 
1288a9643ea8Slogwang 	for (i = 0; i < count; i++)
1289a9643ea8Slogwang 		rtvals[i] = VM_PAGER_ERROR;
1290a9643ea8Slogwang 
1291a9643ea8Slogwang 	if ((int64_t)ma[0]->pindex < 0) {
1292*22ce4affSfengbojiang 		printf("vnode_pager_generic_putpages: "
1293*22ce4affSfengbojiang 		    "attempt to write meta-data 0x%jx(%lx)\n",
1294*22ce4affSfengbojiang 		    (uintmax_t)ma[0]->pindex, (u_long)ma[0]->dirty);
1295a9643ea8Slogwang 		rtvals[0] = VM_PAGER_BAD;
1296*22ce4affSfengbojiang 		return (VM_PAGER_BAD);
1297a9643ea8Slogwang 	}
1298a9643ea8Slogwang 
1299a9643ea8Slogwang 	maxsize = count * PAGE_SIZE;
1300a9643ea8Slogwang 	ncount = count;
1301a9643ea8Slogwang 
1302a9643ea8Slogwang 	poffset = IDX_TO_OFF(ma[0]->pindex);
1303a9643ea8Slogwang 
1304a9643ea8Slogwang 	/*
1305a9643ea8Slogwang 	 * If the page-aligned write is larger than the actual file, we
1306a9643ea8Slogwang 	 * have to invalidate pages occurring beyond the file EOF.  However,
1307a9643ea8Slogwang 	 * there is an edge case where the file size is not page-aligned and
1308a9643ea8Slogwang 	 * the last page is partially invalid.  In this case the filesystem
1309a9643ea8Slogwang 	 * may not properly clear the dirty bits for the entire page (which
1310a9643ea8Slogwang 	 * could be VM_PAGE_BITS_ALL due to the page having been mmap()d).
1311*22ce4affSfengbojiang 	 * With the page busied we are free to fix up the dirty bits here.
1312a9643ea8Slogwang 	 *
1313a9643ea8Slogwang 	 * We do not under any circumstances truncate the valid bits, as
1314a9643ea8Slogwang 	 * this will screw up bogus page replacement.
1315a9643ea8Slogwang 	 */
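	/*
	 * Worked example (hypothetical numbers, 4K pages, DEV_BSIZE ==
	 * 512): if vnp_size ends 0x2345 bytes into the run, then
	 * maxsize & PAGE_MASK == 0x345, which rounds up to pgoff ==
	 * 0x400, so the last page's dirty bits from offset 0x400 on
	 * are cleared below; those blocks lie entirely past EOF.
	 */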
1316*22ce4affSfengbojiang 	VM_OBJECT_RLOCK(object);
1317a9643ea8Slogwang 	if (maxsize + poffset > object->un_pager.vnp.vnp_size) {
1318a9643ea8Slogwang 		if (object->un_pager.vnp.vnp_size > poffset) {
1319a9643ea8Slogwang 			maxsize = object->un_pager.vnp.vnp_size - poffset;
1320a9643ea8Slogwang 			ncount = btoc(maxsize);
1321a9643ea8Slogwang 			if ((pgoff = (int)maxsize & PAGE_MASK) != 0) {
1322*22ce4affSfengbojiang 				pgoff = roundup2(pgoff, DEV_BSIZE);
1323*22ce4affSfengbojiang 
1324a9643ea8Slogwang 				/*
1325*22ce4affSfengbojiang 				 * If the page is busy and the following
1326a9643ea8Slogwang 				 * conditions hold, then the page's dirty
1327a9643ea8Slogwang 				 * field cannot be concurrently changed by a
1328a9643ea8Slogwang 				 * pmap operation.
1329a9643ea8Slogwang 				 */
1330a9643ea8Slogwang 				m = ma[ncount - 1];
1331a9643ea8Slogwang 				vm_page_assert_sbusied(m);
1332a9643ea8Slogwang 				KASSERT(!pmap_page_is_write_mapped(m),
1333a9643ea8Slogwang 		("vnode_pager_generic_putpages: page %p is not read-only", m));
1334*22ce4affSfengbojiang 				MPASS(m->dirty != 0);
1335a9643ea8Slogwang 				vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
1336a9643ea8Slogwang 				    pgoff);
1337a9643ea8Slogwang 			}
1338a9643ea8Slogwang 		} else {
1339a9643ea8Slogwang 			maxsize = 0;
1340a9643ea8Slogwang 			ncount = 0;
1341a9643ea8Slogwang 		}
1342*22ce4affSfengbojiang 		for (i = ncount; i < count; i++)
1343a9643ea8Slogwang 			rtvals[i] = VM_PAGER_BAD;
1344a9643ea8Slogwang 	}
1345*22ce4affSfengbojiang 	VM_OBJECT_RUNLOCK(object);
1346a9643ea8Slogwang 
1347a9643ea8Slogwang 	auio.uio_iov = &aiov;
1348a9643ea8Slogwang 	auio.uio_segflg = UIO_NOCOPY;
1349a9643ea8Slogwang 	auio.uio_rw = UIO_WRITE;
1350*22ce4affSfengbojiang 	auio.uio_td = NULL;
1351*22ce4affSfengbojiang 	maxblksz = roundup2(poffset + maxsize, DEV_BSIZE);
1352a9643ea8Slogwang 
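	/*
	 * The loop below alternates two scans over the per-page dirty
	 * bitmaps: skip the run of clean blocks, measure the following
	 * run of dirty blocks, and issue one VOP_WRITE per dirty run.
	 * For example (hypothetical mask, DEV_BSIZE == 512), a page
	 * with dirty mask 0x0c (blocks 2 and 3) produces a write for
	 * file offsets [page start + 0x400, page start + 0x800).
	 */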
1353*22ce4affSfengbojiang 	for (prev_offset = poffset; prev_offset < maxblksz;) {
1354*22ce4affSfengbojiang 		/* Skip clean blocks. */
1355*22ce4affSfengbojiang 		for (in_hole = true; in_hole && prev_offset < maxblksz;) {
1356*22ce4affSfengbojiang 			m = ma[OFF_TO_IDX(prev_offset - poffset)];
1357*22ce4affSfengbojiang 			for (i = vn_off2bidx(prev_offset);
1358*22ce4affSfengbojiang 			    i < sizeof(vm_page_bits_t) * NBBY &&
1359*22ce4affSfengbojiang 			    prev_offset < maxblksz; i++) {
1360*22ce4affSfengbojiang 				if (vn_dirty_blk(m, prev_offset)) {
1361*22ce4affSfengbojiang 					in_hole = false;
1362*22ce4affSfengbojiang 					break;
1363a9643ea8Slogwang 				}
1364*22ce4affSfengbojiang 				prev_offset += DEV_BSIZE;
1365a9643ea8Slogwang 			}
1366*22ce4affSfengbojiang 		}
1367*22ce4affSfengbojiang 		if (in_hole)
1368*22ce4affSfengbojiang 			goto write_done;
1369*22ce4affSfengbojiang 
1370*22ce4affSfengbojiang 		/* Find longest run of dirty blocks. */
1371*22ce4affSfengbojiang 		for (next_offset = prev_offset; next_offset < maxblksz;) {
1372*22ce4affSfengbojiang 			m = ma[OFF_TO_IDX(next_offset - poffset)];
1373*22ce4affSfengbojiang 			for (i = vn_off2bidx(next_offset);
1374*22ce4affSfengbojiang 			    i < sizeof(vm_page_bits_t) * NBBY &&
1375*22ce4affSfengbojiang 			    next_offset < maxblksz; i++) {
1376*22ce4affSfengbojiang 				if (!vn_dirty_blk(m, next_offset))
1377*22ce4affSfengbojiang 					goto start_write;
1378*22ce4affSfengbojiang 				next_offset += DEV_BSIZE;
1379*22ce4affSfengbojiang 			}
1380*22ce4affSfengbojiang 		}
1381*22ce4affSfengbojiang start_write:
1382*22ce4affSfengbojiang 		if (next_offset > poffset + maxsize)
1383*22ce4affSfengbojiang 			next_offset = poffset + maxsize;
1384*22ce4affSfengbojiang 
1385*22ce4affSfengbojiang 		/*
1386*22ce4affSfengbojiang 		 * Getting here requires finding a dirty block in the
1387*22ce4affSfengbojiang 		 * 'skip clean blocks' loop.
1388*22ce4affSfengbojiang 		 */
1389*22ce4affSfengbojiang 		MPASS(prev_offset < next_offset);
1390*22ce4affSfengbojiang 
1391*22ce4affSfengbojiang 		aiov.iov_base = NULL;
1392*22ce4affSfengbojiang 		auio.uio_iovcnt = 1;
1393*22ce4affSfengbojiang 		auio.uio_offset = prev_offset;
1394*22ce4affSfengbojiang 		prev_resid = auio.uio_resid = aiov.iov_len = next_offset -
1395*22ce4affSfengbojiang 		    prev_offset;
1396*22ce4affSfengbojiang 		error = VOP_WRITE(vp, &auio,
1397*22ce4affSfengbojiang 		    vnode_pager_putpages_ioflags(flags), curthread->td_ucred);
1398*22ce4affSfengbojiang 
1399*22ce4affSfengbojiang 		wrsz = prev_resid - auio.uio_resid;
1400*22ce4affSfengbojiang 		if (wrsz == 0) {
1401*22ce4affSfengbojiang 			if (ppsratecheck(&lastfail, &curfail, 1) != 0) {
1402*22ce4affSfengbojiang 				vn_printf(vp, "vnode_pager_putpages: "
1403*22ce4affSfengbojiang 				    "zero-length write at %ju resid %zd\n",
1404*22ce4affSfengbojiang 				    auio.uio_offset, auio.uio_resid);
1405*22ce4affSfengbojiang 			}
1406*22ce4affSfengbojiang 			break;
1407*22ce4affSfengbojiang 		}
1408*22ce4affSfengbojiang 
1409*22ce4affSfengbojiang 		/* Adjust the starting offset for next iteration. */
1410*22ce4affSfengbojiang 		prev_offset += wrsz;
1411*22ce4affSfengbojiang 		MPASS(auio.uio_offset == prev_offset);
1412*22ce4affSfengbojiang 
1413*22ce4affSfengbojiang 		ppscheck = 0;
1414*22ce4affSfengbojiang 		if (error != 0 && (ppscheck = ppsratecheck(&lastfail,
1415*22ce4affSfengbojiang 		    &curfail, 1)) != 0)
1416*22ce4affSfengbojiang 			vn_printf(vp, "vnode_pager_putpages: I/O error %d\n",
1417*22ce4affSfengbojiang 			    error);
1418*22ce4affSfengbojiang 		if (auio.uio_resid != 0 && (ppscheck != 0 ||
1419*22ce4affSfengbojiang 		    ppsratecheck(&lastfail, &curfail, 1) != 0))
1420*22ce4affSfengbojiang 			vn_printf(vp, "vnode_pager_putpages: residual I/O %zd "
1421*22ce4affSfengbojiang 			    "at %ju\n", auio.uio_resid,
1422*22ce4affSfengbojiang 			    (uintmax_t)ma[0]->pindex);
1423*22ce4affSfengbojiang 		if (error != 0 || auio.uio_resid != 0)
1424*22ce4affSfengbojiang 			break;
1425*22ce4affSfengbojiang 	}
1426*22ce4affSfengbojiang write_done:
1427*22ce4affSfengbojiang 	/* Mark completely processed pages. */
1428*22ce4affSfengbojiang 	for (i = 0; i < OFF_TO_IDX(prev_offset - poffset); i++)
1429a9643ea8Slogwang 		rtvals[i] = VM_PAGER_OK;
1430*22ce4affSfengbojiang 	/* Mark partial EOF page. */
1431*22ce4affSfengbojiang 	if (prev_offset == poffset + maxsize && (prev_offset & PAGE_MASK) != 0)
1432*22ce4affSfengbojiang 		rtvals[i++] = VM_PAGER_OK;
1433*22ce4affSfengbojiang 	/* Unwritten pages in range; as a free bonus, report clean ones OK. */
1434*22ce4affSfengbojiang 	for (; i < ncount; i++)
1435*22ce4affSfengbojiang 		rtvals[i] = ma[i]->dirty == 0 ? VM_PAGER_OK : VM_PAGER_ERROR;
1436*22ce4affSfengbojiang 	VM_CNT_ADD(v_vnodepgsout, i);
1437*22ce4affSfengbojiang 	VM_CNT_INC(v_vnodeout);
1438*22ce4affSfengbojiang 	return (rtvals[0]);
1439a9643ea8Slogwang }
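
/*
 * Worked example for the rtvals marking above (hypothetical numbers,
 * 4K pages): with count == ncount == 4 and a short write in which
 * prev_offset advanced 2.5 pages past poffset, OFF_TO_IDX() yields 2,
 * so pages 0 and 1 are reported VM_PAGER_OK; pages 2 and 3 are
 * reported VM_PAGER_OK only if their dirty masks are clear, and
 * VM_PAGER_ERROR otherwise so that the pageout is retried.
 */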
1440a9643ea8Slogwang 
1441*22ce4affSfengbojiang int
1442*22ce4affSfengbojiang vnode_pager_putpages_ioflags(int pager_flags)
1443*22ce4affSfengbojiang {
1444*22ce4affSfengbojiang 	int ioflags;
1445*22ce4affSfengbojiang 
1446*22ce4affSfengbojiang 	/*
1447*22ce4affSfengbojiang 	 * Pageouts are already clustered, use IO_ASYNC to force a
1448*22ce4affSfengbojiang 	 * bawrite() rather than a bdwrite() to prevent paging I/O
1449*22ce4affSfengbojiang 	 * from saturating the buffer cache.  Dummy-up the sequential
1450*22ce4affSfengbojiang 	 * heuristic to cause large ranges to cluster.  If neither
1451*22ce4affSfengbojiang 	 * IO_SYNC nor IO_ASYNC is set, the system decides how to
1452*22ce4affSfengbojiang 	 * cluster.
1453*22ce4affSfengbojiang 	 */
1454*22ce4affSfengbojiang 	ioflags = IO_VMIO;
1455*22ce4affSfengbojiang 	if ((pager_flags & (VM_PAGER_PUT_SYNC | VM_PAGER_PUT_INVAL)) != 0)
1456*22ce4affSfengbojiang 		ioflags |= IO_SYNC;
1457*22ce4affSfengbojiang 	else if ((pager_flags & VM_PAGER_CLUSTER_OK) == 0)
1458*22ce4affSfengbojiang 		ioflags |= IO_ASYNC;
1459*22ce4affSfengbojiang 	ioflags |= (pager_flags & VM_PAGER_PUT_INVAL) != 0 ? IO_INVAL: 0;
1460*22ce4affSfengbojiang 	ioflags |= (pager_flags & VM_PAGER_PUT_NOREUSE) != 0 ? IO_NOREUSE : 0;
1461*22ce4affSfengbojiang 	ioflags |= IO_SEQMAX << IO_SEQSHIFT;
1462*22ce4affSfengbojiang 	return (ioflags);
1463*22ce4affSfengbojiang }
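
/*
 * For example, a request with VM_PAGER_PUT_SYNC set maps to
 * IO_VMIO | IO_SYNC | (IO_SEQMAX << IO_SEQSHIFT), while a request
 * with neither sync nor VM_PAGER_CLUSTER_OK set maps to
 * IO_VMIO | IO_ASYNC | (IO_SEQMAX << IO_SEQSHIFT).
 */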
1464*22ce4affSfengbojiang 
1465*22ce4affSfengbojiang /*
1466*22ce4affSfengbojiang  * vnode_pager_undirty_pages().
1467*22ce4affSfengbojiang  *
1468*22ce4affSfengbojiang  * A helper to mark pages as clean after pageout that was possibly
1469*22ce4affSfengbojiang  * done with a short write.  The lpos argument specifies the page run
1470*22ce4affSfengbojiang  * length in bytes, and the written argument specifies how many bytes
1471*22ce4affSfengbojiang  * were actually written.  eof is the offset past the last valid byte
1472*22ce4affSfengbojiang  * in the vnode using the absolute file position of the first byte in
1473*22ce4affSfengbojiang  * the run as the base from which it is computed.
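 *
 * For example (hypothetical numbers, 4K pages): with written == 6144,
 * the first page is undirtied and reported VM_PAGER_OK, while the
 * second page has only its first 2048 bytes (written & PAGE_MASK)
 * cleared and is reported VM_PAGER_AGAIN so the rest is written later.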
1474*22ce4affSfengbojiang  */
1475a9643ea8Slogwang void
1476*22ce4affSfengbojiang vnode_pager_undirty_pages(vm_page_t *ma, int *rtvals, int written, off_t eof,
1477*22ce4affSfengbojiang     int lpos)
1478a9643ea8Slogwang {
1479a9643ea8Slogwang 	vm_object_t obj;
1480*22ce4affSfengbojiang 	int i, pos, pos_devb;
1481a9643ea8Slogwang 
1482*22ce4affSfengbojiang 	if (written == 0 && eof >= lpos)
1483a9643ea8Slogwang 		return;
1484a9643ea8Slogwang 	obj = ma[0]->object;
1485a9643ea8Slogwang 	for (i = 0, pos = 0; pos < written; i++, pos += PAGE_SIZE) {
1486a9643ea8Slogwang 		if (pos < trunc_page(written)) {
1487a9643ea8Slogwang 			rtvals[i] = VM_PAGER_OK;
1488a9643ea8Slogwang 			vm_page_undirty(ma[i]);
1489a9643ea8Slogwang 		} else {
1490a9643ea8Slogwang 			/* Partially written page. */
1491a9643ea8Slogwang 			rtvals[i] = VM_PAGER_AGAIN;
1492a9643ea8Slogwang 			vm_page_clear_dirty(ma[i], 0, written & PAGE_MASK);
1493a9643ea8Slogwang 		}
1494a9643ea8Slogwang 	}
1495*22ce4affSfengbojiang 	if (eof >= lpos) /* avoid truncation */
1496*22ce4affSfengbojiang 		return;
1497*22ce4affSfengbojiang 	for (pos = eof, i = OFF_TO_IDX(trunc_page(pos)); pos < lpos; i++) {
1498*22ce4affSfengbojiang 		if (pos != trunc_page(pos)) {
1499*22ce4affSfengbojiang 			/*
1500*22ce4affSfengbojiang 			 * The page contains the last valid byte in
1501*22ce4affSfengbojiang 			 * the vnode; mark the rest of the page as
1502*22ce4affSfengbojiang 			 * clean, potentially making the whole page
1503*22ce4affSfengbojiang 			 * clean.
1504*22ce4affSfengbojiang 			 */
1505*22ce4affSfengbojiang 			pos_devb = roundup2(pos & PAGE_MASK, DEV_BSIZE);
1506*22ce4affSfengbojiang 			vm_page_clear_dirty(ma[i], pos_devb, PAGE_SIZE -
1507*22ce4affSfengbojiang 			    pos_devb);
1508*22ce4affSfengbojiang 
1509*22ce4affSfengbojiang 			/*
1510*22ce4affSfengbojiang 			 * If the page was cleaned, report the pageout
1511*22ce4affSfengbojiang 			 * on it as successful.  msync() no longer
1512*22ce4affSfengbojiang 			 * needs to write out the page, endlessly
1513*22ce4affSfengbojiang 			 * creating write requests and dirty buffers.
1514*22ce4affSfengbojiang 			 */
1515*22ce4affSfengbojiang 			if (ma[i]->dirty == 0)
1516*22ce4affSfengbojiang 				rtvals[i] = VM_PAGER_OK;
1517*22ce4affSfengbojiang 
1518*22ce4affSfengbojiang 			pos = round_page(pos);
1519*22ce4affSfengbojiang 		} else {
1520*22ce4affSfengbojiang 			/* vm_pageout_flush() clears dirty */
1521*22ce4affSfengbojiang 			rtvals[i] = VM_PAGER_BAD;
1522*22ce4affSfengbojiang 			pos += PAGE_SIZE;
1523*22ce4affSfengbojiang 		}
1524*22ce4affSfengbojiang 	}
1525a9643ea8Slogwang }
1526a9643ea8Slogwang 
1527*22ce4affSfengbojiang static void
1528a9643ea8Slogwang vnode_pager_update_writecount(vm_object_t object, vm_offset_t start,
1529a9643ea8Slogwang     vm_offset_t end)
1530a9643ea8Slogwang {
1531a9643ea8Slogwang 	struct vnode *vp;
1532a9643ea8Slogwang 	vm_ooffset_t old_wm;
1533a9643ea8Slogwang 
1534a9643ea8Slogwang 	VM_OBJECT_WLOCK(object);
1535a9643ea8Slogwang 	if (object->type != OBJT_VNODE) {
1536a9643ea8Slogwang 		VM_OBJECT_WUNLOCK(object);
1537a9643ea8Slogwang 		return;
1538a9643ea8Slogwang 	}
1539a9643ea8Slogwang 	old_wm = object->un_pager.vnp.writemappings;
1540a9643ea8Slogwang 	object->un_pager.vnp.writemappings += (vm_ooffset_t)end - start;
1541a9643ea8Slogwang 	vp = object->handle;
1542a9643ea8Slogwang 	if (old_wm == 0 && object->un_pager.vnp.writemappings != 0) {
1543*22ce4affSfengbojiang 		ASSERT_VOP_LOCKED(vp, "v_writecount inc");
1544*22ce4affSfengbojiang 		VOP_ADD_WRITECOUNT_CHECKED(vp, 1);
1545a9643ea8Slogwang 		CTR3(KTR_VFS, "%s: vp %p v_writecount increased to %d",
1546a9643ea8Slogwang 		    __func__, vp, vp->v_writecount);
1547a9643ea8Slogwang 	} else if (old_wm != 0 && object->un_pager.vnp.writemappings == 0) {
1548*22ce4affSfengbojiang 		ASSERT_VOP_LOCKED(vp, "v_writecount dec");
1549*22ce4affSfengbojiang 		VOP_ADD_WRITECOUNT_CHECKED(vp, -1);
1550a9643ea8Slogwang 		CTR3(KTR_VFS, "%s: vp %p v_writecount decreased to %d",
1551a9643ea8Slogwang 		    __func__, vp, vp->v_writecount);
1552a9643ea8Slogwang 	}
1553a9643ea8Slogwang 	VM_OBJECT_WUNLOCK(object);
1554a9643ea8Slogwang }
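
/*
 * For example, mapping two writable ranges of the same object bumps
 * writemappings twice but raises v_writecount only on the first,
 * zero-to-nonzero transition; v_writecount is dropped again only when
 * the last writable mapping is removed and writemappings returns to
 * zero.
 */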
1555a9643ea8Slogwang 
1556*22ce4affSfengbojiang static void
1557a9643ea8Slogwang vnode_pager_release_writecount(vm_object_t object, vm_offset_t start,
1558a9643ea8Slogwang     vm_offset_t end)
1559a9643ea8Slogwang {
1560a9643ea8Slogwang 	struct vnode *vp;
1561a9643ea8Slogwang 	struct mount *mp;
1562a9643ea8Slogwang 	vm_offset_t inc;
1563a9643ea8Slogwang 
1564a9643ea8Slogwang 	VM_OBJECT_WLOCK(object);
1565a9643ea8Slogwang 
1566a9643ea8Slogwang 	/*
1567a9643ea8Slogwang 	 * First, recheck the object type to account for the race when
1568a9643ea8Slogwang 	 * the vnode is reclaimed.
1569a9643ea8Slogwang 	 */
1570a9643ea8Slogwang 	if (object->type != OBJT_VNODE) {
1571a9643ea8Slogwang 		VM_OBJECT_WUNLOCK(object);
1572a9643ea8Slogwang 		return;
1573a9643ea8Slogwang 	}
1574a9643ea8Slogwang 
1575a9643ea8Slogwang 	/*
1576a9643ea8Slogwang 	 * Optimize for the case when writemappings is not going to
1577a9643ea8Slogwang 	 * zero.
1578a9643ea8Slogwang 	 */
1579a9643ea8Slogwang 	inc = end - start;
1580a9643ea8Slogwang 	if (object->un_pager.vnp.writemappings != inc) {
1581a9643ea8Slogwang 		object->un_pager.vnp.writemappings -= inc;
1582a9643ea8Slogwang 		VM_OBJECT_WUNLOCK(object);
1583a9643ea8Slogwang 		return;
1584a9643ea8Slogwang 	}
1585a9643ea8Slogwang 
1586a9643ea8Slogwang 	vp = object->handle;
1587a9643ea8Slogwang 	vhold(vp);
1588a9643ea8Slogwang 	VM_OBJECT_WUNLOCK(object);
1589a9643ea8Slogwang 	mp = NULL;
1590a9643ea8Slogwang 	vn_start_write(vp, &mp, V_WAIT);
1591*22ce4affSfengbojiang 	vn_lock(vp, LK_SHARED | LK_RETRY);
1592a9643ea8Slogwang 
1593a9643ea8Slogwang 	/*
1594a9643ea8Slogwang 	 * Decrement the object's writemappings, by swapping the start
1595a9643ea8Slogwang 	 * and end arguments for vnode_pager_update_writecount().  If
1596a9643ea8Slogwang 	 * there was not a race with vnode reclamation, then the
1597a9643ea8Slogwang 	 * vnode's v_writecount is decremented.
1598a9643ea8Slogwang 	 */
1599a9643ea8Slogwang 	vnode_pager_update_writecount(object, end, start);
1600*22ce4affSfengbojiang 	VOP_UNLOCK(vp);
1601a9643ea8Slogwang 	vdrop(vp);
1602a9643ea8Slogwang 	if (mp != NULL)
1603a9643ea8Slogwang 		vn_finished_write(mp);
1604a9643ea8Slogwang }
1605