1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
3 *
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. Neither the name of the University nor the names of its contributors
19 * may be used to endorse or promote products derived from this software
20 * without specific prior written permission.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 *
34 * from nfs_syscalls.c 8.5 (Berkeley) 3/30/95
35 */
36
37 #include <sys/cdefs.h>
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/sysproto.h>
41 #include <sys/kernel.h>
42 #include <sys/sysctl.h>
43 #include <sys/file.h>
44 #include <sys/vnode.h>
45 #include <sys/malloc.h>
46 #include <sys/mount.h>
47 #include <sys/proc.h>
48 #include <sys/bio.h>
49 #include <sys/buf.h>
50 #include <sys/mbuf.h>
51 #include <sys/socket.h>
52 #include <sys/socketvar.h>
53 #include <sys/domain.h>
54 #include <sys/protosw.h>
55 #include <sys/namei.h>
56 #include <sys/unistd.h>
57 #include <sys/kthread.h>
58 #include <sys/fcntl.h>
59 #include <sys/lockf.h>
60 #include <sys/mutex.h>
61 #include <sys/taskqueue.h>
62
63 #include <netinet/in.h>
64 #include <netinet/tcp.h>
65
66 #include <fs/nfs/nfsport.h>
67 #include <fs/nfsclient/nfsmount.h>
68 #include <fs/nfsclient/nfs.h>
69 #include <fs/nfsclient/nfsnode.h>
70
/* Protects all nfsiod state below; taken via NFSLOCKIOD()/NFSUNLOCKIOD(). */
extern struct mtx ncl_iod_mutex;
/* Task used by ncl_nfsiodnew() to defer kthread creation to a taskqueue. */
extern struct task ncl_nfsiodnew_task;

/* Number of nfsiod kthreads currently running. */
int ncl_numasync;
/* Per-slot availability state of each nfsiod kthread. */
enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
/* Mount each nfsiod slot is currently serving, or NULL when unassigned. */
struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];

static void nfssvc_iod(void *);

/* Non-zero while the slot's kthread exists (or is being created). */
static int nfs_asyncdaemon[NFS_MAXASYNCDAEMON];

SYSCTL_DECL(_vfs_nfs);

/* Maximum number of seconds a nfsiod kthread will sleep before exiting */
static unsigned int nfs_iodmaxidle = 120;
SYSCTL_UINT(_vfs_nfs, OID_AUTO, iodmaxidle, CTLFLAG_RW, &nfs_iodmaxidle, 0,
    "Max number of seconds an nfsiod kthread will sleep before exiting");

/* Maximum number of nfsiod kthreads */
unsigned int ncl_iodmax = 20;

/* Minimum number of nfsiod kthreads to keep as spares */
static unsigned int nfs_iodmin = 0;

static int nfs_nfsiodnew_sync(void);
96
97 static int
sysctl_iodmin(SYSCTL_HANDLER_ARGS)98 sysctl_iodmin(SYSCTL_HANDLER_ARGS)
99 {
100 int error, i;
101 int newmin;
102
103 newmin = nfs_iodmin;
104 error = sysctl_handle_int(oidp, &newmin, 0, req);
105 if (error || (req->newptr == NULL))
106 return (error);
107 NFSLOCKIOD();
108 if (newmin > ncl_iodmax) {
109 error = EINVAL;
110 goto out;
111 }
112 nfs_iodmin = newmin;
113 if (ncl_numasync >= nfs_iodmin)
114 goto out;
115 /*
116 * If the current number of nfsiod is lower
117 * than the new minimum, create some more.
118 */
119 for (i = nfs_iodmin - ncl_numasync; i > 0; i--)
120 nfs_nfsiodnew_sync();
121 out:
122 NFSUNLOCKIOD();
123 return (0);
124 }
125 SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmin,
126 CTLTYPE_UINT | CTLFLAG_RWTUN | CTLFLAG_NOFETCH | CTLFLAG_MPSAFE,
127 0, sizeof (nfs_iodmin), sysctl_iodmin, "IU",
128 "Min number of nfsiod kthreads to keep as spares");
129
130 static int
sysctl_iodmax(SYSCTL_HANDLER_ARGS)131 sysctl_iodmax(SYSCTL_HANDLER_ARGS)
132 {
133 int error, i;
134 int iod, newmax;
135
136 newmax = ncl_iodmax;
137 error = sysctl_handle_int(oidp, &newmax, 0, req);
138 if (error || (req->newptr == NULL))
139 return (error);
140 if (newmax > NFS_MAXASYNCDAEMON)
141 return (EINVAL);
142 NFSLOCKIOD();
143 ncl_iodmax = newmax;
144 if (ncl_numasync <= ncl_iodmax)
145 goto out;
146 /*
147 * If there are some asleep nfsiods that should
148 * exit, wakeup() them so that they check ncl_iodmax
149 * and exit. Those who are active will exit as
150 * soon as they finish I/O.
151 */
152 iod = ncl_numasync - 1;
153 for (i = 0; i < ncl_numasync - ncl_iodmax; i++) {
154 if (ncl_iodwant[iod] == NFSIOD_AVAILABLE)
155 wakeup(&ncl_iodwant[iod]);
156 iod--;
157 }
158 out:
159 NFSUNLOCKIOD();
160 return (0);
161 }
162 SYSCTL_PROC(_vfs_nfs, OID_AUTO, iodmax,
163 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, 0, sizeof (ncl_iodmax),
164 sysctl_iodmax, "IU",
165 "Max number of nfsiod kthreads");
166
/*
 * Create one new nfsiod kthread in the first free slot, if any.
 * Must be called with the nfsiod lock held; the lock is dropped
 * around kproc_create() and reacquired, so other nfsiod state may
 * change across the call.  Returns 0 on success or when no slot is
 * free, otherwise the kproc_create() error.
 */
static int
nfs_nfsiodnew_sync(void)
{
	int error, i;

	NFSASSERTIOD();
	/* Claim the first free slot before dropping the lock. */
	for (i = 0; i < ncl_iodmax; i++) {
		if (nfs_asyncdaemon[i] == 0) {
			nfs_asyncdaemon[i] = 1;
			break;
		}
	}
	if (i == ncl_iodmax)
		return (0);
	/* kproc_create() may sleep; it cannot be called with the lock held. */
	NFSUNLOCKIOD();
	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
	    RFHIGHPID, 0, "newnfs %d", i);
	NFSLOCKIOD();
	if (error == 0) {
		ncl_numasync++;
		ncl_iodwant[i] = NFSIOD_AVAILABLE;
	} else
		/* Creation failed; release the claimed slot. */
		nfs_asyncdaemon[i] = 0;
	return (error);
}
192
193 void
ncl_nfsiodnew_tq(__unused void * arg,int pending)194 ncl_nfsiodnew_tq(__unused void *arg, int pending)
195 {
196
197 NFSLOCKIOD();
198 while (pending > 0) {
199 pending--;
200 nfs_nfsiodnew_sync();
201 }
202 NFSUNLOCKIOD();
203 }
204
/*
 * Request creation of a new nfsiod kthread.  Called with the nfsiod
 * lock held, so the actual (sleeping) creation is deferred to
 * ncl_nfsiodnew_tq() via the thread taskqueue.
 */
void
ncl_nfsiodnew(void)
{

	NFSASSERTIOD();
	taskqueue_enqueue(taskqueue_thread, &ncl_nfsiodnew_task);
}
212
213 static void
nfsiod_setup(void * dummy)214 nfsiod_setup(void *dummy)
215 {
216 int error;
217
218 TUNABLE_INT_FETCH("vfs.nfs.iodmin", &nfs_iodmin);
219 nfscl_init();
220 NFSLOCKIOD();
221 /* Silently limit the start number of nfsiod's */
222 if (nfs_iodmin > NFS_MAXASYNCDAEMON)
223 nfs_iodmin = NFS_MAXASYNCDAEMON;
224
225 while (ncl_numasync < nfs_iodmin) {
226 error = nfs_nfsiodnew_sync();
227 if (error == -1)
228 panic("nfsiod_setup: nfs_nfsiodnew failed");
229 }
230 NFSUNLOCKIOD();
231 }
232 SYSINIT(newnfsiod, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, nfsiod_setup, NULL);
233
/*
 * When non-zero, an nfsiod serving a mount that has more than one iod
 * will "defect" (detach from the mount) after each buffer, so iods get
 * shared out among mounts; see nfssvc_iod().
 */
static int nfs_defect = 0;
SYSCTL_INT(_vfs_nfs, OID_AUTO, defect, CTLFLAG_RW, &nfs_defect, 0,
    "Allow nfsiods to migrate serving different mounts");
237
/*
 * Asynchronous I/O daemons for client nfs.
 * They do read-ahead and write-behind operations on the block I/O cache.
 * Returns if we hit the timeout defined by the iodmaxidle sysctl.
 */
static void
nfssvc_iod(void *instance)
{
	struct buf *bp;
	struct nfsmount *nmp;
	int myiod, timo;
	int error = 0;

	NFSLOCKIOD();
	/* Recover this thread's slot index from its &nfs_asyncdaemon[i] arg. */
	myiod = (int *)instance - nfs_asyncdaemon;
	/*
	 * Main loop
	 */
	for (;;) {
		/* Sleep until this slot has a mount with queued buffers. */
		while (((nmp = ncl_iodmount[myiod]) == NULL)
		    || !TAILQ_FIRST(&nmp->nm_bufq)) {
			/* Retire if iodmax was lowered below our slot. */
			if (myiod >= ncl_iodmax)
				goto finish;
			if (nmp)
				nmp->nm_bufqiods--;
			if (ncl_iodwant[myiod] == NFSIOD_NOT_AVAILABLE)
				ncl_iodwant[myiod] = NFSIOD_AVAILABLE;
			ncl_iodmount[myiod] = NULL;
			/*
			 * Always keep at least nfs_iodmin kthreads.
			 * (timo == 0 means sleep without timeout.)
			 */
			timo = (myiod < nfs_iodmin) ? 0 : nfs_iodmaxidle * hz;
			error = msleep(&ncl_iodwant[myiod], &ncl_iod_mutex, PWAIT | PCATCH,
			    "-", timo);
			if (error) {
				nmp = ncl_iodmount[myiod];
				/*
				 * Rechecking the nm_bufq closes a rare race where the
				 * nfsiod is woken up at the exact time the idle timeout
				 * fires
				 */
				if (nmp && TAILQ_FIRST(&nmp->nm_bufq))
					error = 0;
				break;
			}
		}
		/* Idle timeout or signal: fall through to teardown. */
		if (error)
			break;
		/* Service buffers for the assigned mount until the queue drains. */
		while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
			/* Take one off the front of the list */
			TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
			nmp->nm_bufqlen--;
			/* Wake a producer throttled on a full queue. */
			if (nmp->nm_bufqwant && nmp->nm_bufqlen <= ncl_numasync) {
				nmp->nm_bufqwant = 0;
				wakeup(&nmp->nm_bufq);
			}
			/* Drop the iod lock for the duration of the actual I/O. */
			NFSUNLOCKIOD();
			KASSERT((bp->b_flags & B_DIRECT) == 0,
			    ("nfssvc_iod: B_DIRECT set"));
			if (bp->b_iocmd == BIO_READ)
				(void) ncl_doio(bp->b_vp, bp, bp->b_rcred,
				    NULL, 0);
			else
				(void) ncl_doio(bp->b_vp, bp, bp->b_wcred,
				    NULL, 0);
			NFSLOCKIOD();
			/*
			 * Make sure the nmp hasn't been dismounted as soon as
			 * ncl_doio() completes for the last buffer.
			 */
			nmp = ncl_iodmount[myiod];
			if (nmp == NULL)
				break;

			/*
			 * If there are more than one iod on this mount, then defect
			 * so that the iods can be shared out fairly between the mounts
			 */
			if (nfs_defect && nmp->nm_bufqiods > 1) {
				NFS_DPF(ASYNCIO,
				    ("nfssvc_iod: iod %d defecting from mount %p\n",
				    myiod, nmp));
				ncl_iodmount[myiod] = NULL;
				nmp->nm_bufqiods--;
				break;
			}
		}
	}
finish:
	/* Release this slot's state and terminate the kthread. */
	nfs_asyncdaemon[myiod] = 0;
	if (nmp)
		nmp->nm_bufqiods--;
	ncl_iodwant[myiod] = NFSIOD_NOT_AVAILABLE;
	ncl_iodmount[myiod] = NULL;
	/* Someone may be waiting for the last nfsiod to terminate. */
	if (--ncl_numasync == 0)
		wakeup(&ncl_numasync);
	NFSUNLOCKIOD();
	if ((error == 0) || (error == EWOULDBLOCK))
		kproc_exit(0);
	/* Abnormal termination */
	kproc_exit(1);
}
341