xref: /f-stack/freebsd/kern/sys_generic.c (revision 22ce4aff)
1 /*-
2  * SPDX-License-Identifier: BSD-3-Clause
3  *
4  * Copyright (c) 1982, 1986, 1989, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  * (c) UNIX System Laboratories, Inc.
7  * All or some portions of this file are derived from material licensed
8  * to the University of California by American Telephone and Telegraph
9  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
10  * the permission of UNIX System Laboratories, Inc.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)sys_generic.c	8.5 (Berkeley) 1/21/94
37  */
38 
39 #include <sys/cdefs.h>
40 __FBSDID("$FreeBSD$");
41 
42 #include "opt_capsicum.h"
43 #include "opt_ktrace.h"
44 
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/sysproto.h>
48 #include <sys/capsicum.h>
49 #include <sys/filedesc.h>
50 #include <sys/filio.h>
51 #include <sys/fcntl.h>
52 #include <sys/file.h>
53 #include <sys/lock.h>
54 #include <sys/proc.h>
55 #include <sys/signalvar.h>
56 #include <sys/socketvar.h>
57 #include <sys/uio.h>
58 #include <sys/eventfd.h>
59 #include <sys/kernel.h>
60 #include <sys/ktr.h>
61 #include <sys/limits.h>
62 #include <sys/malloc.h>
63 #include <sys/poll.h>
64 #include <sys/resourcevar.h>
65 #include <sys/selinfo.h>
66 #include <sys/sleepqueue.h>
67 #include <sys/specialfd.h>
68 #include <sys/syscallsubr.h>
69 #include <sys/sysctl.h>
70 #include <sys/sysent.h>
71 #include <sys/vnode.h>
72 #include <sys/bio.h>
73 #include <sys/buf.h>
74 #include <sys/condvar.h>
75 #ifdef KTRACE
76 #include <sys/ktrace.h>
77 #endif
78 
79 #include <security/audit/audit.h>
80 
81 /*
82  * The following macro defines how many bytes will be allocated on
83  * the stack instead of with malloc(9) when passing ioctl data
84  * structures from userspace to the kernel and back.  Some ioctls
85  * with small data structures are used very frequently, and this
86  * small on-stack buffer gives a significant speedup for those
87  * requests.  The value of this define should be greater than or
88  * equal to 64 bytes and should also be a power of two.  The data
89  * structure is currently hard-aligned to an 8-byte boundary on the
90  * stack.  This should currently be sufficient for all supported platforms.
91  */
92 #define	SYS_IOCTL_SMALL_SIZE	128	/* bytes */
93 #define	SYS_IOCTL_SMALL_ALIGN	8	/* bytes */
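/*
 * Editorial sketch, not part of the original file: a userland caller of a
 * small ioctl such as FIONREAD (argument size == sizeof(int)) exercises the
 * on-stack fast path in sys_ioctl() below, so no malloc(9) call is needed.
 */
#include <sys/ioctl.h>
#include <sys/filio.h>
#include <err.h>

static int
bytes_pending(int fd)
{
	int n;

	/* sizeof(int) <= SYS_IOCTL_SMALL_SIZE, so the kernel stages the
	 * argument in smalldata[] instead of allocating a buffer. */
	if (ioctl(fd, FIONREAD, &n) == -1)
		err(1, "ioctl(FIONREAD)");
	return (n);
}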
94 
95 #ifdef __LP64__
96 static int iosize_max_clamp = 0;
97 SYSCTL_INT(_debug, OID_AUTO, iosize_max_clamp, CTLFLAG_RW,
98     &iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX");
99 static int devfs_iosize_max_clamp = 1;
100 SYSCTL_INT(_debug, OID_AUTO, devfs_iosize_max_clamp, CTLFLAG_RW,
101     &devfs_iosize_max_clamp, 0, "Clamp max i/o size to INT_MAX for devices");
102 #endif
103 
104 /*
105  * Assert that the return value of read(2) and write(2) syscalls fits
106  * into a register.  If not, an architecture will need to provide the
107  * usermode wrappers to reconstruct the result.
108  */
109 CTASSERT(sizeof(register_t) >= sizeof(size_t));
110 
111 static MALLOC_DEFINE(M_IOCTLOPS, "ioctlops", "ioctl data buffer");
112 static MALLOC_DEFINE(M_SELECT, "select", "select() buffer");
113 MALLOC_DEFINE(M_IOV, "iov", "large iov's");
114 
115 static int	pollout(struct thread *, struct pollfd *, struct pollfd *,
116 		    u_int);
117 static int	pollscan(struct thread *, struct pollfd *, u_int);
118 static int	pollrescan(struct thread *);
119 static int	selscan(struct thread *, fd_mask **, fd_mask **, int);
120 static int	selrescan(struct thread *, fd_mask **, fd_mask **);
121 static void	selfdalloc(struct thread *, void *);
122 static void	selfdfree(struct seltd *, struct selfd *);
123 static int	dofileread(struct thread *, int, struct file *, struct uio *,
124 		    off_t, int);
125 static int	dofilewrite(struct thread *, int, struct file *, struct uio *,
126 		    off_t, int);
127 static void	doselwakeup(struct selinfo *, int);
128 static void	seltdinit(struct thread *);
129 static int	seltdwait(struct thread *, sbintime_t, sbintime_t);
130 static void	seltdclear(struct thread *);
131 
132 /*
133  * One seltd is allocated per thread, on demand.
134  *
135  *	t - protected by st_mtx
136  * 	k - Only accessed by curthread or read-only
137  */
138 struct seltd {
139 	STAILQ_HEAD(, selfd)	st_selq;	/* (k) List of selfds. */
140 	struct selfd		*st_free1;	/* (k) free fd for read set. */
141 	struct selfd		*st_free2;	/* (k) free fd for write set. */
142 	struct mtx		st_mtx;		/* Protects struct seltd */
143 	struct cv		st_wait;	/* (t) Wait channel. */
144 	int			st_flags;	/* (t) SELTD_ flags. */
145 };
146 
147 #define	SELTD_PENDING	0x0001			/* We have pending events. */
148 #define	SELTD_RESCAN	0x0002			/* Doing a rescan. */
149 
150 /*
151  * One selfd allocated per-thread per-file-descriptor.
152  *	f - protected by sf_mtx
153  */
154 struct selfd {
155 	STAILQ_ENTRY(selfd)	sf_link;	/* (k) fds owned by this td. */
156 	TAILQ_ENTRY(selfd)	sf_threads;	/* (f) fds on this selinfo. */
157 	struct selinfo		*sf_si;		/* (f) selinfo when linked. */
158 	struct mtx		*sf_mtx;	/* Pointer to selinfo mtx. */
159 	struct seltd		*sf_td;		/* (k) owning seltd. */
160 	void			*sf_cookie;	/* (k) fd or pollfd. */
161 };
162 
163 MALLOC_DEFINE(M_SELFD, "selfd", "selfd");
164 static struct mtx_pool *mtxpool_select;
165 
166 #ifdef __LP64__
167 size_t
168 devfs_iosize_max(void)
169 {
170 
171 	return (devfs_iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
172 	    INT_MAX : SSIZE_MAX);
173 }
174 
175 size_t
176 iosize_max(void)
177 {
178 
179 	return (iosize_max_clamp || SV_CURPROC_FLAG(SV_ILP32) ?
180 	    INT_MAX : SSIZE_MAX);
181 }
182 #endif
183 
184 #ifndef _SYS_SYSPROTO_H_
185 struct read_args {
186 	int	fd;
187 	void	*buf;
188 	size_t	nbyte;
189 };
190 #endif
191 int
192 sys_read(struct thread *td, struct read_args *uap)
193 {
194 	struct uio auio;
195 	struct iovec aiov;
196 	int error;
197 
198 	if (uap->nbyte > IOSIZE_MAX)
199 		return (EINVAL);
200 	aiov.iov_base = uap->buf;
201 	aiov.iov_len = uap->nbyte;
202 	auio.uio_iov = &aiov;
203 	auio.uio_iovcnt = 1;
204 	auio.uio_resid = uap->nbyte;
205 	auio.uio_segflg = UIO_USERSPACE;
206 	error = kern_readv(td, uap->fd, &auio);
207 	return (error);
208 }
209 
210 /*
211  * Positioned read system call
212  */
213 #ifndef _SYS_SYSPROTO_H_
214 struct pread_args {
215 	int	fd;
216 	void	*buf;
217 	size_t	nbyte;
218 	int	pad;
219 	off_t	offset;
220 };
221 #endif
222 int
223 sys_pread(struct thread *td, struct pread_args *uap)
224 {
225 
226 	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
227 }
228 
229 int
230 kern_pread(struct thread *td, int fd, void *buf, size_t nbyte, off_t offset)
231 {
232 	struct uio auio;
233 	struct iovec aiov;
234 	int error;
235 
236 	if (nbyte > IOSIZE_MAX)
237 		return (EINVAL);
238 	aiov.iov_base = buf;
239 	aiov.iov_len = nbyte;
240 	auio.uio_iov = &aiov;
241 	auio.uio_iovcnt = 1;
242 	auio.uio_resid = nbyte;
243 	auio.uio_segflg = UIO_USERSPACE;
244 	error = kern_preadv(td, fd, &auio, offset);
245 	return (error);
246 }
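/*
 * Editorial userland sketch, not part of the original file: kern_pread()
 * reads at an explicit offset via the FOF_OFFSET path, so the descriptor's
 * seek position is left untouched.
 */
#include <sys/types.h>
#include <unistd.h>
#include <assert.h>

static void
peek_header(int fd, char hdr[16])
{
	off_t before = lseek(fd, 0, SEEK_CUR);

	(void)pread(fd, hdr, 16, 0);			/* bytes 0..15 */
	assert(lseek(fd, 0, SEEK_CUR) == before);	/* offset unchanged */
}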
247 
248 #if defined(COMPAT_FREEBSD6)
249 int
250 freebsd6_pread(struct thread *td, struct freebsd6_pread_args *uap)
251 {
252 
253 	return (kern_pread(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
254 }
255 #endif
256 
257 /*
258  * Scatter read system call.
259  */
260 #ifndef _SYS_SYSPROTO_H_
261 struct readv_args {
262 	int	fd;
263 	struct	iovec *iovp;
264 	u_int	iovcnt;
265 };
266 #endif
267 int
268 sys_readv(struct thread *td, struct readv_args *uap)
269 {
270 	struct uio *auio;
271 	int error;
272 
273 	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
274 	if (error)
275 		return (error);
276 	error = kern_readv(td, uap->fd, auio);
277 	free(auio, M_IOV);
278 	return (error);
279 }
280 
281 int
282 kern_readv(struct thread *td, int fd, struct uio *auio)
283 {
284 	struct file *fp;
285 	int error;
286 
287 	error = fget_read(td, fd, &cap_read_rights, &fp);
288 	if (error)
289 		return (error);
290 	error = dofileread(td, fd, fp, auio, (off_t)-1, 0);
291 	fdrop(fp, td);
292 	return (error);
293 }
294 
295 /*
296  * Scatter positioned read system call.
297  */
298 #ifndef _SYS_SYSPROTO_H_
299 struct preadv_args {
300 	int	fd;
301 	struct	iovec *iovp;
302 	u_int	iovcnt;
303 	off_t	offset;
304 };
305 #endif
306 int
307 sys_preadv(struct thread *td, struct preadv_args *uap)
308 {
309 	struct uio *auio;
310 	int error;
311 
312 	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
313 	if (error)
314 		return (error);
315 	error = kern_preadv(td, uap->fd, auio, uap->offset);
316 	free(auio, M_IOV);
317 	return (error);
318 }
319 
320 int
321 kern_preadv(struct thread *td, int fd, struct uio *auio, off_t offset)
322 {
323 	struct file *fp;
324 	int error;
325 
326 	error = fget_read(td, fd, &cap_pread_rights, &fp);
327 	if (error)
328 		return (error);
329 	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
330 		error = ESPIPE;
331 	else if (offset < 0 &&
332 	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
333 		error = EINVAL;
334 	else
335 		error = dofileread(td, fd, fp, auio, offset, FOF_OFFSET);
336 	fdrop(fp, td);
337 	return (error);
338 }
339 
340 /*
341  * Common code for readv and preadv that reads data in
342  * from a file using the passed in uio, offset, and flags.
343  */
344 static int
345 dofileread(struct thread *td, int fd, struct file *fp, struct uio *auio,
346     off_t offset, int flags)
347 {
348 	ssize_t cnt;
349 	int error;
350 #ifdef KTRACE
351 	struct uio *ktruio = NULL;
352 #endif
353 
354 	AUDIT_ARG_FD(fd);
355 
356 	/* Finish zero length reads right here */
357 	if (auio->uio_resid == 0) {
358 		td->td_retval[0] = 0;
359 		return (0);
360 	}
361 	auio->uio_rw = UIO_READ;
362 	auio->uio_offset = offset;
363 	auio->uio_td = td;
364 #ifdef KTRACE
365 	if (KTRPOINT(td, KTR_GENIO))
366 		ktruio = cloneuio(auio);
367 #endif
368 	cnt = auio->uio_resid;
369 	if ((error = fo_read(fp, auio, td->td_ucred, flags, td))) {
370 		if (auio->uio_resid != cnt && (error == ERESTART ||
371 		    error == EINTR || error == EWOULDBLOCK))
372 			error = 0;
373 	}
374 	cnt -= auio->uio_resid;
375 #ifdef KTRACE
376 	if (ktruio != NULL) {
377 		ktruio->uio_resid = cnt;
378 		ktrgenio(fd, UIO_READ, ktruio, error);
379 	}
380 #endif
381 	td->td_retval[0] = cnt;
382 	return (error);
383 }
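/*
 * Editorial userland sketch, not part of the original file: because
 * dofileread() reports a short count instead of an error when a transfer is
 * interrupted part way through, portable callers retry until the requested
 * amount (or EOF) is reached.
 */
#include <unistd.h>
#include <errno.h>
#include <stddef.h>

static ssize_t
read_full(int fd, void *buf, size_t len)
{
	size_t done = 0;

	while (done < len) {
		ssize_t n = read(fd, (char *)buf + done, len - done);

		if (n == 0)			/* EOF */
			break;
		if (n < 0) {
			if (errno == EINTR)	/* nothing transferred, retry */
				continue;
			return (-1);
		}
		done += (size_t)n;		/* partial transfer, keep going */
	}
	return ((ssize_t)done);
}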
384 
385 #ifndef _SYS_SYSPROTO_H_
386 struct write_args {
387 	int	fd;
388 	const void *buf;
389 	size_t	nbyte;
390 };
391 #endif
392 int
393 sys_write(struct thread *td, struct write_args *uap)
394 {
395 	struct uio auio;
396 	struct iovec aiov;
397 	int error;
398 
399 	if (uap->nbyte > IOSIZE_MAX)
400 		return (EINVAL);
401 	aiov.iov_base = (void *)(uintptr_t)uap->buf;
402 	aiov.iov_len = uap->nbyte;
403 	auio.uio_iov = &aiov;
404 	auio.uio_iovcnt = 1;
405 	auio.uio_resid = uap->nbyte;
406 	auio.uio_segflg = UIO_USERSPACE;
407 	error = kern_writev(td, uap->fd, &auio);
408 	return (error);
409 }
410 
411 /*
412  * Positioned write system call.
413  */
414 #ifndef _SYS_SYSPROTO_H_
415 struct pwrite_args {
416 	int	fd;
417 	const void *buf;
418 	size_t	nbyte;
419 	int	pad;
420 	off_t	offset;
421 };
422 #endif
423 int
424 sys_pwrite(struct thread *td, struct pwrite_args *uap)
425 {
426 
427 	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
428 }
429 
430 int
431 kern_pwrite(struct thread *td, int fd, const void *buf, size_t nbyte,
432     off_t offset)
433 {
434 	struct uio auio;
435 	struct iovec aiov;
436 	int error;
437 
438 	if (nbyte > IOSIZE_MAX)
439 		return (EINVAL);
440 	aiov.iov_base = (void *)(uintptr_t)buf;
441 	aiov.iov_len = nbyte;
442 	auio.uio_iov = &aiov;
443 	auio.uio_iovcnt = 1;
444 	auio.uio_resid = nbyte;
445 	auio.uio_segflg = UIO_USERSPACE;
446 	error = kern_pwritev(td, fd, &auio, offset);
447 	return (error);
448 }
449 
450 #if defined(COMPAT_FREEBSD6)
451 int
452 freebsd6_pwrite(struct thread *td, struct freebsd6_pwrite_args *uap)
453 {
454 
455 	return (kern_pwrite(td, uap->fd, uap->buf, uap->nbyte, uap->offset));
456 }
457 #endif
458 
459 /*
460  * Gather write system call.
461  */
462 #ifndef _SYS_SYSPROTO_H_
463 struct writev_args {
464 	int	fd;
465 	struct	iovec *iovp;
466 	u_int	iovcnt;
467 };
468 #endif
469 int
470 sys_writev(struct thread *td, struct writev_args *uap)
471 {
472 	struct uio *auio;
473 	int error;
474 
475 	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
476 	if (error)
477 		return (error);
478 	error = kern_writev(td, uap->fd, auio);
479 	free(auio, M_IOV);
480 	return (error);
481 }
482 
483 int
484 kern_writev(struct thread *td, int fd, struct uio *auio)
485 {
486 	struct file *fp;
487 	int error;
488 
489 	error = fget_write(td, fd, &cap_write_rights, &fp);
490 	if (error)
491 		return (error);
492 	error = dofilewrite(td, fd, fp, auio, (off_t)-1, 0);
493 	fdrop(fp, td);
494 	return (error);
495 }
496 
497 /*
498  * Gather positioned write system call.
499  */
500 #ifndef _SYS_SYSPROTO_H_
501 struct pwritev_args {
502 	int	fd;
503 	struct	iovec *iovp;
504 	u_int	iovcnt;
505 	off_t	offset;
506 };
507 #endif
508 int
509 sys_pwritev(struct thread *td, struct pwritev_args *uap)
510 {
511 	struct uio *auio;
512 	int error;
513 
514 	error = copyinuio(uap->iovp, uap->iovcnt, &auio);
515 	if (error)
516 		return (error);
517 	error = kern_pwritev(td, uap->fd, auio, uap->offset);
518 	free(auio, M_IOV);
519 	return (error);
520 }
521 
522 int
523 kern_pwritev(struct thread *td, int fd, struct uio *auio, off_t offset)
524 {
525 	struct file *fp;
526 	int error;
527 
528 	error = fget_write(td, fd, &cap_pwrite_rights, &fp);
529 	if (error)
530 		return (error);
531 	if (!(fp->f_ops->fo_flags & DFLAG_SEEKABLE))
532 		error = ESPIPE;
533 	else if (offset < 0 &&
534 	    (fp->f_vnode == NULL || fp->f_vnode->v_type != VCHR))
535 		error = EINVAL;
536 	else
537 		error = dofilewrite(td, fd, fp, auio, offset, FOF_OFFSET);
538 	fdrop(fp, td);
539 	return (error);
540 }
541 
542 /*
543  * Common code for writev and pwritev that writes data to
544  * a file using the passed in uio, offset, and flags.
545  */
546 static int
547 dofilewrite(struct thread *td, int fd, struct file *fp, struct uio *auio,
548     off_t offset, int flags)
549 {
550 	ssize_t cnt;
551 	int error;
552 #ifdef KTRACE
553 	struct uio *ktruio = NULL;
554 #endif
555 
556 	AUDIT_ARG_FD(fd);
557 	auio->uio_rw = UIO_WRITE;
558 	auio->uio_td = td;
559 	auio->uio_offset = offset;
560 #ifdef KTRACE
561 	if (KTRPOINT(td, KTR_GENIO))
562 		ktruio = cloneuio(auio);
563 #endif
564 	cnt = auio->uio_resid;
565 	if ((error = fo_write(fp, auio, td->td_ucred, flags, td))) {
566 		if (auio->uio_resid != cnt && (error == ERESTART ||
567 		    error == EINTR || error == EWOULDBLOCK))
568 			error = 0;
569 		/* Socket layer is responsible for issuing SIGPIPE. */
570 		if (fp->f_type != DTYPE_SOCKET && error == EPIPE) {
571 			PROC_LOCK(td->td_proc);
572 			tdsignal(td, SIGPIPE);
573 			PROC_UNLOCK(td->td_proc);
574 		}
575 	}
576 	cnt -= auio->uio_resid;
577 #ifdef KTRACE
578 	if (ktruio != NULL) {
579 		ktruio->uio_resid = cnt;
580 		ktrgenio(fd, UIO_WRITE, ktruio, error);
581 	}
582 #endif
583 	td->td_retval[0] = cnt;
584 	return (error);
585 }
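/*
 * Editorial userland sketch, not part of the original file: for non-socket
 * files the EPIPE case above also delivers SIGPIPE, so a writer that
 * prefers an error return ignores the signal and checks errno instead.
 */
#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

static void
write_or_report(int fd, const void *buf, size_t len)
{
	signal(SIGPIPE, SIG_IGN);	/* turn SIGPIPE into plain EPIPE */
	if (write(fd, buf, len) == -1 && errno == EPIPE)
		fprintf(stderr, "peer closed the pipe\n");
}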
586 
587 /*
588  * Truncate a file given a file descriptor.
589  *
590  * Can't use fget_write() here, since we must return EINVAL and not EBADF
591  * if the descriptor isn't writable.
592  */
593 int
594 kern_ftruncate(struct thread *td, int fd, off_t length)
595 {
596 	struct file *fp;
597 	int error;
598 
599 	AUDIT_ARG_FD(fd);
600 	if (length < 0)
601 		return (EINVAL);
602 	error = fget(td, fd, &cap_ftruncate_rights, &fp);
603 	if (error)
604 		return (error);
605 	AUDIT_ARG_FILE(td->td_proc, fp);
606 	if (!(fp->f_flag & FWRITE)) {
607 		fdrop(fp, td);
608 		return (EINVAL);
609 	}
610 	error = fo_truncate(fp, length, td->td_ucred, td);
611 	fdrop(fp, td);
612 	return (error);
613 }
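/*
 * Editorial userland sketch, not part of the original file: the FWRITE
 * check above is why ftruncate(2) on a descriptor opened O_RDONLY fails
 * with EINVAL rather than EBADF.
 */
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <assert.h>

static void
demo_ftruncate_readonly(const char *path)
{
	int fd = open(path, O_RDONLY);

	assert(fd >= 0);
	assert(ftruncate(fd, 0) == -1 && errno == EINVAL);
	close(fd);
}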
614 
615 #ifndef _SYS_SYSPROTO_H_
616 struct ftruncate_args {
617 	int	fd;
618 	int	pad;
619 	off_t	length;
620 };
621 #endif
622 int
623 sys_ftruncate(struct thread *td, struct ftruncate_args *uap)
624 {
625 
626 	return (kern_ftruncate(td, uap->fd, uap->length));
627 }
628 
629 #if defined(COMPAT_43)
630 #ifndef _SYS_SYSPROTO_H_
631 struct oftruncate_args {
632 	int	fd;
633 	long	length;
634 };
635 #endif
636 int
637 oftruncate(struct thread *td, struct oftruncate_args *uap)
638 {
639 
640 	return (kern_ftruncate(td, uap->fd, uap->length));
641 }
642 #endif /* COMPAT_43 */
643 
644 #ifndef _SYS_SYSPROTO_H_
645 struct ioctl_args {
646 	int	fd;
647 	u_long	com;
648 	caddr_t	data;
649 };
650 #endif
651 /* ARGSUSED */
652 int
653 sys_ioctl(struct thread *td, struct ioctl_args *uap)
654 {
655 	u_char smalldata[SYS_IOCTL_SMALL_SIZE] __aligned(SYS_IOCTL_SMALL_ALIGN);
656 	uint32_t com;
657 	int arg, error;
658 	u_int size;
659 	caddr_t data;
660 
661 #ifdef INVARIANTS
662 	if (uap->com > 0xffffffff) {
663 		printf(
664 		    "WARNING pid %d (%s): ioctl sign-extension ioctl %lx\n",
665 		    td->td_proc->p_pid, td->td_name, uap->com);
666 	}
667 #endif
668 	com = (uint32_t)uap->com;
669 
670 	/*
671 	 * Interpret high order word to find amount of data to be
672 	 * copied to/from the user's address space.
673 	 */
674 	size = IOCPARM_LEN(com);
675 	if ((size > IOCPARM_MAX) ||
676 	    ((com & (IOC_VOID  | IOC_IN | IOC_OUT)) == 0) ||
677 #if defined(COMPAT_FREEBSD5) || defined(COMPAT_FREEBSD4) || defined(COMPAT_43)
678 	    ((com & IOC_OUT) && size == 0) ||
679 #else
680 	    ((com & (IOC_IN | IOC_OUT)) && size == 0) ||
681 #endif
682 	    ((com & IOC_VOID) && size > 0 && size != sizeof(int)))
683 		return (ENOTTY);
684 
685 	if (size > 0) {
686 		if (com & IOC_VOID) {
687 			/* Integer argument. */
688 			arg = (intptr_t)uap->data;
689 			data = (void *)&arg;
690 			size = 0;
691 		} else {
692 			if (size > SYS_IOCTL_SMALL_SIZE)
693 				data = malloc((u_long)size, M_IOCTLOPS, M_WAITOK);
694 			else
695 				data = smalldata;
696 		}
697 	} else
698 		data = (void *)&uap->data;
699 	if (com & IOC_IN) {
700 		error = copyin(uap->data, data, (u_int)size);
701 		if (error != 0)
702 			goto out;
703 	} else if (com & IOC_OUT) {
704 		/*
705 		 * Zero the buffer so the user always
706 		 * gets back something deterministic.
707 		 */
708 		bzero(data, size);
709 	}
710 
711 	error = kern_ioctl(td, uap->fd, com, data);
712 
713 	if (error == 0 && (com & IOC_OUT))
714 		error = copyout(data, uap->data, (u_int)size);
715 
716 out:
717 	if (size > SYS_IOCTL_SMALL_SIZE)
718 		free(data, M_IOCTLOPS);
719 	return (error);
720 }
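/*
 * Editorial sketch, not part of the original file: the size and direction
 * bits tested above are baked into the request word by the _IO()/_IOR()/
 * _IOW()/_IOWR() macros from <sys/ioccom.h>.  FIONREAD, defined as
 * _IOR('f', 127, int), is a typical "copy an int back out" request.
 */
#include <sys/ioccom.h>
#include <sys/filio.h>
#include <assert.h>

static void
check_fionread_encoding(void)
{
	assert(IOCPARM_LEN(FIONREAD) == sizeof(int));	/* size field */
	assert((FIONREAD & IOC_OUT) != 0);	/* kernel copies data out */
	assert((FIONREAD & IOC_IN) == 0);	/* nothing is copied in */
}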
721 
722 int
723 kern_ioctl(struct thread *td, int fd, u_long com, caddr_t data)
724 {
725 	struct file *fp;
726 	struct filedesc *fdp;
727 	int error, tmp, locked;
728 
729 	AUDIT_ARG_FD(fd);
730 	AUDIT_ARG_CMD(com);
731 
732 	fdp = td->td_proc->p_fd;
733 
734 	switch (com) {
735 	case FIONCLEX:
736 	case FIOCLEX:
737 		FILEDESC_XLOCK(fdp);
738 		locked = LA_XLOCKED;
739 		break;
740 	default:
741 #ifdef CAPABILITIES
742 		FILEDESC_SLOCK(fdp);
743 		locked = LA_SLOCKED;
744 #else
745 		locked = LA_UNLOCKED;
746 #endif
747 		break;
748 	}
749 
750 #ifdef CAPABILITIES
751 	if ((fp = fget_locked(fdp, fd)) == NULL) {
752 		error = EBADF;
753 		goto out;
754 	}
755 	if ((error = cap_ioctl_check(fdp, fd, com)) != 0) {
756 		fp = NULL;	/* fhold() was not called yet */
757 		goto out;
758 	}
759 	if (!fhold(fp)) {
760 		error = EBADF;
761 		fp = NULL;
762 		goto out;
763 	}
764 	if (locked == LA_SLOCKED) {
765 		FILEDESC_SUNLOCK(fdp);
766 		locked = LA_UNLOCKED;
767 	}
768 #else
769 	error = fget(td, fd, &cap_ioctl_rights, &fp);
770 	if (error != 0) {
771 		fp = NULL;
772 		goto out;
773 	}
774 #endif
775 	if ((fp->f_flag & (FREAD | FWRITE)) == 0) {
776 		error = EBADF;
777 		goto out;
778 	}
779 
780 	switch (com) {
781 	case FIONCLEX:
782 		fdp->fd_ofiles[fd].fde_flags &= ~UF_EXCLOSE;
783 		goto out;
784 	case FIOCLEX:
785 		fdp->fd_ofiles[fd].fde_flags |= UF_EXCLOSE;
786 		goto out;
787 	case FIONBIO:
788 		if ((tmp = *(int *)data))
789 			atomic_set_int(&fp->f_flag, FNONBLOCK);
790 		else
791 			atomic_clear_int(&fp->f_flag, FNONBLOCK);
792 		data = (void *)&tmp;
793 		break;
794 	case FIOASYNC:
795 		if ((tmp = *(int *)data))
796 			atomic_set_int(&fp->f_flag, FASYNC);
797 		else
798 			atomic_clear_int(&fp->f_flag, FASYNC);
799 		data = (void *)&tmp;
800 		break;
801 	}
802 
803 	error = fo_ioctl(fp, com, data, td->td_ucred, td);
804 out:
805 	switch (locked) {
806 	case LA_XLOCKED:
807 		FILEDESC_XUNLOCK(fdp);
808 		break;
809 #ifdef CAPABILITIES
810 	case LA_SLOCKED:
811 		FILEDESC_SUNLOCK(fdp);
812 		break;
813 #endif
814 	default:
815 		FILEDESC_UNLOCK_ASSERT(fdp);
816 		break;
817 	}
818 	if (fp != NULL)
819 		fdrop(fp, td);
820 	return (error);
821 }
822 
823 int
824 sys_posix_fallocate(struct thread *td, struct posix_fallocate_args *uap)
825 {
826 	int error;
827 
828 	error = kern_posix_fallocate(td, uap->fd, uap->offset, uap->len);
829 	return (kern_posix_error(td, error));
830 }
831 
832 int
833 kern_posix_fallocate(struct thread *td, int fd, off_t offset, off_t len)
834 {
835 	struct file *fp;
836 	int error;
837 
838 	AUDIT_ARG_FD(fd);
839 	if (offset < 0 || len <= 0)
840 		return (EINVAL);
841 	/* Check for wrap. */
842 	if (offset > OFF_MAX - len)
843 		return (EFBIG);
844 	AUDIT_ARG_FD(fd);
845 	error = fget(td, fd, &cap_pwrite_rights, &fp);
846 	if (error != 0)
847 		return (error);
848 	AUDIT_ARG_FILE(td->td_proc, fp);
849 	if ((fp->f_ops->fo_flags & DFLAG_SEEKABLE) == 0) {
850 		error = ESPIPE;
851 		goto out;
852 	}
853 	if ((fp->f_flag & FWRITE) == 0) {
854 		error = EBADF;
855 		goto out;
856 	}
857 
858 	error = fo_fallocate(fp, offset, len, td);
859  out:
860 	fdrop(fp, td);
861 	return (error);
862 }
863 
864 int
865 kern_specialfd(struct thread *td, int type, void *arg)
866 {
867 	struct file *fp;
868 	struct specialfd_eventfd *ae;
869 	int error, fd, fflags;
870 
871 	fflags = 0;
872 	error = falloc_noinstall(td, &fp);
873 	if (error != 0)
874 		return (error);
875 
876 	switch (type) {
877 	case SPECIALFD_EVENTFD:
878 		ae = arg;
879 		if ((ae->flags & EFD_CLOEXEC) != 0)
880 			fflags |= O_CLOEXEC;
881 		error = eventfd_create_file(td, fp, ae->initval, ae->flags);
882 		break;
883 	default:
884 		error = EINVAL;
885 		break;
886 	}
887 
888 	if (error == 0)
889 		error = finstall(td, fp, &fd, fflags, NULL);
890 	fdrop(fp, td);
891 	if (error == 0)
892 		td->td_retval[0] = fd;
893 	return (error);
894 }
895 
896 int
897 sys___specialfd(struct thread *td, struct __specialfd_args *args)
898 {
899 	struct specialfd_eventfd ae;
900 	int error;
901 
902 	switch (args->type) {
903 	case SPECIALFD_EVENTFD:
904 		if (args->len != sizeof(struct specialfd_eventfd)) {
905 			error = EINVAL;
906 			break;
907 		}
908 		error = copyin(args->req, &ae, sizeof(ae));
909 		if (error != 0)
910 			break;
911 		if ((ae.flags & ~(EFD_CLOEXEC | EFD_NONBLOCK |
912 		    EFD_SEMAPHORE)) != 0) {
913 			error = EINVAL;
914 			break;
915 		}
916 		error = kern_specialfd(td, args->type, &ae);
917 		break;
918 	default:
919 		error = EINVAL;
920 		break;
921 	}
922 	return (error);
923 }
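/*
 * Editorial userland sketch, not part of the original file: the
 * SPECIALFD_EVENTFD case above is the kernel side of the eventfd(2)
 * wrapper.  The descriptor carries a 64-bit counter that writes add to
 * and reads drain.
 */
#include <sys/eventfd.h>
#include <unistd.h>
#include <err.h>

static void
eventfd_demo(void)
{
	eventfd_t val;
	int efd;

	if ((efd = eventfd(0, EFD_CLOEXEC)) == -1)
		err(1, "eventfd");
	eventfd_write(efd, 1);		/* counter += 1 */
	eventfd_read(efd, &val);	/* val == 1, counter back to 0 */
	close(efd);
}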
924 
925 int
926 poll_no_poll(int events)
927 {
928 	/*
929 	 * Return true for read/write.  If the user asked for something
930 	 * special, return POLLNVAL, so that clients have a way of
931 	 * determining reliably whether or not the extended
932 	 * functionality is present without hard-coding knowledge
933 	 * of specific filesystem implementations.
934 	 */
935 	if (events & ~POLLSTANDARD)
936 		return (POLLNVAL);
937 
938 	return (events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
939 }
940 
941 int
942 sys_pselect(struct thread *td, struct pselect_args *uap)
943 {
944 	struct timespec ts;
945 	struct timeval tv, *tvp;
946 	sigset_t set, *uset;
947 	int error;
948 
949 	if (uap->ts != NULL) {
950 		error = copyin(uap->ts, &ts, sizeof(ts));
951 		if (error != 0)
952 		    return (error);
953 		TIMESPEC_TO_TIMEVAL(&tv, &ts);
954 		tvp = &tv;
955 	} else
956 		tvp = NULL;
957 	if (uap->sm != NULL) {
958 		error = copyin(uap->sm, &set, sizeof(set));
959 		if (error != 0)
960 			return (error);
961 		uset = &set;
962 	} else
963 		uset = NULL;
964 	return (kern_pselect(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
965 	    uset, NFDBITS));
966 }
967 
968 int
969 kern_pselect(struct thread *td, int nd, fd_set *in, fd_set *ou, fd_set *ex,
970     struct timeval *tvp, sigset_t *uset, int abi_nfdbits)
971 {
972 	int error;
973 #ifndef FSTACK
974 	if (uset != NULL) {
975 		error = kern_sigprocmask(td, SIG_SETMASK, uset,
976 		    &td->td_oldsigmask, 0);
977 		if (error != 0)
978 			return (error);
979 		td->td_pflags |= TDP_OLDMASK;
980 		/*
981 		 * Make sure that ast() is called on return to
982 		 * usermode and TDP_OLDMASK is cleared, restoring old
983 		 * sigmask.
984 		 */
985 		thread_lock(td);
986 		td->td_flags |= TDF_ASTPENDING;
987 		thread_unlock(td);
988 	}
989 #endif
990 	error = kern_select(td, nd, in, ou, ex, tvp, abi_nfdbits);
991 	return (error);
992 }
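/*
 * Editorial userland sketch, not part of the original file: the temporary
 * signal-mask swap above is what makes this classic pattern race free;
 * SIGTERM can only be delivered while pselect() is actually sleeping.
 */
#include <sys/select.h>
#include <signal.h>

static volatile sig_atomic_t got_term;

static void
on_term(int sig)
{
	(void)sig;
	got_term = 1;
}

static int
wait_readable(int fd)
{
	sigset_t blockmask, origmask;
	fd_set rset;

	signal(SIGTERM, on_term);
	sigemptyset(&blockmask);
	sigaddset(&blockmask, SIGTERM);
	sigprocmask(SIG_BLOCK, &blockmask, &origmask);

	FD_ZERO(&rset);
	FD_SET(fd, &rset);
	/* The old mask is restored atomically when pselect() returns. */
	return (pselect(fd + 1, &rset, NULL, NULL, NULL, &origmask));
}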
993 
994 #ifndef _SYS_SYSPROTO_H_
995 struct select_args {
996 	int	nd;
997 	fd_set	*in, *ou, *ex;
998 	struct	timeval *tv;
999 };
1000 #endif
1001 int
1002 sys_select(struct thread *td, struct select_args *uap)
1003 {
1004 	struct timeval tv, *tvp;
1005 	int error;
1006 
1007 	if (uap->tv != NULL) {
1008 		error = copyin(uap->tv, &tv, sizeof(tv));
1009 		if (error)
1010 			return (error);
1011 		tvp = &tv;
1012 	} else
1013 		tvp = NULL;
1014 
1015 	return (kern_select(td, uap->nd, uap->in, uap->ou, uap->ex, tvp,
1016 	    NFDBITS));
1017 }
1018 
1019 /*
1020  * In the unlikely case when the user specified an n greater than the
1021  * last open file descriptor, check that no bits are set after the last
1022  * valid fd.  We must return EBADF if any is set.
1023  *
1024  * There are applications that rely on this behaviour.
1025  *
1026  * nd is fd_nfiles.
1027  */
1028 static int
1029 select_check_badfd(fd_set *fd_in, int nd, int ndu, int abi_nfdbits)
1030 {
1031 	char *addr, *oaddr;
1032 	int b, i, res;
1033 	uint8_t bits;
1034 
1035 	if (nd >= ndu || fd_in == NULL)
1036 		return (0);
1037 
1038 	oaddr = NULL;
1039 	bits = 0; /* silence gcc */
1040 	for (i = nd; i < ndu; i++) {
1041 		b = i / NBBY;
1042 #if BYTE_ORDER == LITTLE_ENDIAN
1043 		addr = (char *)fd_in + b;
1044 #else
1045 		addr = (char *)fd_in;
1046 		if (abi_nfdbits == NFDBITS) {
1047 			addr += rounddown(b, sizeof(fd_mask)) +
1048 			    sizeof(fd_mask) - 1 - b % sizeof(fd_mask);
1049 		} else {
1050 			addr += rounddown(b, sizeof(uint32_t)) +
1051 			    sizeof(uint32_t) - 1 - b % sizeof(uint32_t);
1052 		}
1053 #endif
1054 		if (addr != oaddr) {
1055 			res = fubyte(addr);
1056 			if (res == -1)
1057 				return (EFAULT);
1058 			oaddr = addr;
1059 			bits = res;
1060 		}
1061 		if ((bits & (1 << (i % NBBY))) != 0)
1062 			return (EBADF);
1063 	}
1064 	return (0);
1065 }
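/*
 * Editorial userland sketch, not part of the original file: a bit set for
 * a descriptor beyond the last open one (here fd 100, assumed unopened)
 * trips the check above and select(2) fails with EBADF.
 */
#include <sys/select.h>
#include <errno.h>
#include <assert.h>

static void
demo_select_ebadf(void)
{
	fd_set rset;

	FD_ZERO(&rset);
	FD_SET(100, &rset);	/* hypothetical: fd 100 was never opened */
	assert(select(101, &rset, NULL, NULL, NULL) == -1);
	assert(errno == EBADF);
}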
1066 
1067 int
1068 kern_select(struct thread *td, int nd, fd_set *fd_in, fd_set *fd_ou,
1069     fd_set *fd_ex, struct timeval *tvp, int abi_nfdbits)
1070 {
1071 	struct filedesc *fdp;
1072 	/*
1073 	 * The magic 2048 here is chosen to be just enough for FD_SETSIZE
1074 	 * infds with the new FD_SETSIZE of 1024, and more than enough for
1075 	 * FD_SETSIZE infds, outfds and exceptfds with the old FD_SETSIZE
1076 	 * of 256.
1077 	 */
1078 	fd_mask s_selbits[howmany(2048, NFDBITS)];
1079 	fd_mask *ibits[3], *obits[3], *selbits, *sbp;
1080 	struct timeval rtv;
1081 	sbintime_t asbt, precision, rsbt;
1082 	u_int nbufbytes, ncpbytes, ncpubytes, nfdbits;
1083 	int error, lf, ndu;
1084 
1085 	if (nd < 0)
1086 		return (EINVAL);
1087 	fdp = td->td_proc->p_fd;
1088 	ndu = nd;
1089 	lf = fdp->fd_nfiles;
1090 	if (nd > lf)
1091 		nd = lf;
1092 
1093 	error = select_check_badfd(fd_in, nd, ndu, abi_nfdbits);
1094 	if (error != 0)
1095 		return (error);
1096 	error = select_check_badfd(fd_ou, nd, ndu, abi_nfdbits);
1097 	if (error != 0)
1098 		return (error);
1099 	error = select_check_badfd(fd_ex, nd, ndu, abi_nfdbits);
1100 	if (error != 0)
1101 		return (error);
1102 
1103 	/*
1104 	 * Allocate just enough bits for the non-null fd_sets.  Use the
1105 	 * preallocated auto buffer if possible.
1106 	 */
1107 	nfdbits = roundup(nd, NFDBITS);
1108 	ncpbytes = nfdbits / NBBY;
1109 	ncpubytes = roundup(nd, abi_nfdbits) / NBBY;
1110 	nbufbytes = 0;
1111 	if (fd_in != NULL)
1112 		nbufbytes += 2 * ncpbytes;
1113 	if (fd_ou != NULL)
1114 		nbufbytes += 2 * ncpbytes;
1115 	if (fd_ex != NULL)
1116 		nbufbytes += 2 * ncpbytes;
1117 	if (nbufbytes <= sizeof s_selbits)
1118 		selbits = &s_selbits[0];
1119 	else
1120 		selbits = malloc(nbufbytes, M_SELECT, M_WAITOK);
1121 
1122 	/*
1123 	 * Assign pointers into the bit buffers and fetch the input bits.
1124 	 * Put the output buffers together so that they can be bzeroed
1125 	 * together.
1126 	 */
1127 	sbp = selbits;
1128 #define	getbits(name, x) \
1129 	do {								\
1130 		if (name == NULL) {					\
1131 			ibits[x] = NULL;				\
1132 			obits[x] = NULL;				\
1133 		} else {						\
1134 			ibits[x] = sbp + nbufbytes / 2 / sizeof *sbp;	\
1135 			obits[x] = sbp;					\
1136 			sbp += ncpbytes / sizeof *sbp;			\
1137 			error = copyin(name, ibits[x], ncpubytes);	\
1138 			if (error != 0)					\
1139 				goto done;				\
1140 			if (ncpbytes != ncpubytes)			\
1141 				bzero((char *)ibits[x] + ncpubytes,	\
1142 				    ncpbytes - ncpubytes);		\
1143 		}							\
1144 	} while (0)
1145 	getbits(fd_in, 0);
1146 	getbits(fd_ou, 1);
1147 	getbits(fd_ex, 2);
1148 #undef	getbits
1149 
1150 #if BYTE_ORDER == BIG_ENDIAN && defined(__LP64__)
1151 	/*
1152 	 * XXX: swizzle_fdset assumes that if abi_nfdbits != NFDBITS,
1153 	 * we are running under 32-bit emulation. This should be more
1154 	 * generic.
1155 	 */
1156 #define swizzle_fdset(bits)						\
1157 	if (abi_nfdbits != NFDBITS && bits != NULL) {			\
1158 		int i;							\
1159 		for (i = 0; i < ncpbytes / sizeof *sbp; i++)		\
1160 			bits[i] = (bits[i] >> 32) | (bits[i] << 32);	\
1161 	}
1162 #else
1163 #define swizzle_fdset(bits)
1164 #endif
1165 
1166 	/* Make sure the bit order makes it through an ABI transition */
1167 	swizzle_fdset(ibits[0]);
1168 	swizzle_fdset(ibits[1]);
1169 	swizzle_fdset(ibits[2]);
1170 
1171 	if (nbufbytes != 0)
1172 		bzero(selbits, nbufbytes / 2);
1173 
1174 	precision = 0;
1175 	if (tvp != NULL) {
1176 		rtv = *tvp;
1177 		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
1178 		    rtv.tv_usec >= 1000000) {
1179 			error = EINVAL;
1180 			goto done;
1181 		}
1182 		if (!timevalisset(&rtv))
1183 			asbt = 0;
1184 		else if (rtv.tv_sec <= INT32_MAX) {
1185 			rsbt = tvtosbt(rtv);
1186 			precision = rsbt;
1187 			precision >>= tc_precexp;
1188 			if (TIMESEL(&asbt, rsbt))
1189 				asbt += tc_tick_sbt;
1190 			if (asbt <= SBT_MAX - rsbt)
1191 				asbt += rsbt;
1192 			else
1193 				asbt = -1;
1194 		} else
1195 			asbt = -1;
1196 	} else
1197 		asbt = -1;
1198 	seltdinit(td);
1199 	/* Iterate until the timeout expires or descriptors become ready. */
1200 	for (;;) {
1201 		error = selscan(td, ibits, obits, nd);
1202 		if (error || td->td_retval[0] != 0)
1203 			break;
1204 		error = seltdwait(td, asbt, precision);
1205 		if (error)
1206 			break;
1207 		error = selrescan(td, ibits, obits);
1208 		if (error || td->td_retval[0] != 0)
1209 			break;
1210 	}
1211 	seltdclear(td);
1212 
1213 done:
1214 	/* select is not restarted after signals... */
1215 	if (error == ERESTART)
1216 		error = EINTR;
1217 	if (error == EWOULDBLOCK)
1218 		error = 0;
1219 
1220 	/* swizzle bit order back, if necessary */
1221 	swizzle_fdset(obits[0]);
1222 	swizzle_fdset(obits[1]);
1223 	swizzle_fdset(obits[2]);
1224 #undef swizzle_fdset
1225 
1226 #define	putbits(name, x) \
1227 	if (name && (error2 = copyout(obits[x], name, ncpubytes))) \
1228 		error = error2;
1229 	if (error == 0) {
1230 		int error2;
1231 
1232 		putbits(fd_in, 0);
1233 		putbits(fd_ou, 1);
1234 		putbits(fd_ex, 2);
1235 #undef putbits
1236 	}
1237 	if (selbits != &s_selbits[0])
1238 		free(selbits, M_SELECT);
1239 
1240 	return (error);
1241 }
1242 /*
1243  * Convert a select bit set to poll flags.
1244  *
1245  * The backend always returns POLLHUP/POLLERR if appropriate and we
1246  * return this as a set bit in any set.
1247  */
1248 static int select_flags[3] = {
1249     POLLRDNORM | POLLHUP | POLLERR,
1250     POLLWRNORM | POLLHUP | POLLERR,
1251     POLLRDBAND | POLLERR
1252 };
1253 
1254 /*
1255  * Compute the fo_poll flags required for a fd given by the index and
1256  * bit position in the fd_mask array.
1257  */
1258 static __inline int
1259 selflags(fd_mask **ibits, int idx, fd_mask bit)
1260 {
1261 	int flags;
1262 	int msk;
1263 
1264 	flags = 0;
1265 	for (msk = 0; msk < 3; msk++) {
1266 		if (ibits[msk] == NULL)
1267 			continue;
1268 		if ((ibits[msk][idx] & bit) == 0)
1269 			continue;
1270 		flags |= select_flags[msk];
1271 	}
1272 	return (flags);
1273 }
1274 
1275 /*
1276  * Set the appropriate output bits given a mask of fired events and the
1277  * input bits originally requested.
1278  */
1279 static __inline int
1280 selsetbits(fd_mask **ibits, fd_mask **obits, int idx, fd_mask bit, int events)
1281 {
1282 	int msk;
1283 	int n;
1284 
1285 	n = 0;
1286 	for (msk = 0; msk < 3; msk++) {
1287 		if ((events & select_flags[msk]) == 0)
1288 			continue;
1289 		if (ibits[msk] == NULL)
1290 			continue;
1291 		if ((ibits[msk][idx] & bit) == 0)
1292 			continue;
1293 		/*
1294 		 * XXX Check for a duplicate set.  This can occur because a
1295 		 * socket calls selrecord() twice for each poll() call
1296 		 * resulting in two selfds per real fd.  selrescan() will
1297 		 * call selsetbits twice as a result.
1298 		 */
1299 		if ((obits[msk][idx] & bit) != 0)
1300 			continue;
1301 		obits[msk][idx] |= bit;
1302 		n++;
1303 	}
1304 
1305 	return (n);
1306 }
1307 
1308 /*
1309  * Traverse the list of fds attached to this thread's seltd and check for
1310  * completion.
1311  */
1312 static int
1313 selrescan(struct thread *td, fd_mask **ibits, fd_mask **obits)
1314 {
1315 	struct filedesc *fdp;
1316 	struct selinfo *si;
1317 	struct seltd *stp;
1318 	struct selfd *sfp;
1319 	struct selfd *sfn;
1320 	struct file *fp;
1321 	fd_mask bit;
1322 	int fd, ev, n, idx;
1323 	int error;
1324 	bool only_user;
1325 
1326 	fdp = td->td_proc->p_fd;
1327 	stp = td->td_sel;
1328 	n = 0;
1329 	only_user = FILEDESC_IS_ONLY_USER(fdp);
1330 	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
1331 		fd = (int)(uintptr_t)sfp->sf_cookie;
1332 		si = sfp->sf_si;
1333 		selfdfree(stp, sfp);
1334 		/* If the selinfo wasn't cleared the event didn't fire. */
1335 		if (si != NULL)
1336 			continue;
1337 		if (only_user)
1338 			error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
1339 		else
1340 			error = fget_unlocked(fdp, fd, &cap_event_rights, &fp);
1341 		if (__predict_false(error != 0))
1342 			return (error);
1343 		idx = fd / NFDBITS;
1344 		bit = (fd_mask)1 << (fd % NFDBITS);
1345 		ev = fo_poll(fp, selflags(ibits, idx, bit), td->td_ucred, td);
1346 		if (only_user)
1347 			fput_only_user(fdp, fp);
1348 		else
1349 			fdrop(fp, td);
1350 		if (ev != 0)
1351 			n += selsetbits(ibits, obits, idx, bit, ev);
1352 	}
1353 	stp->st_flags = 0;
1354 	td->td_retval[0] = n;
1355 	return (0);
1356 }
1357 
1358 /*
1359  * Perform the initial filedescriptor scan and register ourselves with
1360  * each selinfo.
1361  */
1362 static int
1363 selscan(struct thread *td, fd_mask **ibits, fd_mask **obits, int nfd)
1364 {
1365 	struct filedesc *fdp;
1366 	struct file *fp;
1367 	fd_mask bit;
1368 	int ev, flags, end, fd;
1369 	int n, idx;
1370 	int error;
1371 	bool only_user;
1372 
1373 	fdp = td->td_proc->p_fd;
1374 	n = 0;
1375 	only_user = FILEDESC_IS_ONLY_USER(fdp);
1376 	for (idx = 0, fd = 0; fd < nfd; idx++) {
1377 		end = imin(fd + NFDBITS, nfd);
1378 		for (bit = 1; fd < end; bit <<= 1, fd++) {
1379 			/* Compute the list of events we're interested in. */
1380 			flags = selflags(ibits, idx, bit);
1381 			if (flags == 0)
1382 				continue;
1383 			if (only_user)
1384 				error = fget_only_user(fdp, fd, &cap_event_rights, &fp);
1385 			else
1386 				error = fget_unlocked(fdp, fd, &cap_event_rights, &fp);
1387 			if (__predict_false(error != 0))
1388 				return (error);
1389 			selfdalloc(td, (void *)(uintptr_t)fd);
1390 			ev = fo_poll(fp, flags, td->td_ucred, td);
1391 			if (only_user)
1392 				fput_only_user(fdp, fp);
1393 			else
1394 				fdrop(fp, td);
1395 			if (ev != 0)
1396 				n += selsetbits(ibits, obits, idx, bit, ev);
1397 		}
1398 	}
1399 
1400 	td->td_retval[0] = n;
1401 	return (0);
1402 }
1403 
1404 int
1405 sys_poll(struct thread *td, struct poll_args *uap)
1406 {
1407 	struct timespec ts, *tsp;
1408 
1409 	if (uap->timeout != INFTIM) {
1410 		if (uap->timeout < 0)
1411 			return (EINVAL);
1412 		ts.tv_sec = uap->timeout / 1000;
1413 		ts.tv_nsec = (uap->timeout % 1000) * 1000000;
1414 		tsp = &ts;
1415 	} else
1416 		tsp = NULL;
1417 
1418 	return (kern_poll(td, uap->fds, uap->nfds, tsp, NULL));
1419 }
1420 
1421 int
1422 kern_poll(struct thread *td, struct pollfd *ufds, u_int nfds,
1423     struct timespec *tsp, sigset_t *uset)
1424 {
1425 	struct pollfd *kfds;
1426 	struct pollfd stackfds[32];
1427 	sbintime_t sbt, precision, tmp;
1428 	time_t over;
1429 	struct timespec ts;
1430 	int error;
1431 
1432 	precision = 0;
1433 	if (tsp != NULL) {
1434 		if (tsp->tv_sec < 0)
1435 			return (EINVAL);
1436 		if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
1437 			return (EINVAL);
1438 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
1439 			sbt = 0;
1440 		else {
1441 			ts = *tsp;
1442 			if (ts.tv_sec > INT32_MAX / 2) {
1443 				over = ts.tv_sec - INT32_MAX / 2;
1444 				ts.tv_sec -= over;
1445 			} else
1446 				over = 0;
1447 			tmp = tstosbt(ts);
1448 			precision = tmp;
1449 			precision >>= tc_precexp;
1450 			if (TIMESEL(&sbt, tmp))
1451 				sbt += tc_tick_sbt;
1452 			sbt += tmp;
1453 		}
1454 	} else
1455 		sbt = -1;
1456 
1457 	/*
1458 	 * This is kinda bogus.  We have fd limits, but that is not
1459 	 * really related to the size of the pollfd array.  Make sure
1460 	 * we let the process use at least FD_SETSIZE entries and at
1461 	 * least enough for the system-wide limits.  We want to be reasonably
1462 	 * safe, but not overly restrictive.
1463 	 */
1464 	if (nfds > maxfilesperproc && nfds > FD_SETSIZE)
1465 		return (EINVAL);
1466 	if (nfds > nitems(stackfds))
1467 		kfds = mallocarray(nfds, sizeof(*kfds), M_TEMP, M_WAITOK);
1468 	else
1469 		kfds = stackfds;
1470 	error = copyin(ufds, kfds, nfds * sizeof(*kfds));
1471 	if (error)
1472 		goto done;
1473 
1474 #ifndef FSTACK
1475 	if (uset != NULL) {
1476 		error = kern_sigprocmask(td, SIG_SETMASK, uset,
1477 		    &td->td_oldsigmask, 0);
1478 		if (error)
1479 			goto done;
1480 		td->td_pflags |= TDP_OLDMASK;
1481 		/*
1482 		 * Make sure that ast() is called on return to
1483 		 * usermode and TDP_OLDMASK is cleared, restoring old
1484 		 * sigmask.
1485 		 */
1486 		thread_lock(td);
1487 		td->td_flags |= TDF_ASTPENDING;
1488 		thread_unlock(td);
1489 	}
1490 #endif
1491 
1492 	seltdinit(td);
1493 	/* Iterate until the timeout expires or descriptors become ready. */
1494 	for (;;) {
1495 		error = pollscan(td, kfds, nfds);
1496 		if (error || td->td_retval[0] != 0)
1497 			break;
1498 		error = seltdwait(td, sbt, precision);
1499 		if (error)
1500 			break;
1501 		error = pollrescan(td);
1502 		if (error || td->td_retval[0] != 0)
1503 			break;
1504 	}
1505 	seltdclear(td);
1506 
1507 done:
1508 	/* poll is not restarted after signals... */
1509 	if (error == ERESTART)
1510 		error = EINTR;
1511 	if (error == EWOULDBLOCK)
1512 		error = 0;
1513 	if (error == 0) {
1514 		error = pollout(td, kfds, ufds, nfds);
1515 		if (error)
1516 			goto out;
1517 	}
1518 out:
1519 	if (nfds > nitems(stackfds))
1520 		free(kfds, M_TEMP);
1521 	return (error);
1522 }
1523 
1524 int
1525 sys_ppoll(struct thread *td, struct ppoll_args *uap)
1526 {
1527 	struct timespec ts, *tsp;
1528 	sigset_t set, *ssp;
1529 	int error;
1530 
1531 	if (uap->ts != NULL) {
1532 		error = copyin(uap->ts, &ts, sizeof(ts));
1533 		if (error)
1534 			return (error);
1535 		tsp = &ts;
1536 	} else
1537 		tsp = NULL;
1538 	if (uap->set != NULL) {
1539 		error = copyin(uap->set, &set, sizeof(set));
1540 		if (error)
1541 			return (error);
1542 		ssp = &set;
1543 	} else
1544 		ssp = NULL;
1545 	/*
1546 	 * fds is still a pointer to user space. kern_poll() will
1547 	 * take care of copyin that array to the kernel space.
1548 	 */
1549 
1550 	return (kern_poll(td, uap->fds, uap->nfds, tsp, ssp));
1551 }
1552 
1553 static int
1554 pollrescan(struct thread *td)
1555 {
1556 	struct seltd *stp;
1557 	struct selfd *sfp;
1558 	struct selfd *sfn;
1559 	struct selinfo *si;
1560 	struct filedesc *fdp;
1561 	struct file *fp;
1562 	struct pollfd *fd;
1563 	int n, error;
1564 	bool only_user;
1565 
1566 	n = 0;
1567 	fdp = td->td_proc->p_fd;
1568 	stp = td->td_sel;
1569 	only_user = FILEDESC_IS_ONLY_USER(fdp);
1570 	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn) {
1571 		fd = (struct pollfd *)sfp->sf_cookie;
1572 		si = sfp->sf_si;
1573 		selfdfree(stp, sfp);
1574 		/* If the selinfo wasn't cleared the event didn't fire. */
1575 		if (si != NULL)
1576 			continue;
1577 		if (only_user)
1578 			error = fget_only_user(fdp, fd->fd, &cap_event_rights, &fp);
1579 		else
1580 			error = fget_unlocked(fdp, fd->fd, &cap_event_rights, &fp);
1581 		if (__predict_false(error != 0)) {
1582 			fd->revents = POLLNVAL;
1583 			n++;
1584 			continue;
1585 		}
1586 		/*
1587 		 * Note: backend also returns POLLHUP and
1588 		 * POLLERR if appropriate.
1589 		 */
1590 		fd->revents = fo_poll(fp, fd->events, td->td_ucred, td);
1591 		if (only_user)
1592 			fput_only_user(fdp, fp);
1593 		else
1594 			fdrop(fp, td);
1595 		if (fd->revents != 0)
1596 			n++;
1597 	}
1598 	stp->st_flags = 0;
1599 	td->td_retval[0] = n;
1600 	return (0);
1601 }
1602 
1603 static int
1604 pollout(struct thread *td, struct pollfd *fds, struct pollfd *ufds, u_int nfd)
1605 {
1606 	int error = 0;
1607 	u_int i = 0;
1608 	u_int n = 0;
1609 
1610 	for (i = 0; i < nfd; i++) {
1611 		error = copyout(&fds->revents, &ufds->revents,
1612 		    sizeof(ufds->revents));
1613 		if (error)
1614 			return (error);
1615 		if (fds->revents != 0)
1616 			n++;
1617 		fds++;
1618 		ufds++;
1619 	}
1620 	td->td_retval[0] = n;
1621 	return (0);
1622 }
1623 
1624 static int
1625 pollscan(struct thread *td, struct pollfd *fds, u_int nfd)
1626 {
1627 	struct filedesc *fdp;
1628 	struct file *fp;
1629 	int i, n, error;
1630 	bool only_user;
1631 
1632 	n = 0;
1633 	fdp = td->td_proc->p_fd;
1634 	only_user = FILEDESC_IS_ONLY_USER(fdp);
1635 	for (i = 0; i < nfd; i++, fds++) {
1636 		if (fds->fd < 0) {
1637 			fds->revents = 0;
1638 			continue;
1639 		}
1640 		if (only_user)
1641 			error = fget_only_user(fdp, fds->fd, &cap_event_rights, &fp);
1642 		else
1643 			error = fget_unlocked(fdp, fds->fd, &cap_event_rights, &fp);
1644 		if (__predict_false(error != 0)) {
1645 			fds->revents = POLLNVAL;
1646 			n++;
1647 			continue;
1648 		}
1649 		/*
1650 		 * Note: backend also returns POLLHUP and
1651 		 * POLLERR if appropriate.
1652 		 */
1653 		selfdalloc(td, fds);
1654 		fds->revents = fo_poll(fp, fds->events,
1655 		    td->td_ucred, td);
1656 		if (only_user)
1657 			fput_only_user(fdp, fp);
1658 		else
1659 			fdrop(fp, td);
1660 		/*
1661 		 * POSIX requires POLLOUT to be never
1662 		 * set simultaneously with POLLHUP.
1663 		 */
1664 		if ((fds->revents & POLLHUP) != 0)
1665 			fds->revents &= ~POLLOUT;
1666 
1667 		if (fds->revents != 0)
1668 			n++;
1669 	}
1670 	td->td_retval[0] = n;
1671 	return (0);
1672 }
1673 
1674 /*
1675  * XXX This was created specifically to support netncp and netsmb.  This
1676  * allows the caller to specify a socket to wait for events on.  It returns
1677  * 0 if any events matched and an error otherwise.  There is no way to
1678  * determine which events fired.
1679  */
1680 int
1681 selsocket(struct socket *so, int events, struct timeval *tvp, struct thread *td)
1682 {
1683 	struct timeval rtv;
1684 	sbintime_t asbt, precision, rsbt;
1685 	int error;
1686 
1687 	precision = 0;	/* stupid gcc! */
1688 	if (tvp != NULL) {
1689 		rtv = *tvp;
1690 		if (rtv.tv_sec < 0 || rtv.tv_usec < 0 ||
1691 		    rtv.tv_usec >= 1000000)
1692 			return (EINVAL);
1693 		if (!timevalisset(&rtv))
1694 			asbt = 0;
1695 		else if (rtv.tv_sec <= INT32_MAX) {
1696 			rsbt = tvtosbt(rtv);
1697 			precision = rsbt;
1698 			precision >>= tc_precexp;
1699 			if (TIMESEL(&asbt, rsbt))
1700 				asbt += tc_tick_sbt;
1701 			if (asbt <= SBT_MAX - rsbt)
1702 				asbt += rsbt;
1703 			else
1704 				asbt = -1;
1705 		} else
1706 			asbt = -1;
1707 	} else
1708 		asbt = -1;
1709 	seltdinit(td);
1710 	/*
1711 	 * Iterate until the timeout expires or the socket becomes ready.
1712 	 */
1713 	for (;;) {
1714 		selfdalloc(td, NULL);
1715 		error = sopoll(so, events, NULL, td);
1716 		/* error here is actually the ready events. */
1717 		if (error)
1718 			return (0);
1719 		error = seltdwait(td, asbt, precision);
1720 		if (error)
1721 			break;
1722 	}
1723 	seltdclear(td);
1724 	/* XXX Duplicates ncp/smb behavior. */
1725 	if (error == ERESTART)
1726 		error = 0;
1727 	return (error);
1728 }
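/*
 * Editorial in-kernel sketch, not part of the original file (the
 * wait_for_reply() name is hypothetical): a consumer such as netsmb waits
 * for its socket to become readable with a bounded timeout.  A return of
 * 0 means an event fired; EWOULDBLOCK means the timeout expired.
 */
static int
wait_for_reply(struct socket *so, struct thread *td)
{
	struct timeval tv;

	tv.tv_sec = 5;
	tv.tv_usec = 0;
	return (selsocket(so, POLLIN | POLLRDNORM, &tv, td));
}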
1729 
1730 /*
1731  * Preallocate two selfds associated with 'cookie'.  Some fo_poll routines
1732  * have two select sets, one for read and another for write.
1733  */
1734 static void
1735 selfdalloc(struct thread *td, void *cookie)
1736 {
1737 	struct seltd *stp;
1738 
1739 	stp = td->td_sel;
1740 	if (stp->st_free1 == NULL)
1741 		stp->st_free1 = malloc(sizeof(*stp->st_free1), M_SELFD, M_WAITOK|M_ZERO);
1742 	stp->st_free1->sf_td = stp;
1743 	stp->st_free1->sf_cookie = cookie;
1744 	if (stp->st_free2 == NULL)
1745 		stp->st_free2 = malloc(sizeof(*stp->st_free2), M_SELFD, M_WAITOK|M_ZERO);
1746 	stp->st_free2->sf_td = stp;
1747 	stp->st_free2->sf_cookie = cookie;
1748 }
1749 
1750 static void
1751 selfdfree(struct seltd *stp, struct selfd *sfp)
1752 {
1753 	STAILQ_REMOVE(&stp->st_selq, sfp, selfd, sf_link);
1754 	/*
1755 	 * Paired with doselwakeup.
1756 	 */
1757 	if (atomic_load_acq_ptr((uintptr_t *)&sfp->sf_si) != (uintptr_t)NULL) {
1758 		mtx_lock(sfp->sf_mtx);
1759 		if (sfp->sf_si != NULL) {
1760 			TAILQ_REMOVE(&sfp->sf_si->si_tdlist, sfp, sf_threads);
1761 		}
1762 		mtx_unlock(sfp->sf_mtx);
1763 	}
1764 	free(sfp, M_SELFD);
1765 }
1766 
1767 /* Drain the waiters tied to all the selfd belonging the specified selinfo. */
1768 void
1769 seldrain(struct selinfo *sip)
1770 {
1771 
1772 	/*
1773 	 * This functionality is already provided by doselwakeup(), so it
1774 	 * is enough to call it here.
1775 	 * Eventually, the caller should take care to avoid races between
1776 	 * a thread calling select()/poll() and a file descriptor being
1777 	 * detached, but, again, the races are just the same as for
1778 	 * selwakeup().
1779 	 */
1780 	doselwakeup(sip, -1);
1781 }
1782 
1783 /*
1784  * Record a select request.
1785  */
1786 void
1787 selrecord(struct thread *selector, struct selinfo *sip)
1788 {
1789 	struct selfd *sfp;
1790 	struct seltd *stp;
1791 	struct mtx *mtxp;
1792 
1793 	stp = selector->td_sel;
1794 	/*
1795 	 * Don't record when doing a rescan.
1796 	 */
1797 	if (stp->st_flags & SELTD_RESCAN)
1798 		return;
1799 	/*
1800 	 * Grab one of the preallocated descriptors.
1801 	 */
1802 	sfp = NULL;
1803 	if ((sfp = stp->st_free1) != NULL)
1804 		stp->st_free1 = NULL;
1805 	else if ((sfp = stp->st_free2) != NULL)
1806 		stp->st_free2 = NULL;
1807 	else
1808 		panic("selrecord: No free selfd on selq");
1809 	mtxp = sip->si_mtx;
1810 	if (mtxp == NULL)
1811 		mtxp = mtx_pool_find(mtxpool_select, sip);
1812 	/*
1813 	 * Initialize the sfp and queue it in the thread.
1814 	 */
1815 	sfp->sf_si = sip;
1816 	sfp->sf_mtx = mtxp;
1817 	STAILQ_INSERT_TAIL(&stp->st_selq, sfp, sf_link);
1818 	/*
1819 	 * Now that we've locked the sip, check for initialization.
1820 	 */
1821 	mtx_lock(mtxp);
1822 	if (sip->si_mtx == NULL) {
1823 		sip->si_mtx = mtxp;
1824 		TAILQ_INIT(&sip->si_tdlist);
1825 	}
1826 	/*
1827 	 * Add this thread to the list of selfds listening on this selinfo.
1828 	 */
1829 	TAILQ_INSERT_TAIL(&sip->si_tdlist, sfp, sf_threads);
1830 	mtx_unlock(sip->si_mtx);
1831 }
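/*
 * Editorial sketch of the usual consumer side, not part of the original
 * file (the mydev names and softc fields are hypothetical): a driver's
 * poll method calls selrecord() when nothing is ready yet, and its
 * data-arrival path calls selwakeup() on the same selinfo, which lands in
 * doselwakeup() below and wakes the selecting threads.
 */
static int
mydev_poll(struct cdev *dev, int events, struct thread *td)
{
	struct mydev_softc *sc = dev->si_drv1;
	int revents = 0;

	mtx_lock(&sc->sc_mtx);
	if (events & (POLLIN | POLLRDNORM)) {
		if (sc->sc_bytes_ready > 0)
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(td, &sc->sc_rsel);	/* queue this thread */
	}
	mtx_unlock(&sc->sc_mtx);
	return (revents);
}

static void
mydev_data_arrived(struct mydev_softc *sc)
{
	mtx_lock(&sc->sc_mtx);
	sc->sc_bytes_ready++;
	selwakeup(&sc->sc_rsel);	/* wake threads queued by selrecord() */
	mtx_unlock(&sc->sc_mtx);
}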
1832 
1833 /* Wake up a selecting thread. */
1834 void
1835 selwakeup(struct selinfo *sip)
1836 {
1837 	doselwakeup(sip, -1);
1838 }
1839 
1840 /* Wake up a selecting thread, and set its priority. */
1841 void
1842 selwakeuppri(struct selinfo *sip, int pri)
1843 {
1844 	doselwakeup(sip, pri);
1845 }
1846 
1847 /*
1848  * Do a wakeup when a selectable event occurs.
1849  */
1850 static void
1851 doselwakeup(struct selinfo *sip, int pri)
1852 {
1853 	struct selfd *sfp;
1854 	struct selfd *sfn;
1855 	struct seltd *stp;
1856 
1857 	/* If it's not initialized there can't be any waiters. */
1858 	if (sip->si_mtx == NULL)
1859 		return;
1860 	/*
1861 	 * Locking the selinfo locks all selfds associated with it.
1862 	 */
1863 	mtx_lock(sip->si_mtx);
1864 	TAILQ_FOREACH_SAFE(sfp, &sip->si_tdlist, sf_threads, sfn) {
1865 		/*
1866 		 * Once we remove this sfp from the list and clear the
1867 		 * sf_si, seltdclear will know to ignore this si.
1868 		 */
1869 		TAILQ_REMOVE(&sip->si_tdlist, sfp, sf_threads);
1870 		stp = sfp->sf_td;
1871 		mtx_lock(&stp->st_mtx);
1872 		stp->st_flags |= SELTD_PENDING;
1873 		cv_broadcastpri(&stp->st_wait, pri);
1874 		mtx_unlock(&stp->st_mtx);
1875 		/*
1876 		 * Paired with selfdfree.
1877 		 *
1878 		 * Storing this only after the wakeup provides an invariant that
1879 		 * stp is not used after selfdfree returns.
1880 		 */
1881 		atomic_store_rel_ptr((uintptr_t *)&sfp->sf_si, (uintptr_t)NULL);
1882 	}
1883 	mtx_unlock(sip->si_mtx);
1884 }
1885 
1886 static void
1887 seltdinit(struct thread *td)
1888 {
1889 	struct seltd *stp;
1890 
1891 	stp = td->td_sel;
1892 	if (stp != NULL) {
1893 		MPASS(stp->st_flags == 0);
1894 		MPASS(STAILQ_EMPTY(&stp->st_selq));
1895 		return;
1896 	}
1897 	stp = malloc(sizeof(*stp), M_SELECT, M_WAITOK|M_ZERO);
1898 	mtx_init(&stp->st_mtx, "sellck", NULL, MTX_DEF);
1899 	cv_init(&stp->st_wait, "select");
1900 	stp->st_flags = 0;
1901 	STAILQ_INIT(&stp->st_selq);
1902 	td->td_sel = stp;
1903 }
1904 
1905 static int
1906 seltdwait(struct thread *td, sbintime_t sbt, sbintime_t precision)
1907 {
1908 	struct seltd *stp;
1909 	int error;
1910 
1911 	stp = td->td_sel;
1912 	/*
1913 	 * An event of interest may occur while we do not hold the seltd
1914 	 * lock, so check the pending flag before we sleep.
1915 	 */
1916 	mtx_lock(&stp->st_mtx);
1917 	/*
1918 	 * Any further calls to selrecord will be a rescan.
1919 	 */
1920 	stp->st_flags |= SELTD_RESCAN;
1921 	if (stp->st_flags & SELTD_PENDING) {
1922 		mtx_unlock(&stp->st_mtx);
1923 		return (0);
1924 	}
1925 	if (sbt == 0)
1926 		error = EWOULDBLOCK;
1927 	else if (sbt != -1)
1928 		error = cv_timedwait_sig_sbt(&stp->st_wait, &stp->st_mtx,
1929 		    sbt, precision, C_ABSOLUTE);
1930 	else
1931 		error = cv_wait_sig(&stp->st_wait, &stp->st_mtx);
1932 	mtx_unlock(&stp->st_mtx);
1933 
1934 	return (error);
1935 }
1936 
1937 void
1938 seltdfini(struct thread *td)
1939 {
1940 	struct seltd *stp;
1941 
1942 	stp = td->td_sel;
1943 	if (stp == NULL)
1944 		return;
1945 	MPASS(stp->st_flags == 0);
1946 	MPASS(STAILQ_EMPTY(&stp->st_selq));
1947 	if (stp->st_free1)
1948 		free(stp->st_free1, M_SELFD);
1949 	if (stp->st_free2)
1950 		free(stp->st_free2, M_SELFD);
1951 	td->td_sel = NULL;
1952 	cv_destroy(&stp->st_wait);
1953 	mtx_destroy(&stp->st_mtx);
1954 	free(stp, M_SELECT);
1955 }
1956 
1957 /*
1958  * Remove the references to the thread from all of the objects we were
1959  * polling.
1960  */
1961 static void
1962 seltdclear(struct thread *td)
1963 {
1964 	struct seltd *stp;
1965 	struct selfd *sfp;
1966 	struct selfd *sfn;
1967 
1968 	stp = td->td_sel;
1969 	STAILQ_FOREACH_SAFE(sfp, &stp->st_selq, sf_link, sfn)
1970 		selfdfree(stp, sfp);
1971 	stp->st_flags = 0;
1972 }
1973 
1974 static void selectinit(void *);
1975 SYSINIT(select, SI_SUB_SYSCALLS, SI_ORDER_ANY, selectinit, NULL);
1976 static void
1977 selectinit(void *dummy __unused)
1978 {
1979 
1980 	mtxpool_select = mtx_pool_create("select mtxpool", 128, MTX_DEF);
1981 }
1982 
1983 /*
1984  * Set up a syscall return value that follows the convention specified for
1985  * posix_* functions.
1986  */
1987 int
1988 kern_posix_error(struct thread *td, int error)
1989 {
1990 
1991 	if (error <= 0)
1992 		return (error);
1993 	td->td_errno = error;
1994 	td->td_pflags |= TDP_NERRNO;
1995 	td->td_retval[0] = error;
1996 	return (0);
1997 }
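/*
 * Editorial userland sketch, not part of the original file: thanks to the
 * convention above, posix_fallocate(3) hands back the error number itself
 * rather than -1 with errno set.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

static void
reserve_space(int fd, off_t len)
{
	int rc = posix_fallocate(fd, 0, len);

	if (rc != 0)	/* rc is the error number; errno is not used */
		fprintf(stderr, "posix_fallocate: %s\n", strerror(rc));
}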
1998