/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <uapi/linux/poll.h>
#include <uapi/linux/eventpoll.h>

/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC		832
#define FRONTEND_STACK_ALLOC	256
#define SELECT_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC	FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;
	__poll_t _key;
} poll_table;

static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && p->_qproc) {
		p->_qproc(filp, wait_address, p);
		/*
		 * This memory barrier is paired with the one in
		 * wq_has_sleeper().  See the comment above prepare_to_wait():
		 * we need to ensure that subsequent tests in this thread
		 * can't be reordered with __add_wait_queue() in _qproc()
		 * paths.
		 */
		smp_mb();
	}
}
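/*
 * Illustrative sketch (not part of this header): the driver-side wake-up
 * path that pairs with the smp_mb() in poll_wait() above.  The barrier in
 * wq_has_sleeper() pairs with it, so a waiter that has just registered on
 * the queue cannot miss a condition published right before the wake-up.
 * The mydev structure and its members are hypothetical names used only
 * for illustration.
 */
#if 0
static void mydev_rx_complete(struct mydev *dev)
{
	/* Publish the new condition before checking for sleepers. */
	dev->rx_ready = true;

	/* wq_has_sleeper() contains the barrier paired with poll_wait(). */
	if (wq_has_sleeper(&dev->readq))
		wake_up_interruptible(&dev->readq);
}
#endif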
/*
 * Return true if it is guaranteed that poll will not wait. This is the case
 * if the poll() of another file descriptor in the set got an event, so there
 * is no need for waiting.
 */
static inline bool poll_does_not_wait(const poll_table *p)
{
	return p == NULL || p->_qproc == NULL;
}

/*
 * Return the set of events that the application wants to poll for.
 * This is useful for drivers that need to know whether a DMA transfer has
 * to be started implicitly on poll(). You typically only want to do that
 * if the application is actually polling for POLLIN and/or POLLOUT.
 */
static inline __poll_t poll_requested_events(const poll_table *p)
{
	return p ? p->_key : ~(__poll_t)0;
}

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_qproc = qproc;
	pt->_key   = ~(__poll_t)0; /* all events enabled */
}

static inline bool file_can_poll(struct file *file)
{
	return file->f_op->poll;
}

static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
	if (unlikely(!file->f_op->poll))
		return DEFAULT_POLLMASK;
	return file->f_op->poll(file, pt);
}

struct poll_table_entry {
	struct file *filp;
	__poll_t key;
	wait_queue_entry_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern u64 select_estimate_accuracy(struct timespec64 *tv);

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec64 *end_time);

extern int poll_select_set_timeout(struct timespec64 *to, time64_t sec,
				   long nsec);

#define __MAP(v, from, to) \
	(from < to ? (v & from) * (to/from) : (v & from) / (from/to))

static inline __u16 mangle_poll(__poll_t val)
{
	__u16 v = (__force __u16)val;
#define M(X) __MAP(v, (__force __u16)EPOLL##X, POLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}

static inline __poll_t demangle_poll(u16 val)
{
#define M(X) (__force __poll_t)__MAP(val, POLL##X, (__force __u16)EPOLL##X)
	return M(IN) | M(OUT) | M(PRI) | M(ERR) | M(NVAL) |
		M(RDNORM) | M(RDBAND) | M(WRNORM) | M(WRBAND) |
		M(HUP) | M(RDHUP) | M(MSG);
#undef M
}
#undef __MAP

#endif /* _LINUX_POLL_H */
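/*
 * Illustrative sketch (not part of this header): a typical driver ->poll()
 * method built on the helpers above.  It registers on the device wait queue
 * with poll_wait() and consults poll_requested_events() so that an expensive
 * receive path (e.g. arming a DMA transfer) is only started when the caller
 * actually asked for read events.  The mydev structure, its members and
 * mydev_start_rx_dma() are hypothetical names used only for illustration.
 */
#if 0
static __poll_t mydev_poll(struct file *file, poll_table *wait)
{
	struct mydev *dev = file->private_data;
	__poll_t mask = 0;

	/* No-op when called with a NULL queueing function (see poll_wait()). */
	poll_wait(file, &dev->readq, wait);

	/* Only arm the receive DMA when the caller polls for read events. */
	if (poll_requested_events(wait) & (EPOLLIN | EPOLLRDNORM))
		mydev_start_rx_dma(dev);

	if (dev->rx_ready)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
#endif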