#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H

#include <asm/poll.h>

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>

extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))

#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)

struct poll_table_struct;

/*
 * Structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

typedef struct poll_table_struct {
	poll_queue_proc qproc;
	unsigned long key;
} poll_table;

static inline void poll_wait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address)
		p->qproc(filp, wait_address, p);
}

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->qproc = qproc;
	pt->key = ~0UL; /* all events enabled */
}

struct poll_table_entry {
	struct file *filp;
	unsigned long key;
	wait_queue_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for sys_select/sys_poll
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack);

static inline int poll_schedule(struct poll_wqueues *pwq, int state)
{
	return poll_schedule_timeout(pwq, state, NULL, 0);
}

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG (8*sizeof(long))
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually.
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
		       struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time);

extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);

#endif /* KERNEL */

#endif /* _LINUX_POLL_H */
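
/*
 * A minimal sketch of a driver-side f_op->poll implementation built on the
 * helpers above: poll_wait() registers the driver's wait queue and the
 * returned mask uses the same POLL* bits as DEFAULT_POLLMASK.  The type
 * struct my_dev, its waitq member and the my_dev_readable() /
 * my_dev_writable() predicates are hypothetical names standing in for
 * driver-private state; the sketch is kept under #if 0 so it is never
 * compiled.
 */
#if 0
static unsigned int my_dev_poll(struct file *filp, poll_table *wait)
{
	struct my_dev *dev = filp->private_data;
	unsigned int mask = 0;

	/*
	 * Queue dev->waitq via p->qproc.  poll_wait() only registers the
	 * wait queue; the actual sleeping is done later by the select/poll
	 * core (poll_schedule_timeout()).
	 */
	poll_wait(filp, &dev->waitq, wait);

	if (my_dev_readable(dev))
		mask |= POLLIN | POLLRDNORM;	/* data available to read */
	if (my_dev_writable(dev))
		mask |= POLLOUT | POLLWRNORM;	/* space available to write */

	return mask;
}
#endif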