#ifndef _LINUX_POLL_H
#define _LINUX_POLL_H

#include <asm/poll.h>

#ifdef __KERNEL__

#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/wait.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <asm/uaccess.h>

extern struct ctl_table epoll_table[]; /* for sysctl */
/* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
   additional memory. */
#define MAX_STACK_ALLOC 832
#define FRONTEND_STACK_ALLOC 256
#define SELECT_STACK_ALLOC FRONTEND_STACK_ALLOC
#define POLL_STACK_ALLOC FRONTEND_STACK_ALLOC
#define WQUEUES_STACK_ALLOC (MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
#define N_INLINE_POLL_ENTRIES (WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
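
/*
 * The arithmetic above: WQUEUES_STACK_ALLOC is 832 - 256 = 576 bytes,
 * so at most 576 / sizeof(struct poll_table_entry) poll entries are kept
 * on the stack.  The exact count is architecture dependent; a 64-byte
 * entry, for instance, would yield nine inline entries.
 */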

#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)

struct poll_table_struct;

/*
 * structures and helpers for f_op->poll implementations
 */
typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_table_struct *);

/*
 * Do not touch the structure directly, use the access functions
 * poll_does_not_wait() and poll_requested_events() instead.
 */
typedef struct poll_table_struct {
	poll_queue_proc _qproc;
	unsigned long _key;
} poll_table;

static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p)
{
	if (p && p->_qproc && wait_address)
		p->_qproc(filp, wait_address, p);
}
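
/*
 * Illustrative sketch only (the driver type and fields below are
 * hypothetical, not defined anywhere): a typical f_op->poll
 * implementation registers each wait queue it may later wake via
 * poll_wait() and then returns the mask of currently ready events.
 *
 *	static unsigned int example_poll(struct file *filp, poll_table *wait)
 *	{
 *		struct example_dev *dev = filp->private_data;
 *		unsigned int mask = 0;
 *
 *		poll_wait(filp, &dev->read_wq, wait);
 *		if (dev->data_ready)
 *			mask |= POLLIN | POLLRDNORM;
 *		if (dev->space_free)
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */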

/*
 * Return true if it is guaranteed that poll will not wait. This is the case
 * if the poll() of another file descriptor in the set got an event, so there
 * is no need for waiting.
 */
static inline bool poll_does_not_wait(const poll_table *p)
{
	return p == NULL || p->_qproc == NULL;
}

/*
 * Return the set of events that the application wants to poll for.
 * This is useful for drivers that need to know whether a DMA transfer has
 * to be started implicitly on poll(). You typically only want to do that
 * if the application is actually polling for POLLIN and/or POLLOUT.
 */
static inline unsigned long poll_requested_events(const poll_table *p)
{
	return p ? p->_key : ~0UL;
}
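
/*
 * Sketch of the DMA case described above (again with hypothetical
 * driver helpers): start the transfer only when the caller actually
 * asked for read readiness.
 *
 *	static unsigned int example_poll(struct file *filp, poll_table *wait)
 *	{
 *		struct example_dev *dev = filp->private_data;
 *
 *		if (poll_requested_events(wait) & (POLLIN | POLLRDNORM))
 *			example_start_capture_dma(dev);
 *		poll_wait(filp, &dev->wq, wait);
 *		return example_ready_mask(dev);
 *	}
 */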

static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
{
	pt->_qproc = qproc;
	pt->_key   = ~0UL; /* all events enabled */
}

struct poll_table_entry {
	struct file *filp;
	unsigned long key;
	wait_queue_t wait;
	wait_queue_head_t *wait_address;
};

/*
 * Structures and helpers for select/poll syscall
 */
struct poll_wqueues {
	poll_table pt;
	struct poll_table_page *table;
	struct task_struct *polling_task;
	int triggered;
	int error;
	int inline_index;
	struct poll_table_entry inline_entries[N_INLINE_POLL_ENTRIES];
};

extern void poll_initwait(struct poll_wqueues *pwq);
extern void poll_freewait(struct poll_wqueues *pwq);
extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
				 ktime_t *expires, unsigned long slack);
extern long select_estimate_accuracy(struct timespec *tv);

static inline int poll_schedule(struct poll_wqueues *pwq, int state)
{
	return poll_schedule_timeout(pwq, state, NULL, 0);
}
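
/*
 * Rough outline of how the select/poll core (fs/select.c) drives these
 * helpers; simplified for illustration:
 *
 *	struct poll_wqueues table;
 *
 *	poll_initwait(&table);
 *	for (;;) {
 *		// call each file's f_op->poll(file, &table.pt) and
 *		// collect the returned event masks
 *		if (events_found || signal_pending(current) || timed_out)
 *			break;
 *		poll_schedule_timeout(&table, TASK_INTERRUPTIBLE,
 *				      end_time, slack);
 *	}
 *	poll_freewait(&table);
 */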

/*
 * Scalable version of the fd_set.
 */

typedef struct {
	unsigned long *in, *out, *ex;
	unsigned long *res_in, *res_out, *res_ex;
} fd_set_bits;

/*
 * How many longwords for "nr" bits?
 */
#define FDS_BITPERLONG (8*sizeof(long))
#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
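
/*
 * Example of the arithmetic: on a 64-bit kernel (8-byte long),
 * FDS_BITPERLONG is 64, so FDS_LONGS(1024) is 16 and FDS_BYTES(1024)
 * is 128.  Counts that are not a multiple of the word size round up,
 * e.g. FDS_LONGS(65) is 2.
 */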

/*
 * We do a VERIFY_WRITE here even though we are only reading this time:
 * we'll write to it eventually..
 *
 * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
 */
static inline
int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	nr = FDS_BYTES(nr);
	if (ufdset)
		return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;

	memset(fdset, 0, nr);
	return 0;
}

static inline unsigned long __must_check
set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
{
	if (ufdset)
		return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
	return 0;
}

static inline
void zero_fd_set(unsigned long nr, unsigned long *fdset)
{
	memset(fdset, 0, FDS_BYTES(nr));
}
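
/*
 * Simplified sketch of how the select core typically uses these helpers:
 * a single buffer of 6 * FDS_BYTES(n) bytes is carved into the six bitmaps
 * of fd_set_bits, the input sets are copied in, the result sets are zeroed,
 * and the results are copied back out afterwards.
 *
 *	unsigned long *bits;		// storage for 6 * FDS_LONGS(n) longs
 *	size_t size = FDS_LONGS(n);
 *	fd_set_bits fds;
 *
 *	fds.in      = bits;
 *	fds.out     = bits + size;
 *	fds.ex      = bits + 2 * size;
 *	fds.res_in  = bits + 3 * size;
 *	fds.res_out = bits + 4 * size;
 *	fds.res_ex  = bits + 5 * size;
 *
 *	get_fd_set(n, inp, fds.in);	// likewise for outp and exp
 *	zero_fd_set(n, fds.res_in);	// likewise for res_out and res_ex
 *	ret = do_select(n, &fds, end_time);
 *	set_fd_set(n, inp, fds.res_in);	// copy results back to user space
 */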

#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)

extern int do_select(int n, fd_set_bits *fds, struct timespec *end_time);
extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
		       struct timespec *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
			   fd_set __user *exp, struct timespec *end_time);

extern int poll_select_set_timeout(struct timespec *to, long sec, long nsec);
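
/*
 * Usage sketch: poll_select_set_timeout() turns a relative (sec, nsec)
 * timeout into an absolute expiry stored in *to and fails for values that
 * do not form a valid timespec; a zero timeout means "do not block".
 *
 *	struct timespec end_time;
 *
 *	if (poll_select_set_timeout(&end_time, 1, 500 * NSEC_PER_MSEC))
 *		return -EINVAL;
 *	ret = do_select(n, &fds, &end_time);
 */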

#endif /* __KERNEL__ */

#endif /* _LINUX_POLL_H */