/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009  Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <asm/atomic.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll:
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, which might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is also acquired when inserting an epoll fd onto another epoll
 * fd. We do this so that we can walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * about to do so.
 * It would be possible to drop "ep->mtx" and use the global
 * mutex "epmutex" (together with "ep->lock") instead, but keeping
 * "ep->mtx" makes the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" guarantees
 * better scalability.
 */

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLONESHOT | EPOLLET)

/* Maximum number of nesting levels allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
        struct file *file;
        int fd;
};

/*
 * Structure used to track possible nested calls, to detect too deep
 * recursions and loop cycles.
 */
struct nested_call_node {
        struct list_head llink;
        void *cookie;
        void *ctx;
};

/*
 * This structure is used as a collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
        struct list_head tasks_call_list;
        spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 */
struct epitem {
        /* RB tree node used to link this structure to the eventpoll RB tree */
        struct rb_node rbn;

        /* List header used to link this structure to the eventpoll ready list */
        struct list_head rdllink;

        /*
         * Works together with "struct eventpoll"->ovflist to keep the
         * singly linked chain of items.
         */
        struct epitem *next;

        /* The file descriptor information this item refers to */
        struct epoll_filefd ffd;

        /* Number of active wait queues attached to poll operations */
        int nwait;

        /* List containing poll wait queues */
        struct list_head pwqlist;

        /* The "container" of this item */
        struct eventpoll *ep;

        /* List header used to link this item to the "struct file" items list */
        struct list_head fllink;

        /* The structure that describes the events of interest and the source fd */
        struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
        /* Protects access to this structure */
        spinlock_t lock;

        /*
         * This mutex is used to ensure that files are not removed
         * while epoll is using them. This is held during the event
         * collection loop, the file cleanup path, the epoll file exit
         * code and the ctl operations.
         */
        struct mutex mtx;

        /* Wait queue used by sys_epoll_wait() */
        wait_queue_head_t wq;

        /* Wait queue used by file->poll() */
        wait_queue_head_t poll_wait;

        /* List of ready file descriptors */
        struct list_head rdllist;

        /* RB tree root used to store monitored fd structs */
        struct rb_root rbr;

        /*
         * This is a singly linked list that chains all the "struct epitem"
         * objects whose events arrived while transferring ready events to
         * userspace without holding ->lock.
         */
        struct epitem *ovflist;

        /* The user that created the eventpoll descriptor */
        struct user_struct *user;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
        /* List header used to link this structure to the "struct epitem" */
        struct list_head llink;

        /* The "base" pointer is set to the containing "struct epitem" */
        struct epitem *base;

        /*
         * Wait queue item that will be linked to the target file wait
         * queue head.
         */
        wait_queue_t wait;

        /* The wait queue head that links the "wait" wait queue item */
        wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
        poll_table pt;
        struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
        int maxevents;
        struct epoll_event __user *events;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long zero;
static long long_max = LONG_MAX;

ctl_table epoll_table[] = {
        {
                .procname       = "max_user_watches",
                .data           = &max_user_watches,
                .maxlen         = sizeof(max_user_watches),
                .mode           = 0644,
                .proc_handler   = proc_doulongvec_minmax,
                .extra1         = &zero,
                .extra2         = &long_max,
        },
        { }
};
#endif /* CONFIG_SYSCTL */

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
                              struct file *file, int fd)
{
        ffd->file = file;
        ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
                             struct epoll_filefd *p2)
{
        return (p1->file > p2->file ? +1 :
                (p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
        return !list_empty(p);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
        return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
        return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
        return op != EPOLL_CTL_DEL;
}

/* Initialize a nested calls tracking structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
        INIT_LIST_HEAD(&ncalls->tasks_call_list);
        spin_lock_init(&ncalls->lock);
}

/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (identified by its cookie) is
 *                  not re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 * @ctx: This instance context.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
                          int (*nproc)(void *, void *, int), void *priv,
                          void *cookie, void *ctx)
{
        int error, call_nests = 0;
        unsigned long flags;
        struct list_head *lsthead = &ncalls->tasks_call_list;
        struct nested_call_node *tncur;
        struct nested_call_node tnode;

        spin_lock_irqsave(&ncalls->lock, flags);

        /*
         * Try to see if the current task is already inside this wakeup call.
         * We use a list here, since the population inside this set is always
         * very much limited.
         */
        list_for_each_entry(tncur, lsthead, llink) {
                if (tncur->ctx == ctx &&
                    (tncur->cookie == cookie || ++call_nests > max_nests)) {
                        /*
                         * Oops ... loop detected or maximum nest level reached.
                         * We abort this wake by breaking the cycle itself.
                         */
                        error = -1;
                        goto out_unlock;
                }
        }

        /* Add the current task and cookie to the list */
        tnode.ctx = ctx;
        tnode.cookie = cookie;
        list_add(&tnode.llink, lsthead);

        spin_unlock_irqrestore(&ncalls->lock, flags);

        /* Call the nested function */
        error = (*nproc)(priv, cookie, call_nests);

        /* Remove the current task from the list */
        spin_lock_irqsave(&ncalls->lock, flags);
        list_del(&tnode.llink);
out_unlock:
        spin_unlock_irqrestore(&ncalls->lock, flags);

        return error;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
                                     unsigned long events, int subclass)
{
        unsigned long flags;

        spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
        wake_up_locked_poll(wqueue, events);
        spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
                                     unsigned long events, int subclass)
{
        wake_up_poll(wqueue, events);
}
#endif

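/*
 * Nested call callback used by ep_poll_safewake(): wakes up the nested
 * wait queue head passed in via the "cookie" argument.
 */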
static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
        ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
                          1 + call_nests);
        return 0;
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors that is no more than
 * EP_MAX_NESTS deep.
 */
static void ep_poll_safewake(wait_queue_head_t *wq)
{
        int this_cpu = get_cpu();

        ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
                       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

        put_cpu();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor.  Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
        struct list_head *lsthead = &epi->pwqlist;
        struct eppoll_entry *pwq;

        while (!list_empty(lsthead)) {
                pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

                list_del(&pwq->llink);
                remove_wait_queue(pwq->whead, &pwq->wait);
                kmem_cache_free(pwq_cache, pwq);
        }
}

/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 *                      for the scan code to call f_op->poll(). Also allows
 *                      for O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
                              int (*sproc)(struct eventpoll *,
                                           struct list_head *, void *),
                              void *priv)
{
        int error, pwake = 0;
        unsigned long flags;
        struct epitem *epi, *nepi;
        LIST_HEAD(txlist);

        /*
         * We need to lock this because we could be hit by
         * eventpoll_release_file() and epoll_ctl().
         */
        mutex_lock(&ep->mtx);

        /*
         * Steal the ready list, and re-init the original one to the
         * empty list. Also, set ep->ovflist to NULL so that events
         * happening while looping without locks are not lost. We cannot
         * have the poll callback queue directly on ep->rdllist,
         * because we want the "sproc" callback to be able to do it
         * in a lockless way.
         */
        spin_lock_irqsave(&ep->lock, flags);
        list_splice_init(&ep->rdllist, &txlist);
        ep->ovflist = NULL;
        spin_unlock_irqrestore(&ep->lock, flags);

        /*
         * Now call the callback function.
         */
        error = (*sproc)(ep, &txlist, priv);

        spin_lock_irqsave(&ep->lock, flags);
        /*
         * During the time we spent inside the "sproc" callback, some
         * other events might have been queued by the poll callback.
         * We re-insert them inside the main ready-list here.
         */
        for (nepi = ep->ovflist; (epi = nepi) != NULL;
             nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
                /*
                 * We need to check if the item is already in the list.
                 * During the "sproc" callback execution time, items are
                 * queued into ->ovflist but the "txlist" might already
                 * contain them, and the list_splice() below takes care of them.
                 */
                if (!ep_is_linked(&epi->rdllink))
                        list_add_tail(&epi->rdllink, &ep->rdllist);
        }
        /*
         * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
         * releasing the lock, events will be queued in the normal way inside
         * ep->rdllist.
         */
        ep->ovflist = EP_UNACTIVE_PTR;

        /*
         * Quickly re-inject items left on "txlist".
         */
        list_splice(&txlist, &ep->rdllist);

        if (!list_empty(&ep->rdllist)) {
                /*
                 * Wake up (if active) both the eventpoll wait list and
                 * the ->poll() wait list (delayed after we release the lock).
                 */
                if (waitqueue_active(&ep->wq))
                        wake_up_locked(&ep->wq);
                if (waitqueue_active(&ep->poll_wait))
                        pwake++;
        }
        spin_unlock_irqrestore(&ep->lock, flags);

        mutex_unlock(&ep->mtx);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(&ep->poll_wait);

        return error;
}

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
        unsigned long flags;
        struct file *file = epi->ffd.file;

        /*
         * Removes poll wait queue hooks. We _have_ to do this without holding
         * the "ep->lock" otherwise a deadlock might occur. This is because of
         * the lock acquisition order: here we would take "ep->lock" and then
         * the wait queue head lock when unregistering the wait queue. The
         * wakeup callback runs while holding the wait queue head lock and
         * calls our callback, which will try to take "ep->lock".
         */
        ep_unregister_pollwait(ep, epi);

        /* Remove the current item from the list of epoll hooks */
        spin_lock(&file->f_lock);
        if (ep_is_linked(&epi->fllink))
                list_del_init(&epi->fllink);
        spin_unlock(&file->f_lock);

        rb_erase(&epi->rbn, &ep->rbr);

        spin_lock_irqsave(&ep->lock, flags);
        if (ep_is_linked(&epi->rdllink))
                list_del_init(&epi->rdllink);
        spin_unlock_irqrestore(&ep->lock, flags);

        /* At this point it is safe to free the eventpoll item */
        kmem_cache_free(epi_cache, epi);

        atomic_long_dec(&ep->user->epoll_watches);

        return 0;
}

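/*
 * Tears down a whole "struct eventpoll": unregisters every poll hook,
 * removes all the remaining "struct epitem" entries and drops the
 * reference on the owning user.
 */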
static void ep_free(struct eventpoll *ep)
{
        struct rb_node *rbp;
        struct epitem *epi;

        /* We need to release all tasks waiting on this file */
        if (waitqueue_active(&ep->poll_wait))
                ep_poll_safewake(&ep->poll_wait);

        /*
         * We need to lock this because we could be hit by
         * eventpoll_release_file() while we're freeing the "struct eventpoll".
         * We do not need to hold "ep->mtx" here because the epoll file
         * is on its way to being removed and no one holds references to it
         * anymore. The only hit might come from eventpoll_release_file(), but
         * holding "epmutex" is sufficient here.
         */
        mutex_lock(&epmutex);

        /*
         * Walks through the whole tree by unregistering poll callbacks.
         */
        for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
                epi = rb_entry(rbp, struct epitem, rbn);

                ep_unregister_pollwait(ep, epi);
        }

        /*
         * Walks through the whole tree freeing each "struct epitem". At this
         * point we are sure no poll callbacks will be lingering around, and also by
         * holding "epmutex" we can be sure that no file cleanup code will hit
         * us during this operation. So we can avoid the lock on "ep->lock".
         */
        while ((rbp = rb_first(&ep->rbr)) != NULL) {
                epi = rb_entry(rbp, struct epitem, rbn);
                ep_remove(ep, epi);
        }

        mutex_unlock(&epmutex);
        mutex_destroy(&ep->mtx);
        free_uid(ep->user);
        kfree(ep);
}

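/* Release callback for the epoll file itself: frees the whole instance. */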
static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
        struct eventpoll *ep = file->private_data;

        if (ep)
                ep_free(ep);

        return 0;
}

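/*
 * "sproc" callback used by ep_eventpoll_poll(): reports POLLIN | POLLRDNORM
 * as soon as one item on the ready list still has wanted events pending,
 * and prunes items that turn out not to be ready after all.
 */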
static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
                               void *priv)
{
        struct epitem *epi, *tmp;

        list_for_each_entry_safe(epi, tmp, head, rdllink) {
                if (epi->ffd.file->f_op->poll(epi->ffd.file, NULL) &
                    epi->event.events)
                        return POLLIN | POLLRDNORM;
                else {
                        /*
                         * Item has been dropped into the ready list by the poll
                         * callback, but it's not actually ready, as far as
                         * the caller's requested events go. We can remove it here.
                         */
                        list_del_init(&epi->rdllink);
                }
        }

        return 0;
}

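/* Nested call wrapper around ep_scan_ready_list(), used by ep_eventpoll_poll() */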
static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
        return ep_scan_ready_list(priv, ep_read_events_proc, NULL);
}

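/* f_op->poll() implementation for the epoll file itself */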
static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
        int pollflags;
        struct eventpoll *ep = file->private_data;

        /* Insert inside our poll wait queue */
        poll_wait(file, &ep->poll_wait, wait);

        /*
         * Proceed to find out if wanted events are really available inside
         * the ready list. This needs to be done under ep_call_nested()
         * supervision, since the call to f_op->poll() done on listed files
         * could re-enter here.
         */
        pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
                                   ep_poll_readyevents_proc, ep, ep, current);

        return pollflags != -1 ? pollflags : 0;
}

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
        .release        = ep_eventpoll_release,
        .poll           = ep_eventpoll_poll,
        .llseek         = noop_llseek,
};

/* Fast test to see if the file is an eventpoll file */
static inline int is_file_epoll(struct file *f)
{
        return f->f_op == &eventpoll_fops;
}

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need this facility to correctly clean up files that are
 * closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
        struct list_head *lsthead = &file->f_ep_links;
        struct eventpoll *ep;
        struct epitem *epi;

        /*
         * We don't want to get "file->f_lock" because it is not
         * necessary. It is not necessary because we're in the "struct file"
         * cleanup path, and this means that no one is using this file anymore.
         * So, for example, epoll_ctl() cannot hit here since if we reach this
         * point, the file counter already went to zero and fget() would fail.
         * The only hit might come from ep_free(), but holding the mutex
         * will correctly serialize the operation. We do need to acquire
         * "ep->mtx" after "epmutex" because ep_remove() requires it when called
         * from anywhere but ep_free().
         *
         * Besides, ep_remove() acquires the lock, so we can't hold it here.
         */
        mutex_lock(&epmutex);

        while (!list_empty(lsthead)) {
                epi = list_first_entry(lsthead, struct epitem, fllink);

                ep = epi->ep;
                list_del_init(&epi->fllink);
                mutex_lock(&ep->mtx);
                ep_remove(ep, epi);
                mutex_unlock(&ep->mtx);
        }

        mutex_unlock(&epmutex);
}

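/* Allocates and initializes a new "struct eventpoll", charged to the current user */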
static int ep_alloc(struct eventpoll **pep)
{
        int error;
        struct user_struct *user;
        struct eventpoll *ep;

        user = get_current_user();
        error = -ENOMEM;
        ep = kzalloc(sizeof(*ep), GFP_KERNEL);
        if (unlikely(!ep))
                goto free_uid;

        spin_lock_init(&ep->lock);
        mutex_init(&ep->mtx);
        init_waitqueue_head(&ep->wq);
        init_waitqueue_head(&ep->poll_wait);
        INIT_LIST_HEAD(&ep->rdllist);
        ep->rbr = RB_ROOT;
        ep->ovflist = EP_UNACTIVE_PTR;
        ep->user = user;

        *pep = ep;

        return 0;

free_uid:
        free_uid(user);
        return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
        int kcmp;
        struct rb_node *rbp;
        struct epitem *epi, *epir = NULL;
        struct epoll_filefd ffd;

        ep_set_ffd(&ffd, file, fd);
        for (rbp = ep->rbr.rb_node; rbp; ) {
                epi = rb_entry(rbp, struct epitem, rbn);
                kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
                if (kcmp > 0)
                        rbp = rbp->rb_right;
                else if (kcmp < 0)
                        rbp = rbp->rb_left;
                else {
                        epir = epi;
                        break;
                }
        }

        return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
        int pwake = 0;
        unsigned long flags;
        struct epitem *epi = ep_item_from_wait(wait);
        struct eventpoll *ep = epi->ep;

        spin_lock_irqsave(&ep->lock, flags);

        /*
         * If the event mask does not contain any poll(2) event, we consider the
         * descriptor to be disabled. This condition is likely the effect of the
         * EPOLLONESHOT bit that disables the descriptor when an event is received,
         * until the next EPOLL_CTL_MOD is issued.
         */
        if (!(epi->event.events & ~EP_PRIVATE_BITS))
                goto out_unlock;

        /*
         * Check the events coming with the callback. At this stage, not
         * every device reports the events in the "key" parameter of the
         * callback. We need to be able to handle both cases here, hence the
         * test for "key" != NULL before the event match test.
         */
        if (key && !((unsigned long) key & epi->event.events))
                goto out_unlock;

        /*
         * If we are transferring events to userspace, we can hold no locks
         * (because we're accessing user memory, and because of linux f_op->poll()
         * semantics). All the events that happen during that period of time are
         * chained in ep->ovflist and requeued later on.
         */
        if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
                if (epi->next == EP_UNACTIVE_PTR) {
                        epi->next = ep->ovflist;
                        ep->ovflist = epi;
                }
                goto out_unlock;
        }

        /* If this file is already in the ready list we exit soon */
        if (!ep_is_linked(&epi->rdllink))
                list_add_tail(&epi->rdllink, &ep->rdllist);

        /*
         * Wake up (if active) both the eventpoll wait list and the ->poll()
         * wait list.
         */
        if (waitqueue_active(&ep->wq))
                wake_up_locked(&ep->wq);
        if (waitqueue_active(&ep->poll_wait))
                pwake++;

out_unlock:
        spin_unlock_irqrestore(&ep->lock, flags);

        /* We have to call this outside the lock */
        if (pwake)
                ep_poll_safewake(&ep->poll_wait);

        return 1;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
                                 poll_table *pt)
{
        struct epitem *epi = ep_item_from_epqueue(pt);
        struct eppoll_entry *pwq;

        if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
                init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
                pwq->whead = whead;
                pwq->base = epi;
                add_wait_queue(whead, &pwq->wait);
                list_add_tail(&pwq->llink, &epi->pwqlist);
                epi->nwait++;
        } else {
                /* We have to signal that an error occurred */
                epi->nwait = -1;
        }
}

| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 886 | static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) | 
|  | 887 | { | 
|  | 888 | int kcmp; | 
|  | 889 | struct rb_node **p = &ep->rbr.rb_node, *parent = NULL; | 
|  | 890 | struct epitem *epic; | 
|  | 891 |  | 
|  | 892 | while (*p) { | 
|  | 893 | parent = *p; | 
|  | 894 | epic = rb_entry(parent, struct epitem, rbn); | 
| Pekka Enberg | b030a4d | 2005-06-23 00:10:03 -0700 | [diff] [blame] | 895 | kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 896 | if (kcmp > 0) | 
|  | 897 | p = &parent->rb_right; | 
|  | 898 | else | 
|  | 899 | p = &parent->rb_left; | 
|  | 900 | } | 
|  | 901 | rb_link_node(&epi->rbn, parent, p); | 
|  | 902 | rb_insert_color(&epi->rbn, &ep->rbr); | 
|  | 903 | } | 
|  | 904 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 905 | /* | 
|  | 906 | * Must be called with "mtx" held. | 
|  | 907 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 908 | static int ep_insert(struct eventpoll *ep, struct epoll_event *event, | 
|  | 909 | struct file *tfile, int fd) | 
|  | 910 | { | 
|  | 911 | int error, revents, pwake = 0; | 
|  | 912 | unsigned long flags; | 
| Robin Holt | 52bd19f7 | 2011-01-12 17:00:01 -0800 | [diff] [blame] | 913 | long user_watches; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 914 | struct epitem *epi; | 
|  | 915 | struct ep_pqueue epq; | 
|  | 916 |  | 
| Robin Holt | 52bd19f7 | 2011-01-12 17:00:01 -0800 | [diff] [blame] | 917 | user_watches = atomic_long_read(&ep->user->epoll_watches); | 
|  | 918 | if (unlikely(user_watches >= max_user_watches)) | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 919 | return -ENOSPC; | 
| Christoph Lameter | e94b176 | 2006-12-06 20:33:17 -0800 | [diff] [blame] | 920 | if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL))) | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 921 | return -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 922 |  | 
|  | 923 | /* Item initialization follows here ... */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 924 | INIT_LIST_HEAD(&epi->rdllink); | 
|  | 925 | INIT_LIST_HEAD(&epi->fllink); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 | INIT_LIST_HEAD(&epi->pwqlist); | 
|  | 927 | epi->ep = ep; | 
| Pekka Enberg | b030a4d | 2005-06-23 00:10:03 -0700 | [diff] [blame] | 928 | ep_set_ffd(&epi->ffd, tfile, fd); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 | epi->event = *event; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 930 | epi->nwait = 0; | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 931 | epi->next = EP_UNACTIVE_PTR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 932 |  | 
|  | 933 | /* Initialize the poll table using the queue callback */ | 
|  | 934 | epq.epi = epi; | 
|  | 935 | init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); | 
|  | 936 |  | 
|  | 937 | /* | 
|  | 938 | * Attach the item to the poll hooks and get current event bits. | 
|  | 939 | * We can safely use the file* here because its usage count has | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 940 | * been increased by the caller of this function. Note that after | 
|  | 941 | * this operation completes, the poll callback can start hitting | 
|  | 942 | * the new item. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | */ | 
|  | 944 | revents = tfile->f_op->poll(tfile, &epq.pt); | 
|  | 945 |  | 
|  | 946 | /* | 
|  | 947 | * We have to check if something went wrong during the poll wait queue | 
|  | 948 | * install process. Namely, an allocation for a wait queue failed due | 
|  | 949 | * to high memory pressure. | 
|  | 950 | */ | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 951 | error = -ENOMEM; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | if (epi->nwait < 0) | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 953 | goto error_unregister; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 |  | 
|  | 955 | /* Add the current item to the list of active epoll hooks for this file */ | 
| Jonathan Corbet | 6849991 | 2009-02-06 13:52:43 -0700 | [diff] [blame] | 956 | spin_lock(&tfile->f_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 957 | list_add_tail(&epi->fllink, &tfile->f_ep_links); | 
| Jonathan Corbet | 6849991 | 2009-02-06 13:52:43 -0700 | [diff] [blame] | 958 | spin_unlock(&tfile->f_lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 959 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 960 | /* | 
|  | 961 | * Add the current item to the RB tree. All RB tree operations are | 
|  | 962 | * protected by "mtx", and ep_insert() is called with "mtx" held. | 
|  | 963 | */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 | ep_rbtree_insert(ep, epi); | 
|  | 965 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 966 | /* We have to drop the new item inside our item list to keep track of it */ | 
|  | 967 | spin_lock_irqsave(&ep->lock, flags); | 
|  | 968 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 969 | /* If the file is already "ready" we drop it inside the ready list */ | 
| Pekka Enberg | b030a4d | 2005-06-23 00:10:03 -0700 | [diff] [blame] | 970 | if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 971 | list_add_tail(&epi->rdllink, &ep->rdllist); | 
|  | 972 |  | 
|  | 973 | /* Notify waiting tasks that events are available */ | 
|  | 974 | if (waitqueue_active(&ep->wq)) | 
| Matthew Wilcox | 4a6e9e2 | 2007-08-30 16:10:22 -0400 | [diff] [blame] | 975 | wake_up_locked(&ep->wq); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | if (waitqueue_active(&ep->poll_wait)) | 
|  | 977 | pwake++; | 
|  | 978 | } | 
|  | 979 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 980 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 |  | 
| Robin Holt | 52bd19f7 | 2011-01-12 17:00:01 -0800 | [diff] [blame] | 982 | atomic_long_inc(&ep->user->epoll_watches); | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 983 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 984 | /* We have to call this outside the lock */ | 
|  | 985 | if (pwake) | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 986 | ep_poll_safewake(&ep->poll_wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 988 | return 0; | 
|  | 989 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 990 | error_unregister: | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | ep_unregister_pollwait(ep, epi); | 
|  | 992 |  | 
|  | 993 | /* | 
|  | 994 | * We need to do this because an event could have arrived on some | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 995 | * allocated wait queue. Note that we don't care about the ep->ovflist | 
|  | 996 | * list, since that is used/cleaned only inside a section bound by "mtx". | 
|  | 997 | * And ep_insert() is called with "mtx" held. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | */ | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 999 | spin_lock_irqsave(&ep->lock, flags); | 
| Pekka Enberg | b030a4d | 2005-06-23 00:10:03 -0700 | [diff] [blame] | 1000 | if (ep_is_linked(&epi->rdllink)) | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 1001 | list_del_init(&epi->rdllink); | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1002 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1003 |  | 
| Pekka Enberg | b030a4d | 2005-06-23 00:10:03 -0700 | [diff] [blame] | 1004 | kmem_cache_free(epi_cache, epi); | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 1005 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1006 | return error; | 
|  | 1007 | } | 
|  | 1008 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | /* | 
|  | 1010 | * Modify the interest event mask and, if the new mask matches the current | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1011 | * file status, requeue the item on the ready list. Must be called with "mtx" held. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 | */ | 
|  | 1013 | static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event) | 
|  | 1014 | { | 
|  | 1015 | int pwake = 0; | 
|  | 1016 | unsigned int revents; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1017 |  | 
|  | 1018 | /* | 
| Tony Battersby | e057e15 | 2009-03-31 15:24:15 -0700 | [diff] [blame] | 1019 | * Set the new event interest mask before calling f_op->poll(); | 
|  | 1020 | * otherwise we might miss an event that happens between the | 
|  | 1021 | * f_op->poll() call and registering the new event set. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1022 | */ | 
|  | 1023 | epi->event.events = event->events; | 
| Tony Battersby | e057e15 | 2009-03-31 15:24:15 -0700 | [diff] [blame] | 1024 | epi->event.data = event->data; /* protected by mtx */ | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1025 |  | 
|  | 1026 | /* | 
|  | 1027 | * Get current event bits. We can safely use the file* here because | 
|  | 1028 | * its usage count has been increased by the caller of this function. | 
|  | 1029 | */ | 
|  | 1030 | revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL); | 
|  | 1031 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1032 | /* | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1033 | * If the item is "hot" and it is not registered inside the ready | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 1034 | * list, push it inside. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | */ | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1036 | if (revents & event->events) { | 
| Tony Battersby | e057e15 | 2009-03-31 15:24:15 -0700 | [diff] [blame] | 1037 | spin_lock_irq(&ep->lock); | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1038 | if (!ep_is_linked(&epi->rdllink)) { | 
|  | 1039 | list_add_tail(&epi->rdllink, &ep->rdllist); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1040 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1041 | /* Notify waiting tasks that events are available */ | 
|  | 1042 | if (waitqueue_active(&ep->wq)) | 
| Matthew Wilcox | 4a6e9e2 | 2007-08-30 16:10:22 -0400 | [diff] [blame] | 1043 | wake_up_locked(&ep->wq); | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1044 | if (waitqueue_active(&ep->poll_wait)) | 
|  | 1045 | pwake++; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1046 | } | 
| Tony Battersby | e057e15 | 2009-03-31 15:24:15 -0700 | [diff] [blame] | 1047 | spin_unlock_irq(&ep->lock); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1048 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1049 |  | 
|  | 1050 | /* We have to call this outside the lock */ | 
|  | 1051 | if (pwake) | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1052 | ep_poll_safewake(&ep->poll_wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 |  | 
|  | 1054 | return 0; | 
|  | 1055 | } | 
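/*
 * A minimal user-space sketch of how ep_modify() is typically reached: after
 * an EPOLLONESHOT event has been consumed, the descriptor is re-armed with
 * EPOLL_CTL_MOD. The fd names are hypothetical; the block is illustrative
 * only and excluded from the kernel build.
 */
#if 0	/* user-space sketch, not compiled with the kernel */
#include <sys/epoll.h>
#include <stdio.h>

/* Re-arm a one-shot descriptor after its event has been handled. */
static int rearm_oneshot(int epfd, int fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLONESHOT,	/* reinstall the interest set */
		.data.fd = fd,
	};

	/* EPOLL_CTL_MOD ends up in ep_modify(), which also requeues the item
	 * if the new mask already matches the file's current state. */
	if (epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev) == -1) {
		perror("epoll_ctl(EPOLL_CTL_MOD)");
		return -1;
	}
	return 0;
}
#endif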
|  | 1056 |  | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1057 | static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head, | 
|  | 1058 | void *priv) | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1059 | { | 
|  | 1060 | struct ep_send_events_data *esed = priv; | 
|  | 1061 | int eventcnt; | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1062 | unsigned int revents; | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1063 | struct epitem *epi; | 
|  | 1064 | struct epoll_event __user *uevent; | 
|  | 1065 |  | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1066 | /* | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1067 | * We can loop without lock because we are passed a task private list. | 
|  | 1068 | * Items cannot vanish during the loop because ep_scan_ready_list() is | 
|  | 1069 | * holding "mtx" during this call. | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1070 | */ | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1071 | for (eventcnt = 0, uevent = esed->events; | 
|  | 1072 | !list_empty(head) && eventcnt < esed->maxevents;) { | 
|  | 1073 | epi = list_first_entry(head, struct epitem, rdllink); | 
|  | 1074 |  | 
|  | 1075 | list_del_init(&epi->rdllink); | 
|  | 1076 |  | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1077 | revents = epi->ffd.file->f_op->poll(epi->ffd.file, NULL) & | 
|  | 1078 | epi->event.events; | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1079 |  | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1080 | /* | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1081 | * If the event mask intersect the caller-requested one, | 
|  | 1082 | * deliver the event to userspace. Again, ep_scan_ready_list() | 
|  | 1083 | * is holding "mtx", so no operations coming from userspace | 
|  | 1084 | * can change the item. | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1085 | */ | 
|  | 1086 | if (revents) { | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1087 | if (__put_user(revents, &uevent->events) || | 
| Tony Battersby | d030588 | 2009-03-31 15:24:14 -0700 | [diff] [blame] | 1088 | __put_user(epi->event.data, &uevent->data)) { | 
|  | 1089 | list_add(&epi->rdllink, head); | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1090 | return eventcnt ? eventcnt : -EFAULT; | 
| Tony Battersby | d030588 | 2009-03-31 15:24:14 -0700 | [diff] [blame] | 1091 | } | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1092 | eventcnt++; | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1093 | uevent++; | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1094 | if (epi->event.events & EPOLLONESHOT) | 
|  | 1095 | epi->event.events &= EP_PRIVATE_BITS; | 
|  | 1096 | else if (!(epi->event.events & EPOLLET)) { | 
|  | 1097 | /* | 
|  | 1098 | * If this file has been added in Level | 
|  | 1099 | * Triggered mode, we need to insert it back into | 
|  | 1100 | * the ready list, so that the next call to | 
|  | 1101 | * epoll_wait() will check the event | 
|  | 1102 | * availability again. At this point, no one can insert | 
|  | 1103 | * into ep->rdllist besides us. The epoll_ctl() | 
|  | 1104 | * callers are locked out by | 
|  | 1105 | * ep_scan_ready_list() holding "mtx" and the | 
|  | 1106 | * poll callback will queue them in ep->ovflist. | 
|  | 1107 | */ | 
|  | 1108 | list_add_tail(&epi->rdllink, &ep->rdllist); | 
|  | 1109 | } | 
|  | 1110 | } | 
|  | 1111 | } | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1112 |  | 
|  | 1113 | return eventcnt; | 
|  | 1114 | } | 
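/*
 * A hedged user-space sketch of what the EPOLLET branch above implies for
 * callers: an edge-triggered item is not reinserted into the ready list, so
 * the consumer must drain the (assumed non-blocking) descriptor until EAGAIN
 * before the next epoll_wait(). Illustrative only, not part of the build.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>
#include <errno.h>
#include <unistd.h>

static void drain_edge_triggered(int fd)
{
	char buf[4096];
	ssize_t n;

	/* With EPOLLET the wakeup fires on state transitions only, so keep
	 * reading until the kernel reports that nothing is left. */
	for (;;) {
		n = read(fd, buf, sizeof(buf));
		if (n > 0)
			continue;		/* consume the data */
		if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK))
			break;			/* fully drained */
		break;				/* EOF or a real error */
	}
}
#endif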
|  | 1115 |  | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1116 | static int ep_send_events(struct eventpoll *ep, | 
|  | 1117 | struct epoll_event __user *events, int maxevents) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1118 | { | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1119 | struct ep_send_events_data esed; | 
| Davide Libenzi | 6192bd5 | 2007-05-08 00:25:41 -0700 | [diff] [blame] | 1120 |  | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1121 | esed.maxevents = maxevents; | 
|  | 1122 | esed.events = events; | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1123 |  | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1124 | return ep_scan_ready_list(ep, ep_send_events_proc, &esed); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1125 | } | 
|  | 1126 |  | 
| Eric Dumazet | 0781b90 | 2011-02-01 15:52:35 -0800 | [diff] [blame] | 1127 | static inline struct timespec ep_set_mstimeout(long ms) | 
|  | 1128 | { | 
|  | 1129 | struct timespec now, ts = { | 
|  | 1130 | .tv_sec = ms / MSEC_PER_SEC, | 
|  | 1131 | .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), | 
|  | 1132 | }; | 
|  | 1133 |  | 
|  | 1134 | ktime_get_ts(&now); | 
|  | 1135 | return timespec_add_safe(now, ts); | 
|  | 1136 | } | 
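/*
 * A worked instance of the arithmetic above, assuming the usual MSEC_PER_SEC
 * (1000) and NSEC_PER_MSEC (1000000) constants: a 1250 ms timeout splits into
 * 1 s plus 250,000,000 ns before being added to the current time to form an
 * absolute expiry. Standalone illustration only.
 */
#if 0	/* illustration, not built */
static struct timespec example_split_1250ms(void)
{
	long ms = 1250;
	struct timespec ts = {
		.tv_sec  = ms / MSEC_PER_SEC,			/* 1 second */
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),	/* 250000000 ns */
	};

	return ts;	/* the caller adds this to "now", as above */
}
#endif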
|  | 1137 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1138 | static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, | 
|  | 1139 | int maxevents, long timeout) | 
|  | 1140 | { | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1141 | int res, eavail, timed_out = 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1142 | unsigned long flags; | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1143 | long slack; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1144 | wait_queue_t wait; | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1145 | ktime_t expires, *to = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1146 |  | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1147 | if (timeout > 0) { | 
| Eric Dumazet | 0781b90 | 2011-02-01 15:52:35 -0800 | [diff] [blame] | 1148 | struct timespec end_time = ep_set_mstimeout(timeout); | 
|  | 1149 |  | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1150 | slack = select_estimate_accuracy(&end_time); | 
|  | 1151 | to = &expires; | 
|  | 1152 | *to = timespec_to_ktime(end_time); | 
|  | 1153 | } else if (timeout == 0) { | 
|  | 1154 | timed_out = 1; | 
|  | 1155 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1156 |  | 
|  | 1157 | retry: | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1158 | spin_lock_irqsave(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1159 |  | 
|  | 1160 | res = 0; | 
|  | 1161 | if (list_empty(&ep->rdllist)) { | 
|  | 1162 | /* | 
|  | 1163 | * We don't have any available event to return to the caller. | 
|  | 1164 | * We need to sleep here, and we will be woken up by | 
|  | 1165 | * ep_poll_callback() when events become available. | 
|  | 1166 | */ | 
|  | 1167 | init_waitqueue_entry(&wait, current); | 
| Changli Gao | a93d2f1 | 2010-05-07 14:33:26 +0800 | [diff] [blame] | 1168 | __add_wait_queue_exclusive(&ep->wq, &wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1169 |  | 
|  | 1170 | for (;;) { | 
|  | 1171 | /* | 
|  | 1172 | * We don't want to sleep if the ep_poll_callback() sends us | 
|  | 1173 | * a wakeup in between. That's why we set the task state | 
|  | 1174 | * to TASK_INTERRUPTIBLE before doing the checks. | 
|  | 1175 | */ | 
|  | 1176 | set_current_state(TASK_INTERRUPTIBLE); | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1177 | if (!list_empty(&ep->rdllist) || timed_out) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | break; | 
|  | 1179 | if (signal_pending(current)) { | 
|  | 1180 | res = -EINTR; | 
|  | 1181 | break; | 
|  | 1182 | } | 
|  | 1183 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1184 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1185 | if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) | 
|  | 1186 | timed_out = 1; | 
|  | 1187 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1188 | spin_lock_irqsave(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1189 | } | 
| Davide Libenzi | 3419b23 | 2006-06-25 05:48:14 -0700 | [diff] [blame] | 1190 | __remove_wait_queue(&ep->wq, &wait); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1191 |  | 
|  | 1192 | set_current_state(TASK_RUNNING); | 
|  | 1193 | } | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1194 | /* Is it worth trying to dig for events? */ | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1195 | eavail = !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1196 |  | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1197 | spin_unlock_irqrestore(&ep->lock, flags); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1198 |  | 
|  | 1199 | /* | 
|  | 1200 | * Try to transfer events to user space. In case we get 0 events and | 
|  | 1201 | * there's still timeout left over, we try again in search of | 
|  | 1202 | * more luck. | 
|  | 1203 | */ | 
|  | 1204 | if (!res && eavail && | 
| Shawn Bohrer | 95aac7b | 2010-10-27 15:34:54 -0700 | [diff] [blame] | 1205 | !(res = ep_send_events(ep, events, maxevents)) && !timed_out) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | goto retry; | 
|  | 1207 |  | 
|  | 1208 | return res; | 
|  | 1209 | } | 
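/*
 * A small user-space sketch of the three timeout modes handled above, under
 * the usual epoll_wait(2) contract: a negative timeout blocks indefinitely,
 * zero only probes the ready list, and a positive value is a ceiling in
 * milliseconds. Illustrative only.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>

static int wait_examples(int epfd, struct epoll_event *evs, int maxevents)
{
	int n;

	n = epoll_wait(epfd, evs, maxevents, -1);	/* sleep until an event arrives */
	n = epoll_wait(epfd, evs, maxevents, 0);	/* non-blocking probe */
	n = epoll_wait(epfd, evs, maxevents, 250);	/* sleep for at most ~250 ms */

	return n;
}
#endif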
|  | 1210 |  | 
| Davide Libenzi | 22bacca | 2011-02-25 14:44:12 -0800 | [diff] [blame] | 1211 | /** | 
|  | 1212 | * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() | 
|  | 1213 | *                      API, to verify that adding an epoll file inside another | 
|  | 1214 | *                      epoll structure does not violate the constraints in | 
|  | 1215 | *                      terms of closed loops or too deep chains (which can | 
|  | 1216 | *                      result in excessive stack usage). | 
|  | 1217 | * | 
|  | 1218 | * @priv: Pointer to the epoll file to be currently checked. | 
|  | 1219 | * @cookie: Original cookie for this call. This is the top-of-the-chain epoll | 
|  | 1220 | *          data structure pointer. | 
|  | 1221 | * @call_nests: Current depth of the @ep_call_nested() call stack. | 
|  | 1222 | * | 
|  | 1223 | * Returns: Zero if adding the epoll @file inside the current epoll | 
|  | 1224 | *          structure @ep does not violate the constraints, or -1 otherwise. | 
|  | 1225 | */ | 
|  | 1226 | static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) | 
|  | 1227 | { | 
|  | 1228 | int error = 0; | 
|  | 1229 | struct file *file = priv; | 
|  | 1230 | struct eventpoll *ep = file->private_data; | 
|  | 1231 | struct rb_node *rbp; | 
|  | 1232 | struct epitem *epi; | 
|  | 1233 |  | 
|  | 1234 | mutex_lock(&ep->mtx); | 
|  | 1235 | for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { | 
|  | 1236 | epi = rb_entry(rbp, struct epitem, rbn); | 
|  | 1237 | if (unlikely(is_file_epoll(epi->ffd.file))) { | 
|  | 1238 | error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, | 
|  | 1239 | ep_loop_check_proc, epi->ffd.file, | 
|  | 1240 | epi->ffd.file->private_data, current); | 
|  | 1241 | if (error != 0) | 
|  | 1242 | break; | 
|  | 1243 | } | 
|  | 1244 | } | 
|  | 1245 | mutex_unlock(&ep->mtx); | 
|  | 1246 |  | 
|  | 1247 | return error; | 
|  | 1248 | } | 
|  | 1249 |  | 
|  | 1250 | /** | 
|  | 1251 | * ep_loop_check - Performs a check to verify that adding an epoll file (@file) | 
|  | 1252 | *                 inside another epoll file (represented by @ep) does not create | 
|  | 1253 | *                 closed loops or too deep chains. | 
|  | 1254 | * | 
|  | 1255 | * @ep: Pointer to the epoll private data structure. | 
|  | 1256 | * @file: Pointer to the epoll file to be checked. | 
|  | 1257 | * | 
|  | 1258 | * Returns: Zero if adding the epoll @file inside the current epoll | 
|  | 1259 | *          structure @ep does not violate the constraints, or -1 otherwise. | 
|  | 1260 | */ | 
|  | 1261 | static int ep_loop_check(struct eventpoll *ep, struct file *file) | 
|  | 1262 | { | 
|  | 1263 | return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, | 
|  | 1264 | ep_loop_check_proc, file, ep, current); | 
|  | 1265 | } | 
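/*
 * A hedged user-space illustration of the constraint ep_loop_check() enforces:
 * nesting one epoll fd inside another is allowed, but closing the chain into a
 * cycle makes EPOLL_CTL_ADD fail with ELOOP. Illustrative only.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>
#include <stdio.h>

static void loop_demo(void)
{
	struct epoll_event ev = { .events = EPOLLIN };
	int a = epoll_create1(0);
	int b = epoll_create1(0);

	ev.data.fd = b;
	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);	/* a watches b: allowed */

	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) == -1)
		perror("closing the loop");	/* expected to fail with ELOOP */
}
#endif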
|  | 1266 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1267 | /* | 
| Andrew Morton | 523723b | 2008-08-12 15:09:01 -0700 | [diff] [blame] | 1268 | * Open an eventpoll file descriptor. | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1269 | */ | 
| Heiko Carstens | 5a8a82b | 2009-01-14 14:14:25 +0100 | [diff] [blame] | 1270 | SYSCALL_DEFINE1(epoll_create1, int, flags) | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1271 | { | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1272 | int error; | 
|  | 1273 | struct eventpoll *ep = NULL; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1274 |  | 
| Ulrich Drepper | e38b36f | 2008-07-23 21:29:42 -0700 | [diff] [blame] | 1275 | /* Check the EPOLL_* constant for consistency.  */ | 
|  | 1276 | BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); | 
|  | 1277 |  | 
| Davide Libenzi | 296e236 | 2009-03-31 15:24:11 -0700 | [diff] [blame] | 1278 | if (flags & ~EPOLL_CLOEXEC) | 
|  | 1279 | return -EINVAL; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1280 | /* | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1281 | * Create the internal data structure ("struct eventpoll"). | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1282 | */ | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1283 | error = ep_alloc(&ep); | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1284 | if (error < 0) | 
|  | 1285 | return error; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1286 | /* | 
|  | 1287 | * Creates all the items needed to set up an eventpoll file. That is, | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1288 | * a file structure and a free file descriptor. | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1289 | */ | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1290 | error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, | 
| Roland Dreier | 628ff7c | 2009-12-18 09:41:24 -0800 | [diff] [blame] | 1291 | O_RDWR | (flags & O_CLOEXEC)); | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1292 | if (error < 0) | 
| Al Viro | 2030a42 | 2008-02-23 06:46:49 -0500 | [diff] [blame] | 1293 | ep_free(ep); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1294 |  | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1295 | return error; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1296 | } | 
|  | 1297 |  | 
| Heiko Carstens | 5a8a82b | 2009-01-14 14:14:25 +0100 | [diff] [blame] | 1298 | SYSCALL_DEFINE1(epoll_create, int, size) | 
| Ulrich Drepper | a0998b5 | 2008-07-23 21:29:27 -0700 | [diff] [blame] | 1299 | { | 
| Davide Libenzi | bfe3891 | 2009-05-12 13:19:44 -0700 | [diff] [blame] | 1300 | if (size <= 0) | 
| Ulrich Drepper | 9fe5ad9 | 2008-07-23 21:29:43 -0700 | [diff] [blame] | 1301 | return -EINVAL; | 
|  | 1302 |  | 
|  | 1303 | return sys_epoll_create1(0); | 
| Ulrich Drepper | a0998b5 | 2008-07-23 21:29:27 -0700 | [diff] [blame] | 1304 | } | 
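/*
 * A minimal user-space sketch of the two creation entry points above:
 * epoll_create1() takes flags (only EPOLL_CLOEXEC here), while the older
 * epoll_create() merely checks that its size hint is positive and otherwise
 * ignores it. Illustrative only.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>
#include <stdio.h>

static int make_epoll_fd(void)
{
	int epfd = epoll_create1(EPOLL_CLOEXEC);	/* preferred interface */

	if (epfd == -1) {
		perror("epoll_create1");
		return -1;
	}
	return epfd;
}
#endif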
|  | 1305 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1306 | /* | 
|  | 1307 | * The following function implements the controller interface for | 
|  | 1308 | * the eventpoll file that enables the insertion/removal/change of | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 1309 | * file descriptors inside the interest set. | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1310 | */ | 
| Heiko Carstens | 5a8a82b | 2009-01-14 14:14:25 +0100 | [diff] [blame] | 1311 | SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | 
|  | 1312 | struct epoll_event __user *, event) | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1313 | { | 
|  | 1314 | int error; | 
| Davide Libenzi | 22bacca | 2011-02-25 14:44:12 -0800 | [diff] [blame] | 1315 | int did_lock_epmutex = 0; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1316 | struct file *file, *tfile; | 
|  | 1317 | struct eventpoll *ep; | 
|  | 1318 | struct epitem *epi; | 
|  | 1319 | struct epoll_event epds; | 
|  | 1320 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1321 | error = -EFAULT; | 
|  | 1322 | if (ep_op_has_event(op) && | 
|  | 1323 | copy_from_user(&epds, event, sizeof(struct epoll_event))) | 
|  | 1324 | goto error_return; | 
|  | 1325 |  | 
|  | 1326 | /* Get the "struct file *" for the eventpoll file */ | 
|  | 1327 | error = -EBADF; | 
|  | 1328 | file = fget(epfd); | 
|  | 1329 | if (!file) | 
|  | 1330 | goto error_return; | 
|  | 1331 |  | 
|  | 1332 | /* Get the "struct file *" for the target file */ | 
|  | 1333 | tfile = fget(fd); | 
|  | 1334 | if (!tfile) | 
|  | 1335 | goto error_fput; | 
|  | 1336 |  | 
|  | 1337 | /* The target file descriptor must support poll */ | 
|  | 1338 | error = -EPERM; | 
|  | 1339 | if (!tfile->f_op || !tfile->f_op->poll) | 
|  | 1340 | goto error_tgt_fput; | 
|  | 1341 |  | 
|  | 1342 | /* | 
|  | 1343 | * We have to check that the file structure underneath the file descriptor | 
|  | 1344 | * the user passed to us _is_ an eventpoll file. Also, we do not permit | 
|  | 1345 | * adding an epoll file descriptor inside itself. | 
|  | 1346 | */ | 
|  | 1347 | error = -EINVAL; | 
|  | 1348 | if (file == tfile || !is_file_epoll(file)) | 
|  | 1349 | goto error_tgt_fput; | 
|  | 1350 |  | 
|  | 1351 | /* | 
|  | 1352 | * At this point it is safe to assume that the "private_data" contains | 
|  | 1353 | * our own data structure. | 
|  | 1354 | */ | 
|  | 1355 | ep = file->private_data; | 
|  | 1356 |  | 
| Davide Libenzi | 22bacca | 2011-02-25 14:44:12 -0800 | [diff] [blame] | 1357 | /* | 
|  | 1358 | * When we insert an epoll file descriptor inside another epoll file | 
|  | 1359 | * descriptor, there is the chance of creating closed loops, which are | 
|  | 1360 | * better handled here than in more critical paths. | 
|  | 1361 | * | 
|  | 1362 | * We hold epmutex across the loop check and the insert in this case, in | 
|  | 1363 | * order to prevent two separate inserts from racing and each doing the | 
|  | 1364 | * insert "at the same time" such that ep_loop_check passes on both | 
|  | 1365 | * before either one does the insert, thereby creating a cycle. | 
|  | 1366 | */ | 
|  | 1367 | if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { | 
|  | 1368 | mutex_lock(&epmutex); | 
|  | 1369 | did_lock_epmutex = 1; | 
|  | 1370 | error = -ELOOP; | 
|  | 1371 | if (ep_loop_check(ep, tfile) != 0) | 
|  | 1372 | goto error_tgt_fput; | 
|  | 1373 | } | 
|  | 1374 |  | 
|  | 1375 |  | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1376 | mutex_lock(&ep->mtx); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1377 |  | 
| Davide Libenzi | 67647d0 | 2007-05-15 01:40:52 -0700 | [diff] [blame] | 1378 | /* | 
|  | 1379 | * Try to lookup the file inside our RB tree, Since we grabbed "mtx" | 
|  | 1380 | * above, we can be sure to be able to use the item looked up by | 
|  | 1381 | * ep_find() till we release the mutex. | 
|  | 1382 | */ | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1383 | epi = ep_find(ep, tfile, fd); | 
|  | 1384 |  | 
|  | 1385 | error = -EINVAL; | 
|  | 1386 | switch (op) { | 
|  | 1387 | case EPOLL_CTL_ADD: | 
|  | 1388 | if (!epi) { | 
|  | 1389 | epds.events |= POLLERR | POLLHUP; | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1390 | error = ep_insert(ep, &epds, tfile, fd); | 
|  | 1391 | } else | 
|  | 1392 | error = -EEXIST; | 
|  | 1393 | break; | 
|  | 1394 | case EPOLL_CTL_DEL: | 
|  | 1395 | if (epi) | 
|  | 1396 | error = ep_remove(ep, epi); | 
|  | 1397 | else | 
|  | 1398 | error = -ENOENT; | 
|  | 1399 | break; | 
|  | 1400 | case EPOLL_CTL_MOD: | 
|  | 1401 | if (epi) { | 
|  | 1402 | epds.events |= POLLERR | POLLHUP; | 
|  | 1403 | error = ep_modify(ep, epi, &epds); | 
|  | 1404 | } else | 
|  | 1405 | error = -ENOENT; | 
|  | 1406 | break; | 
|  | 1407 | } | 
| Davide Libenzi | d47de16 | 2007-05-15 01:40:41 -0700 | [diff] [blame] | 1408 | mutex_unlock(&ep->mtx); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1409 |  | 
|  | 1410 | error_tgt_fput: | 
| Davide Libenzi | 22bacca | 2011-02-25 14:44:12 -0800 | [diff] [blame] | 1411 | if (unlikely(did_lock_epmutex)) | 
|  | 1412 | mutex_unlock(&epmutex); | 
|  | 1413 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1414 | fput(tfile); | 
|  | 1415 | error_fput: | 
|  | 1416 | fput(file); | 
|  | 1417 | error_return: | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1418 |  | 
|  | 1419 | return error; | 
|  | 1420 | } | 
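/*
 * A short user-space sketch of driving the controller interface above:
 * EPOLL_CTL_ADD installs an interest mask for a descriptor and EPOLL_CTL_DEL
 * removes it (the event argument is not read for DEL). The socket fd used
 * here is hypothetical. Illustrative only.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>
#include <stdio.h>

static int watch_socket(int epfd, int sock_fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLRDHUP,	/* readable data or peer shutdown */
		.data.fd = sock_fd,
	};

	if (epoll_ctl(epfd, EPOLL_CTL_ADD, sock_fd, &ev) == -1) {
		perror("EPOLL_CTL_ADD");
		return -1;
	}
	return 0;
}

static void unwatch_socket(int epfd, int sock_fd)
{
	/* Since kernel 2.6.9 the last argument may be NULL for DEL. */
	epoll_ctl(epfd, EPOLL_CTL_DEL, sock_fd, NULL);
}
#endif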
|  | 1421 |  | 
|  | 1422 | /* | 
|  | 1423 | * Implement the event wait interface for the eventpoll file. It is the kernel | 
|  | 1424 | * part of the user space epoll_wait(2). | 
|  | 1425 | */ | 
| Heiko Carstens | 5a8a82b | 2009-01-14 14:14:25 +0100 | [diff] [blame] | 1426 | SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events, | 
|  | 1427 | int, maxevents, int, timeout) | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1428 | { | 
|  | 1429 | int error; | 
|  | 1430 | struct file *file; | 
|  | 1431 | struct eventpoll *ep; | 
|  | 1432 |  | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1433 | /* The maximum number of events must be greater than zero */ | 
|  | 1434 | if (maxevents <= 0 || maxevents > EP_MAX_EVENTS) | 
|  | 1435 | return -EINVAL; | 
|  | 1436 |  | 
|  | 1437 | /* Verify that the area passed by the user is writeable */ | 
|  | 1438 | if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event))) { | 
|  | 1439 | error = -EFAULT; | 
|  | 1440 | goto error_return; | 
|  | 1441 | } | 
|  | 1442 |  | 
|  | 1443 | /* Get the "struct file *" for the eventpoll file */ | 
|  | 1444 | error = -EBADF; | 
|  | 1445 | file = fget(epfd); | 
|  | 1446 | if (!file) | 
|  | 1447 | goto error_return; | 
|  | 1448 |  | 
|  | 1449 | /* | 
|  | 1450 | * We have to check that the file structure underneath the fd | 
|  | 1451 | * the user passed to us _is_ an eventpoll file. | 
|  | 1452 | */ | 
|  | 1453 | error = -EINVAL; | 
|  | 1454 | if (!is_file_epoll(file)) | 
|  | 1455 | goto error_fput; | 
|  | 1456 |  | 
|  | 1457 | /* | 
|  | 1458 | * At this point it is safe to assume that the "private_data" contains | 
|  | 1459 | * our own data structure. | 
|  | 1460 | */ | 
|  | 1461 | ep = file->private_data; | 
|  | 1462 |  | 
|  | 1463 | /* Time to fish for events ... */ | 
|  | 1464 | error = ep_poll(ep, events, maxevents, timeout); | 
|  | 1465 |  | 
|  | 1466 | error_fput: | 
|  | 1467 | fput(file); | 
|  | 1468 | error_return: | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1469 |  | 
|  | 1470 | return error; | 
|  | 1471 | } | 
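/*
 * A compact user-space event-loop sketch built on the syscall above. It
 * assumes descriptors were registered with data.fd filled in and uses a
 * hypothetical handle_io() consumer. Illustrative only.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>
#include <stdio.h>

#define MAX_EVENTS 64

extern void handle_io(int fd, unsigned int events);	/* hypothetical consumer */

static void event_loop(int epfd)
{
	struct epoll_event evs[MAX_EVENTS];
	int i, n;

	for (;;) {
		n = epoll_wait(epfd, evs, MAX_EVENTS, -1);	/* block until something is ready */
		if (n == -1) {
			perror("epoll_wait");
			break;
		}
		for (i = 0; i < n; i++)
			handle_io(evs[i].data.fd, evs[i].events);
	}
}
#endif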
|  | 1472 |  | 
| Roland McGrath | f3de272 | 2008-04-30 00:53:09 -0700 | [diff] [blame] | 1473 | #ifdef HAVE_SET_RESTORE_SIGMASK | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1474 |  | 
|  | 1475 | /* | 
|  | 1476 | * Implement the event wait interface for the eventpoll file. It is the kernel | 
|  | 1477 | * part of the user space epoll_pwait(2). | 
|  | 1478 | */ | 
| Heiko Carstens | 5a8a82b | 2009-01-14 14:14:25 +0100 | [diff] [blame] | 1479 | SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events, | 
|  | 1480 | int, maxevents, int, timeout, const sigset_t __user *, sigmask, | 
|  | 1481 | size_t, sigsetsize) | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1482 | { | 
|  | 1483 | int error; | 
|  | 1484 | sigset_t ksigmask, sigsaved; | 
|  | 1485 |  | 
|  | 1486 | /* | 
|  | 1487 | * If the caller wants a certain signal mask to be set during the wait, | 
|  | 1488 | * we apply it here. | 
|  | 1489 | */ | 
|  | 1490 | if (sigmask) { | 
|  | 1491 | if (sigsetsize != sizeof(sigset_t)) | 
|  | 1492 | return -EINVAL; | 
|  | 1493 | if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask))) | 
|  | 1494 | return -EFAULT; | 
|  | 1495 | sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP)); | 
|  | 1496 | sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved); | 
|  | 1497 | } | 
|  | 1498 |  | 
|  | 1499 | error = sys_epoll_wait(epfd, events, maxevents, timeout); | 
|  | 1500 |  | 
|  | 1501 | /* | 
|  | 1502 | * If we changed the signal mask, we need to restore the original one. | 
|  | 1503 | * In case we've got a signal while waiting, we do not restore the | 
|  | 1504 | * signal mask yet, and we allow do_signal() to deliver the signal on | 
|  | 1505 | * the way back to userspace, before the signal mask is restored. | 
|  | 1506 | */ | 
|  | 1507 | if (sigmask) { | 
|  | 1508 | if (error == -EINTR) { | 
|  | 1509 | memcpy(&current->saved_sigmask, &sigsaved, | 
| Davide Libenzi | c7ea763 | 2007-05-15 01:40:47 -0700 | [diff] [blame] | 1510 | sizeof(sigsaved)); | 
| Roland McGrath | 4e4c22c | 2008-04-30 00:53:06 -0700 | [diff] [blame] | 1511 | set_restore_sigmask(); | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1512 | } else | 
|  | 1513 | sigprocmask(SIG_SETMASK, &sigsaved, NULL); | 
|  | 1514 | } | 
|  | 1515 |  | 
|  | 1516 | return error; | 
|  | 1517 | } | 
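/*
 * A small user-space sketch of the epoll_pwait() pattern served by the syscall
 * above: the kernel swaps in the caller-supplied mask for the duration of the
 * wait and restores the original afterwards, mirroring the sigprocmask()/
 * sigsaved handling just shown. Illustrative only.
 */
#if 0	/* user-space sketch */
#include <sys/epoll.h>
#include <signal.h>

static int wait_with_mask(int epfd, struct epoll_event *evs, int maxevents)
{
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);	/* keep SIGINT blocked while we sleep */

	/* The mask is applied atomically for this wait only. */
	return epoll_pwait(epfd, evs, maxevents, -1, &mask);
}
#endif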
|  | 1518 |  | 
| Roland McGrath | f3de272 | 2008-04-30 00:53:09 -0700 | [diff] [blame] | 1519 | #endif /* HAVE_SET_RESTORE_SIGMASK */ | 
| Davide Libenzi | 7699acd | 2007-05-10 22:23:23 -0700 | [diff] [blame] | 1520 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1521 | static int __init eventpoll_init(void) | 
|  | 1522 | { | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 1523 | struct sysinfo si; | 
|  | 1524 |  | 
|  | 1525 | si_meminfo(&si); | 
| Davide Libenzi | 9df04e1 | 2009-01-29 14:25:26 -0800 | [diff] [blame] | 1526 | /* | 
|  | 1527 | * Allows the top 4% of low memory to be allocated for epoll watches (per user). | 
|  | 1528 | */ | 
|  | 1529 | max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) / | 
| Davide Libenzi | 7ef9964 | 2008-12-01 13:13:55 -0800 | [diff] [blame] | 1530 | EP_ITEM_COST; | 
| Robin Holt | 52bd19f7 | 2011-01-12 17:00:01 -0800 | [diff] [blame] | 1531 | BUG_ON(max_user_watches < 0); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1532 |  | 
| Davide Libenzi | 22bacca | 2011-02-25 14:44:12 -0800 | [diff] [blame] | 1533 | /* | 
|  | 1534 | * Initialize the structure used to perform epoll file descriptor | 
|  | 1535 | * inclusion loop checks. | 
|  | 1536 | */ | 
|  | 1537 | ep_nested_calls_init(&poll_loop_ncalls); | 
|  | 1538 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1539 | /* Initialize the structure used to perform safe poll wait head wake ups */ | 
| Davide Libenzi | 5071f97 | 2009-03-31 15:24:10 -0700 | [diff] [blame] | 1540 | ep_nested_calls_init(&poll_safewake_ncalls); | 
|  | 1541 |  | 
|  | 1542 | /* Initialize the structure used to perform file's f_op->poll() calls */ | 
|  | 1543 | ep_nested_calls_init(&poll_readywalk_ncalls); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1544 |  | 
|  | 1545 | /* Allocates slab cache used to allocate "struct epitem" items */ | 
|  | 1546 | epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem), | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1547 | 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1548 |  | 
|  | 1549 | /* Allocates slab cache used to allocate "struct eppoll_entry" */ | 
|  | 1550 | pwq_cache = kmem_cache_create("eventpoll_pwq", | 
| Davide Libenzi | bb57c3e | 2009-03-31 15:24:12 -0700 | [diff] [blame] | 1551 | sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1552 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1553 | return 0; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1554 | } | 
| Davide Libenzi | cea6924 | 2007-05-10 22:23:22 -0700 | [diff] [blame] | 1555 | fs_initcall(eventpoll_init); |