/*
 *  fs/eventpoll.c (Efficient event retrieval implementation)
 *  Copyright (C) 2001,...,2009	 Davide Libenzi
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
#include <linux/rbtree.h>
#include <linux/wait.h>
#include <linux/eventpoll.h>
#include <linux/mount.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/anon_inodes.h>
#include <linux/device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/mman.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/*
 * LOCKING:
 * There are three levels of locking required by epoll :
 *
 * 1) epmutex (mutex)
 * 2) ep->mtx (mutex)
 * 3) ep->lock (spinlock)
 *
 * The acquire order is the one listed above, from 1 to 3.
 * We need a spinlock (ep->lock) because we manipulate objects
 * from inside the poll callback, that might be triggered from
 * a wake_up() that in turn might be called from IRQ context.
 * So we can't sleep inside the poll callback and hence we need
 * a spinlock. During the event transfer loop (from kernel to
 * user space) we could end up sleeping due to a copy_to_user(), so
 * we need a lock that will allow us to sleep. This lock is a
 * mutex (ep->mtx). It is acquired during the event transfer loop,
 * during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().
 * Then we also need a global mutex to serialize eventpoll_release_file()
 * and ep_free().
 * This mutex is acquired by ep_free() during the epoll file
 * cleanup path and it is also acquired by eventpoll_release_file()
 * if a file has been pushed inside an epoll set and it is then
 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
 * It is also acquired when inserting an epoll fd onto another epoll
 * fd. We do this so that we walk the epoll tree and ensure that this
 * insertion does not create a cycle of epoll file descriptors, which
 * could lead to deadlock. We need a global mutex to prevent two
 * simultaneous inserts (A into B and B into A) from racing and
 * constructing a cycle without either insert observing that it is
 * going to.
 * It is necessary to acquire multiple "ep->mtx"es at once in the
 * case when one epoll fd is added to another. In this case, we
 * always acquire the locks in the order of nesting (i.e. after
 * epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired
 * before e2->mtx). Since we disallow cycles of epoll file
 * descriptors, this ensures that the mutexes are well-ordered. In
 * order to communicate this nesting to lockdep, when walking a tree
 * of epoll file descriptors, we use the current recursion depth as
 * the lockdep subkey.
 * It is possible to drop the "ep->mtx" and to use the global
 * mutex "epmutex" (together with "ep->lock") to have it working,
 * but having "ep->mtx" will make the interface more scalable.
 * Events that require holding "epmutex" are very rare, while for
 * normal operations the epoll private "ep->mtx" will guarantee
 * a better scalability.
 */
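
/*
 * An illustrative sketch, not an actual code path in this file: any path
 * that needs more than one of the locks above must take them in exactly
 * the listed order, e.g.
 *
 *	mutex_lock(&epmutex);				(1) global mutex
 *	mutex_lock(&ep->mtx);				(2) per-ep mutex
 *	spin_lock_irqsave(&ep->lock, flags);		(3) per-ep spinlock
 *	...
 *	spin_unlock_irqrestore(&ep->lock, flags);
 *	mutex_unlock(&ep->mtx);
 *	mutex_unlock(&epmutex);
 */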

/* Epoll private bits inside the event mask */
#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET)

/* Maximum number of nesting levels allowed inside epoll sets */
#define EP_MAX_NESTS 4

#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))

#define EP_UNACTIVE_PTR ((void *) -1L)

#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))

struct epoll_filefd {
	struct file *file;
	int fd;
} __packed;

/*
 * Structure used to track possible nested calls, for too deep recursions
 * and loop cycles.
 */
struct nested_call_node {
	struct list_head llink;
	void *cookie;
	void *ctx;
};

/*
 * This structure is used as a collector for nested calls, to check for
 * maximum recursion depth and loop cycles.
 */
struct nested_calls {
	struct list_head tasks_call_list;
	spinlock_t lock;
};

/*
 * Each file descriptor added to the eventpoll interface will
 * have an entry of this type linked to the "rbr" RB tree.
 * Avoid increasing the size of this struct, there can be many thousands
 * of these on a server and we do not want this to take another cache line.
 */
struct epitem {
	/* RB tree node used to link this structure to the eventpoll RB tree */
	struct rb_node rbn;

	/* List header used to link this structure to the eventpoll ready list */
	struct list_head rdllink;

	/*
	 * Works together with "struct eventpoll"->ovflist in keeping the
	 * single linked chain of items.
	 */
	struct epitem *next;

	/* The file descriptor information this item refers to */
	struct epoll_filefd ffd;

	/* Number of active wait queues attached to poll operations */
	int nwait;

	/* List containing poll wait queues */
	struct list_head pwqlist;

	/* The "container" of this item */
	struct eventpoll *ep;

	/* List header used to link this item to the "struct file" items list */
	struct list_head fllink;

	/* wakeup_source used when EPOLLWAKEUP is set */
	struct wakeup_source __rcu *ws;

	/* The structure that describes the interested events and the source fd */
	struct epoll_event event;
};

/*
 * This structure is stored inside the "private_data" member of the file
 * structure and represents the main data structure for the eventpoll
 * interface.
 */
struct eventpoll {
	/* Protect the access to this structure */
	spinlock_t lock;

	/*
	 * This mutex is used to ensure that files are not removed
	 * while epoll is using them. This is held during the event
	 * collection loop, the file cleanup path, the epoll file exit
	 * code and the ctl operations.
	 */
	struct mutex mtx;

	/* Wait queue used by sys_epoll_wait() */
	wait_queue_head_t wq;

	/* Wait queue used by file->poll() */
	wait_queue_head_t poll_wait;

	/* List of ready file descriptors */
	struct list_head rdllist;

	/* RB tree root used to store monitored fd structs */
	struct rb_root rbr;

	/*
	 * This is a single linked list that chains all the "struct epitem" that
	 * got events while ready events were being transferred to userspace
	 * w/out holding ->lock.
	 */
	struct epitem *ovflist;

	/* wakeup_source used when ep_scan_ready_list is running */
	struct wakeup_source *ws;

	/* The user that created the eventpoll descriptor */
	struct user_struct *user;

	struct file *file;

	/* used to optimize loop detection check */
	int visited;
	struct list_head visited_list_link;
};

/* Wait structure used by the poll hooks */
struct eppoll_entry {
	/* List header used to link this structure to the "struct epitem" */
	struct list_head llink;

	/* The "base" pointer is set to the container "struct epitem" */
	struct epitem *base;

	/*
	 * Wait queue item that will be linked to the target file wait
	 * queue head.
	 */
	wait_queue_t wait;

	/* The wait queue head that linked the "wait" wait queue item */
	wait_queue_head_t *whead;
};

/* Wrapper struct used by poll queueing */
struct ep_pqueue {
	poll_table pt;
	struct epitem *epi;
};

/* Used by the ep_send_events() function as callback private data */
struct ep_send_events_data {
	int maxevents;
	struct epoll_event __user *events;
};

/*
 * Configuration options available inside /proc/sys/fs/epoll/
 */
/* Maximum number of epoll watched descriptors, per user */
static long max_user_watches __read_mostly;

/*
 * This mutex is used to serialize ep_free() and eventpoll_release_file().
 */
static DEFINE_MUTEX(epmutex);

/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;

/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;

/* Used to call file's f_op->poll() under the nested calls boundaries */
static struct nested_calls poll_readywalk_ncalls;

/* Slab cache used to allocate "struct epitem" */
static struct kmem_cache *epi_cache __read_mostly;

/* Slab cache used to allocate "struct eppoll_entry" */
static struct kmem_cache *pwq_cache __read_mostly;

/* Visited nodes during ep_loop_check(), so we can unset them when we finish */
static LIST_HEAD(visited_list);

/*
 * List of files with newly added links, where we may need to limit the number
 * of emanating paths. Protected by the epmutex.
 */
static LIST_HEAD(tfile_check_list);

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static long zero;
static long long_max = LONG_MAX;

ctl_table epoll_table[] = {
	{
		.procname	= "max_user_watches",
		.data		= &max_user_watches,
		.maxlen		= sizeof(max_user_watches),
		.mode		= 0644,
		.proc_handler	= proc_doulongvec_minmax,
		.extra1		= &zero,
		.extra2		= &long_max,
	},
	{ }
};
#endif /* CONFIG_SYSCTL */

static const struct file_operations eventpoll_fops;

static inline int is_file_epoll(struct file *f)
{
	return f->f_op == &eventpoll_fops;
}

/* Setup the structure that is used as key for the RB tree */
static inline void ep_set_ffd(struct epoll_filefd *ffd,
			      struct file *file, int fd)
{
	ffd->file = file;
	ffd->fd = fd;
}

/* Compare RB tree keys */
static inline int ep_cmp_ffd(struct epoll_filefd *p1,
			     struct epoll_filefd *p2)
{
	return (p1->file > p2->file ? +1 :
		(p1->file < p2->file ? -1 : p1->fd - p2->fd));
}

/* Tells us if the item is currently linked */
static inline int ep_is_linked(struct list_head *p)
{
	return !list_empty(p);
}

static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait);
}

/* Get the "struct epitem" from a wait queue pointer */
static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
{
	return container_of(p, struct eppoll_entry, wait)->base;
}

/* Get the "struct epitem" from an epoll queue wrapper */
static inline struct epitem *ep_item_from_epqueue(poll_table *p)
{
	return container_of(p, struct ep_pqueue, pt)->epi;
}

/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
static inline int ep_op_has_event(int op)
{
	return op != EPOLL_CTL_DEL;
}

/* Initialize the poll safe wake up structure */
static void ep_nested_calls_init(struct nested_calls *ncalls)
{
	INIT_LIST_HEAD(&ncalls->tasks_call_list);
	spin_lock_init(&ncalls->lock);
}

/**
 * ep_events_available - Checks if ready events might be available.
 *
 * @ep: Pointer to the eventpoll context.
 *
 * Returns: Returns a value different than zero if ready events are available,
 *          or zero otherwise.
 */
static inline int ep_events_available(struct eventpoll *ep)
{
	return !list_empty(&ep->rdllist) || ep->ovflist != EP_UNACTIVE_PTR;
}

/**
 * ep_call_nested - Perform a bound (possibly) nested call, by checking
 *                  that the recursion limit is not exceeded, and that
 *                  the same nested call (by the meaning of same cookie) is
 *                  not re-entered.
 *
 * @ncalls: Pointer to the nested_calls structure to be used for this call.
 * @max_nests: Maximum number of allowed nesting calls.
 * @nproc: Nested call core function pointer.
 * @priv: Opaque data to be passed to the @nproc callback.
 * @cookie: Cookie to be used to identify this nested call.
 * @ctx: This instance context.
 *
 * Returns: Returns the code returned by the @nproc callback, or -1 if
 *          the maximum recursion limit has been exceeded.
 */
static int ep_call_nested(struct nested_calls *ncalls, int max_nests,
			  int (*nproc)(void *, void *, int), void *priv,
			  void *cookie, void *ctx)
{
	int error, call_nests = 0;
	unsigned long flags;
	struct list_head *lsthead = &ncalls->tasks_call_list;
	struct nested_call_node *tncur;
	struct nested_call_node tnode;

	spin_lock_irqsave(&ncalls->lock, flags);

	/*
	 * Try to see if the current task is already inside this wakeup call.
	 * We use a list here, since the population inside this set is always
	 * very much limited.
	 */
	list_for_each_entry(tncur, lsthead, llink) {
		if (tncur->ctx == ctx &&
		    (tncur->cookie == cookie || ++call_nests > max_nests)) {
			/*
			 * Oops ... loop detected or maximum nest level reached.
			 * We abort this wake by breaking the cycle itself.
			 */
			error = -1;
			goto out_unlock;
		}
	}

	/* Add the current task and cookie to the list */
	tnode.ctx = ctx;
	tnode.cookie = cookie;
	list_add(&tnode.llink, lsthead);

	spin_unlock_irqrestore(&ncalls->lock, flags);

	/* Call the nested function */
	error = (*nproc)(priv, cookie, call_nests);

	/* Remove the current task from the list */
	spin_lock_irqsave(&ncalls->lock, flags);
	list_del(&tnode.llink);
out_unlock:
	spin_unlock_irqrestore(&ncalls->lock, flags);

	return error;
}

/*
 * As described in commit 0ccf831cb ("lockdep: annotate epoll"),
 * the use of wait queues used by epoll is done in a very controlled
 * manner. Wake ups can nest inside each other, but are never done
 * with the same locking. For example:
 *
 *   dfd = socket(...);
 *   efd1 = epoll_create();
 *   efd2 = epoll_create();
 *   epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);
 *   epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);
 *
 * When a packet arrives to the device underneath "dfd", the net code will
 * issue a wake_up() on its poll wake list. Epoll (efd1) has installed a
 * callback wakeup entry on that queue, and the wake_up() performed by the
 * "dfd" net code will end up in ep_poll_callback(). At this point epoll
 * (efd1) notices that it may have some event ready, so it needs to wake up
 * the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()
 * that ends up in another wake_up(), after having checked the recursion
 * constraints: no more than EP_MAX_POLLWAKE_NESTS nested wake ups are
 * allowed, to avoid stack blasting.
 *
 * When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle
 * this special case of epoll.
 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	unsigned long flags;

	spin_lock_irqsave_nested(&wqueue->lock, flags, subclass);
	wake_up_locked_poll(wqueue, events);
	spin_unlock_irqrestore(&wqueue->lock, flags);
}
#else
static inline void ep_wake_up_nested(wait_queue_head_t *wqueue,
				     unsigned long events, int subclass)
{
	wake_up_poll(wqueue, events);
}
#endif

static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests)
{
	ep_wake_up_nested((wait_queue_head_t *) cookie, POLLIN,
			  1 + call_nests);
	return 0;
}

/*
 * Perform a safe wake up of the poll wait list. The problem is that
 * with the new callback'd wake up system, it is possible that the
 * poll callback is reentered from inside the call to wake_up() done
 * on the poll wait queue head. The rule is that we cannot reenter the
 * wake up code from the same task more than EP_MAX_NESTS times,
 * and we cannot reenter the same wait queue head at all. This allows
 * a hierarchy of epoll file descriptors of no more than EP_MAX_NESTS
 * deep.
 */
static void ep_poll_safewake(wait_queue_head_t *wq)
{
	int this_cpu = get_cpu();

	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);

	put_cpu();
}

static void ep_remove_wait_queue(struct eppoll_entry *pwq)
{
	wait_queue_head_t *whead;

	rcu_read_lock();
	/* If it is cleared by POLLFREE, it should be rcu-safe */
	whead = rcu_dereference(pwq->whead);
	if (whead)
		remove_wait_queue(whead, &pwq->wait);
	rcu_read_unlock();
}

/*
 * This function unregisters poll callbacks from the associated file
 * descriptor. Must be called with "mtx" held (or "epmutex" if called from
 * ep_free).
 */
static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)
{
	struct list_head *lsthead = &epi->pwqlist;
	struct eppoll_entry *pwq;

	while (!list_empty(lsthead)) {
		pwq = list_first_entry(lsthead, struct eppoll_entry, llink);

		list_del(&pwq->llink);
		ep_remove_wait_queue(pwq);
		kmem_cache_free(pwq_cache, pwq);
	}
}

/* call only when ep->mtx is held */
static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)
{
	return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));
}

/* call only when ep->mtx is held */
static inline void ep_pm_stay_awake(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	if (ws)
		__pm_stay_awake(ws);
}

static inline bool ep_has_wakeup_source(struct epitem *epi)
{
	return rcu_access_pointer(epi->ws) ? true : false;
}

/* call when ep->mtx cannot be held (ep_poll_callback) */
static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
{
	struct wakeup_source *ws;

	rcu_read_lock();
	ws = rcu_dereference(epi->ws);
	if (ws)
		__pm_stay_awake(ws);
	rcu_read_unlock();
}

/**
 * ep_scan_ready_list - Scans the ready list in a way that makes it possible
 *                      for the scan code to call f_op->poll(). Also allows for
 *                      O(NumReady) performance.
 *
 * @ep: Pointer to the epoll private data structure.
 * @sproc: Pointer to the scan callback.
 * @priv: Private opaque data passed to the @sproc callback.
 * @depth: The current depth of recursive f_op->poll calls.
 *
 * Returns: The same integer error code returned by the @sproc callback.
 */
static int ep_scan_ready_list(struct eventpoll *ep,
			      int (*sproc)(struct eventpoll *,
					   struct list_head *, void *),
			      void *priv,
			      int depth)
{
	int error, pwake = 0;
	unsigned long flags;
	struct epitem *epi, *nepi;
	LIST_HEAD(txlist);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() and epoll_ctl().
	 */
	mutex_lock_nested(&ep->mtx, depth);

	/*
	 * Steal the ready list, and re-init the original one to the
	 * empty list. Also, set ep->ovflist to NULL so that events
	 * happening while looping w/out locks, are not lost. We cannot
	 * have the poll callback to queue directly on ep->rdllist,
	 * because we want the "sproc" callback to be able to do it
	 * in a lockless way.
	 */
	spin_lock_irqsave(&ep->lock, flags);
	list_splice_init(&ep->rdllist, &txlist);
	ep->ovflist = NULL;
	spin_unlock_irqrestore(&ep->lock, flags);

	/*
	 * Now call the callback function.
	 */
	error = (*sproc)(ep, &txlist, priv);

	spin_lock_irqsave(&ep->lock, flags);
	/*
	 * During the time we spent inside the "sproc" callback, some
	 * other events might have been queued by the poll callback.
	 * We re-insert them inside the main ready-list here.
	 */
	for (nepi = ep->ovflist; (epi = nepi) != NULL;
	     nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {
		/*
		 * We need to check if the item is already in the list.
		 * During the "sproc" callback execution time, items are
		 * queued into ->ovflist but the "txlist" might already
		 * contain them, and the list_splice() below takes care of them.
		 */
		if (!ep_is_linked(&epi->rdllink)) {
			list_add_tail(&epi->rdllink, &ep->rdllist);
			ep_pm_stay_awake(epi);
		}
	}
	/*
	 * We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after
	 * releasing the lock, events will be queued in the normal way inside
	 * ep->rdllist.
	 */
	ep->ovflist = EP_UNACTIVE_PTR;

	/*
	 * Quickly re-inject items left on "txlist".
	 */
	list_splice(&txlist, &ep->rdllist);
	__pm_relax(ep->ws);

	if (!list_empty(&ep->rdllist)) {
		/*
		 * Wake up (if active) both the eventpoll wait list and
		 * the ->poll() wait list (delayed after we release the lock).
		 */
		if (waitqueue_active(&ep->wq))
			wake_up_locked(&ep->wq);
		if (waitqueue_active(&ep->poll_wait))
			pwake++;
	}
	spin_unlock_irqrestore(&ep->lock, flags);

	mutex_unlock(&ep->mtx);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return error;
}
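
/*
 * An illustrative sketch of the calling pattern (ep_read_events_proc()
 * below is a real "sproc" callback; "my_sproc" here is a hypothetical
 * name): the callback sees a privately stolen ready list, while events
 * arriving in the meantime are parked on ep->ovflist and re-injected
 * once the scan is done:
 *
 *	error = ep_scan_ready_list(ep, my_sproc, priv, depth);
 */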

/*
 * Removes a "struct epitem" from the eventpoll RB tree and deallocates
 * all the associated resources. Must be called with "mtx" held.
 */
static int ep_remove(struct eventpoll *ep, struct epitem *epi)
{
	unsigned long flags;
	struct file *file = epi->ffd.file;

	/*
	 * Removes poll wait queue hooks. We _have_ to do this without holding
	 * the "ep->lock" otherwise a deadlock might occur. This is because of
	 * the sequence of the lock acquisition. Here we do "ep->lock" then the
	 * wait queue head lock when unregistering the wait queue. The wakeup
	 * callback will run by holding the wait queue head lock and will call
	 * our callback that will try to get "ep->lock".
	 */
	ep_unregister_pollwait(ep, epi);

	/* Remove the current item from the list of epoll hooks */
	spin_lock(&file->f_lock);
	if (ep_is_linked(&epi->fllink))
		list_del_init(&epi->fllink);
	spin_unlock(&file->f_lock);

	rb_erase(&epi->rbn, &ep->rbr);

	spin_lock_irqsave(&ep->lock, flags);
	if (ep_is_linked(&epi->rdllink))
		list_del_init(&epi->rdllink);
	spin_unlock_irqrestore(&ep->lock, flags);

	wakeup_source_unregister(ep_wakeup_source(epi));

	/* At this point it is safe to free the eventpoll item */
	kmem_cache_free(epi_cache, epi);

	atomic_long_dec(&ep->user->epoll_watches);

	return 0;
}

static void ep_free(struct eventpoll *ep)
{
	struct rb_node *rbp;
	struct epitem *epi;

	/* We need to release all tasks waiting for this file */
	if (waitqueue_active(&ep->poll_wait))
		ep_poll_safewake(&ep->poll_wait);

	/*
	 * We need to lock this because we could be hit by
	 * eventpoll_release_file() while we're freeing the "struct eventpoll".
	 * We do not need to hold "ep->mtx" here because the epoll file
	 * is on the way to be removed and no one has references to it
	 * anymore. The only hit might come from eventpoll_release_file() but
	 * holding "epmutex" is sufficient here.
	 */
	mutex_lock(&epmutex);

	/*
	 * Walks through the whole tree by unregistering poll callbacks.
	 */
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		epi = rb_entry(rbp, struct epitem, rbn);

		ep_unregister_pollwait(ep, epi);
	}

	/*
	 * Walks through the whole tree by freeing each "struct epitem". At this
	 * point we are sure no poll callbacks will be lingering around, and also by
	 * holding "epmutex" we can be sure that no file cleanup code will hit
	 * us during this operation. So we can avoid the lock on "ep->lock".
	 */
	while ((rbp = rb_first(&ep->rbr)) != NULL) {
		epi = rb_entry(rbp, struct epitem, rbn);
		ep_remove(ep, epi);
	}

	mutex_unlock(&epmutex);
	mutex_destroy(&ep->mtx);
	free_uid(ep->user);
	wakeup_source_unregister(ep->ws);
	kfree(ep);
}

static int ep_eventpoll_release(struct inode *inode, struct file *file)
{
	struct eventpoll *ep = file->private_data;

	if (ep)
		ep_free(ep);

	return 0;
}

static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
			       void *priv)
{
	struct epitem *epi, *tmp;
	poll_table pt;

	init_poll_funcptr(&pt, NULL);
	list_for_each_entry_safe(epi, tmp, head, rdllink) {
		pt._key = epi->event.events;
		if (epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
		    epi->event.events)
			return POLLIN | POLLRDNORM;
		else {
			/*
			 * Item has been dropped into the ready list by the poll
			 * callback, but it's not actually ready, as far as
			 * caller requested events goes. We can remove it here.
			 */
			__pm_relax(ep_wakeup_source(epi));
			list_del_init(&epi->rdllink);
		}
	}

	return 0;
}

static int ep_poll_readyevents_proc(void *priv, void *cookie, int call_nests)
{
	return ep_scan_ready_list(priv, ep_read_events_proc, NULL, call_nests + 1);
}

static unsigned int ep_eventpoll_poll(struct file *file, poll_table *wait)
{
	int pollflags;
	struct eventpoll *ep = file->private_data;

	/* Insert inside our poll wait queue */
	poll_wait(file, &ep->poll_wait, wait);

	/*
	 * Proceed to find out if wanted events are really available inside
	 * the ready list. This needs to be done under ep_call_nested()
	 * supervision, since the call to f_op->poll() done on listed files
	 * could re-enter here.
	 */
	pollflags = ep_call_nested(&poll_readywalk_ncalls, EP_MAX_NESTS,
				   ep_poll_readyevents_proc, ep, ep, current);

	return pollflags != -1 ? pollflags : 0;
}

#ifdef CONFIG_PROC_FS
static int ep_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventpoll *ep = f->private_data;
	struct rb_node *rbp;
	int ret = 0;

	mutex_lock(&ep->mtx);
	for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
		struct epitem *epi = rb_entry(rbp, struct epitem, rbn);

		ret = seq_printf(m, "tfd: %8d events: %8x data: %16llx\n",
				 epi->ffd.fd, epi->event.events,
				 (long long)epi->event.data);
		if (ret)
			break;
	}
	mutex_unlock(&ep->mtx);

	return ret;
}
#endif
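
/*
 * For reference, each line emitted by ep_show_fdinfo() above follows the
 * "tfd: ... events: ... data: ..." format string, one line per watched
 * descriptor, e.g. (illustrative values):
 *
 *	tfd:        5 events:       19 data:               2a
 *
 * The tree is walked under ep->mtx, so items cannot be added or removed
 * while /proc/<pid>/fdinfo/<epfd> is being read.
 */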

/* File callbacks that implement the eventpoll file behaviour */
static const struct file_operations eventpoll_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= ep_show_fdinfo,
#endif
	.release	= ep_eventpoll_release,
	.poll		= ep_eventpoll_poll,
	.llseek		= noop_llseek,
};

/*
 * This is called from eventpoll_release() to unlink files from the eventpoll
 * interface. We need to have this facility to correctly clean up files that
 * are closed without being removed from the eventpoll interface.
 */
void eventpoll_release_file(struct file *file)
{
	struct list_head *lsthead = &file->f_ep_links;
	struct eventpoll *ep;
	struct epitem *epi;

	/*
	 * We don't want to get "file->f_lock" because it is not
	 * necessary. It is not necessary because we're in the "struct file"
	 * cleanup path, and this means that no one is using this file anymore.
	 * So, for example, epoll_ctl() cannot hit here since if we reach this
	 * point, the file counter already went to zero and fget() would fail.
	 * The only hit might come from ep_free() but holding the mutex
	 * will correctly serialize the operation. We do need to acquire
	 * "ep->mtx" after "epmutex" because ep_remove() requires it when called
	 * from anywhere but ep_free().
	 *
	 * Besides, ep_remove() acquires the lock, so we can't hold it here.
	 */
	mutex_lock(&epmutex);

	while (!list_empty(lsthead)) {
		epi = list_first_entry(lsthead, struct epitem, fllink);

		ep = epi->ep;
		list_del_init(&epi->fllink);
		mutex_lock_nested(&ep->mtx, 0);
		ep_remove(ep, epi);
		mutex_unlock(&ep->mtx);
	}

	mutex_unlock(&epmutex);
}

static int ep_alloc(struct eventpoll **pep)
{
	int error;
	struct user_struct *user;
	struct eventpoll *ep;

	user = get_current_user();
	error = -ENOMEM;
	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
	if (unlikely(!ep))
		goto free_uid;

	spin_lock_init(&ep->lock);
	mutex_init(&ep->mtx);
	init_waitqueue_head(&ep->wq);
	init_waitqueue_head(&ep->poll_wait);
	INIT_LIST_HEAD(&ep->rdllist);
	ep->rbr = RB_ROOT;
	ep->ovflist = EP_UNACTIVE_PTR;
	ep->user = user;

	*pep = ep;

	return 0;

free_uid:
	free_uid(user);
	return error;
}

/*
 * Search the file inside the eventpoll tree. The RB tree operations
 * are protected by the "mtx" mutex, and ep_find() must be called with
 * "mtx" held.
 */
static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
{
	int kcmp;
	struct rb_node *rbp;
	struct epitem *epi, *epir = NULL;
	struct epoll_filefd ffd;

	ep_set_ffd(&ffd, file, fd);
	for (rbp = ep->rbr.rb_node; rbp; ) {
		epi = rb_entry(rbp, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&ffd, &epi->ffd);
		if (kcmp > 0)
			rbp = rbp->rb_right;
		else if (kcmp < 0)
			rbp = rbp->rb_left;
		else {
			epir = epi;
			break;
		}
	}

	return epir;
}

/*
 * This is the callback that is passed to the wait queue wakeup
 * mechanism. It is called by the stored file descriptors when they
 * have events to report.
 */
static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	int pwake = 0;
	unsigned long flags;
	struct epitem *epi = ep_item_from_wait(wait);
	struct eventpoll *ep = epi->ep;

	if ((unsigned long)key & POLLFREE) {
		ep_pwq_from_wait(wait)->whead = NULL;
		/*
		 * whead = NULL above can race with ep_remove_wait_queue()
		 * which can do another remove_wait_queue() after us, so we
		 * can't use __remove_wait_queue(). whead->lock is held by
		 * the caller.
		 */
		list_del_init(&wait->task_list);
	}

	spin_lock_irqsave(&ep->lock, flags);

	/*
	 * If the event mask does not contain any poll(2) event, we consider the
	 * descriptor to be disabled. This condition is likely the effect of the
	 * EPOLLONESHOT bit that disables the descriptor when an event is received,
	 * until the next EPOLL_CTL_MOD will be issued.
	 */
	if (!(epi->event.events & ~EP_PRIVATE_BITS))
		goto out_unlock;

	/*
	 * Check the events coming with the callback. At this stage, not
	 * every device reports the events in the "key" parameter of the
	 * callback. We need to be able to handle both cases here, hence the
	 * test for "key" != NULL before the event match test.
	 */
	if (key && !((unsigned long) key & epi->event.events))
		goto out_unlock;

	/*
	 * If we are transferring events to userspace, we can hold no locks
	 * (because we're accessing user memory, and because of linux f_op->poll()
	 * semantics). All the events that happen during that period of time are
	 * chained in ep->ovflist and requeued later on.
	 */
	if (unlikely(ep->ovflist != EP_UNACTIVE_PTR)) {
		if (epi->next == EP_UNACTIVE_PTR) {
			epi->next = ep->ovflist;
			ep->ovflist = epi;
			if (epi->ws) {
				/*
				 * Activate ep->ws since epi->ws may get
				 * deactivated at any time.
				 */
				__pm_stay_awake(ep->ws);
			}
		}
		goto out_unlock;
	}

	/* If this file is already in the ready list we exit soon */
	if (!ep_is_linked(&epi->rdllink)) {
		list_add_tail(&epi->rdllink, &ep->rdllist);
		ep_pm_stay_awake_rcu(epi);
	}

	/*
	 * Wake up (if active) both the eventpoll wait list and the ->poll()
	 * wait list.
	 */
	if (waitqueue_active(&ep->wq))
		wake_up_locked(&ep->wq);
	if (waitqueue_active(&ep->poll_wait))
		pwake++;

out_unlock:
	spin_unlock_irqrestore(&ep->lock, flags);

	/* We have to call this outside the lock */
	if (pwake)
		ep_poll_safewake(&ep->poll_wait);

	return 1;
}

/*
 * This is the callback that is used to add our wait queue to the
 * target file wakeup lists.
 */
static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
				 poll_table *pt)
{
	struct epitem *epi = ep_item_from_epqueue(pt);
	struct eppoll_entry *pwq;

	if (epi->nwait >= 0 && (pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL))) {
		init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);
		pwq->whead = whead;
		pwq->base = epi;
		add_wait_queue(whead, &pwq->wait);
		list_add_tail(&pwq->llink, &epi->pwqlist);
		epi->nwait++;
	} else {
		/* We have to signal that an error occurred */
		epi->nwait = -1;
	}
}

static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)
{
	int kcmp;
	struct rb_node **p = &ep->rbr.rb_node, *parent = NULL;
	struct epitem *epic;

	while (*p) {
		parent = *p;
		epic = rb_entry(parent, struct epitem, rbn);
		kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);
		if (kcmp > 0)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&epi->rbn, parent, p);
	rb_insert_color(&epi->rbn, &ep->rbr);
}

#define PATH_ARR_SIZE 5
/*
 * These are the numbers of paths of length 1 to 5, that we are allowing to
 * emanate from a single file of interest. For example, we allow 1000 paths
 * of length 1, to emanate from each file of interest. This essentially
 * represents the potential wakeup paths, which need to be limited in order
 * to avoid massive uncontrolled wakeup storms. The common use case should be
 * a single ep which is connected to n file sources. In this case each file
 * source has 1 path of length 1. Thus, the numbers below should be more than
 * sufficient. These path limits are enforced during an EPOLL_CTL_ADD
 * operation, since a modify and delete can't add additional paths. Protected
 * by the epmutex.
 */
static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };
static int path_count[PATH_ARR_SIZE];

static int path_count_inc(int nests)
{
	/* Allow an arbitrary number of depth 1 paths */
	if (nests == 0)
		return 0;

	if (++path_count[nests] > path_limits[nests])
		return -1;
	return 0;
}

static void path_count_init(void)
{
	int i;

	for (i = 0; i < PATH_ARR_SIZE; i++)
		path_count[i] = 0;
}
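
/*
 * A worked example under the limits above (illustrative numbers): if a
 * single target file is watched by 1000 distinct epoll fds, those are
 * 1000 paths of length 1, which path_count_inc() always allows. If each
 * of those epoll fds is itself nested inside another epoll fd, that adds
 * 1000 paths of length 2, exceeding the 500 allowed, so the
 * EPOLL_CTL_ADD creating the excess links fails the reverse path check.
 */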

static int reverse_path_check_proc(void *priv, void *cookie, int call_nests)
{
	int error = 0;
	struct file *file = priv;
	struct file *child_file;
	struct epitem *epi;

	list_for_each_entry(epi, &file->f_ep_links, fllink) {
		child_file = epi->ep->file;
		if (is_file_epoll(child_file)) {
			if (list_empty(&child_file->f_ep_links)) {
				if (path_count_inc(call_nests)) {
					error = -1;
					break;
				}
			} else {
				error = ep_call_nested(&poll_loop_ncalls,
							EP_MAX_NESTS,
							reverse_path_check_proc,
							child_file, child_file,
							current);
			}
			if (error != 0)
				break;
		} else {
			printk(KERN_ERR "reverse_path_check_proc: "
			       "file is not an ep!\n");
		}
	}
	return error;
}

/**
 * reverse_path_check - The tfile_check_list is a list of file *, which have
 *                      links that are proposed to be newly added. We need to
 *                      make sure that those added links don't add too many
 *                      paths such that we will spend all our time waking up
 *                      eventpoll objects.
 *
 * Returns: Returns zero if the proposed links don't create too many paths,
 *          -1 otherwise.
 */
static int reverse_path_check(void)
{
	int error = 0;
	struct file *current_file;

	/* let's call this for all tfiles */
	list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) {
		path_count_init();
		error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
					reverse_path_check_proc, current_file,
					current_file, current);
		if (error)
			break;
	}
	return error;
}

static int ep_create_wakeup_source(struct epitem *epi)
{
	const char *name;
	struct wakeup_source *ws;

	if (!epi->ep->ws) {
		epi->ep->ws = wakeup_source_register("eventpoll");
		if (!epi->ep->ws)
			return -ENOMEM;
	}

	name = epi->ffd.file->f_path.dentry->d_name.name;
	ws = wakeup_source_register(name);

	if (!ws)
		return -ENOMEM;
	rcu_assign_pointer(epi->ws, ws);

	return 0;
}

/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */
static noinline void ep_destroy_wakeup_source(struct epitem *epi)
{
	struct wakeup_source *ws = ep_wakeup_source(epi);

	rcu_assign_pointer(epi->ws, NULL);

	/*
	 * wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is
	 * used internally by wakeup_source_remove, too (called by
	 * wakeup_source_unregister), so we cannot use call_rcu
	 */
	synchronize_rcu();
	wakeup_source_unregister(ws);
}
1216
Davide Libenzic7ea7632007-05-15 01:40:47 -07001217/*
1218 * Must be called with "mtx" held.
1219 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
1221 struct file *tfile, int fd)
1222{
1223 int error, revents, pwake = 0;
1224 unsigned long flags;
Robin Holt52bd19f72011-01-12 17:00:01 -08001225 long user_watches;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 struct epitem *epi;
1227 struct ep_pqueue epq;
1228
Robin Holt52bd19f72011-01-12 17:00:01 -08001229 user_watches = atomic_long_read(&ep->user->epoll_watches);
1230 if (unlikely(user_watches >= max_user_watches))
Davide Libenzi7ef99642008-12-01 13:13:55 -08001231 return -ENOSPC;
Christoph Lametere94b1762006-12-06 20:33:17 -08001232 if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
Davide Libenzi7ef99642008-12-01 13:13:55 -08001233 return -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234
1235 /* Item initialization follow here ... */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 INIT_LIST_HEAD(&epi->rdllink);
1237 INIT_LIST_HEAD(&epi->fllink);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238 INIT_LIST_HEAD(&epi->pwqlist);
1239 epi->ep = ep;
Pekka Enbergb030a4d2005-06-23 00:10:03 -07001240 ep_set_ffd(&epi->ffd, tfile, fd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 epi->event = *event;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 epi->nwait = 0;
Davide Libenzid47de162007-05-15 01:40:41 -07001243 epi->next = EP_UNACTIVE_PTR;
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001244 if (epi->event.events & EPOLLWAKEUP) {
1245 error = ep_create_wakeup_source(epi);
1246 if (error)
1247 goto error_create_wakeup_source;
1248 } else {
Eric Wongeea1d582013-04-30 15:27:39 -07001249 RCU_INIT_POINTER(epi->ws, NULL);
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001250 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251
1252 /* Initialize the poll table using the queue callback */
1253 epq.epi = epi;
1254 init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);
Hans Verkuil626cf232012-03-23 15:02:27 -07001255 epq.pt._key = event->events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
1257 /*
1258 * Attach the item to the poll hooks and get current event bits.
1259 * We can safely use the file* here because its usage count has
Davide Libenzic7ea7632007-05-15 01:40:47 -07001260 * been increased by the caller of this function. Note that after
1261 * this operation completes, the poll callback can start hitting
1262 * the new item.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001263 */
1264 revents = tfile->f_op->poll(tfile, &epq.pt);
1265
1266 /*
1267 * We have to check if something went wrong during the poll wait queue
1268	 * install process. Namely, an allocation for a wait queue failed due
1269	 * to high memory pressure.
1270 */
Davide Libenzi7ef99642008-12-01 13:13:55 -08001271 error = -ENOMEM;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272 if (epi->nwait < 0)
Davide Libenzi7699acd2007-05-10 22:23:23 -07001273 goto error_unregister;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274
1275	/* Add the current item to the list of active epoll hooks for this file */
Jonathan Corbet68499912009-02-06 13:52:43 -07001276 spin_lock(&tfile->f_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 list_add_tail(&epi->fllink, &tfile->f_ep_links);
Jonathan Corbet68499912009-02-06 13:52:43 -07001278 spin_unlock(&tfile->f_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279
Davide Libenzic7ea7632007-05-15 01:40:47 -07001280 /*
1281 * Add the current item to the RB tree. All RB tree operations are
1282 * protected by "mtx", and ep_insert() is called with "mtx" held.
1283 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 ep_rbtree_insert(ep, epi);
1285
Jason Baron28d82dc2012-01-12 17:17:43 -08001286 /* now check if we've created too many backpaths */
1287 error = -EINVAL;
1288 if (reverse_path_check())
1289 goto error_remove_epi;
1290
Davide Libenzic7ea7632007-05-15 01:40:47 -07001291 /* We have to drop the new item inside our item list to keep track of it */
1292 spin_lock_irqsave(&ep->lock, flags);
1293
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 /* If the file is already "ready" we drop it inside the ready list */
Pekka Enbergb030a4d2005-06-23 00:10:03 -07001295 if ((revents & event->events) && !ep_is_linked(&epi->rdllink)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 list_add_tail(&epi->rdllink, &ep->rdllist);
Eric Wongeea1d582013-04-30 15:27:39 -07001297 ep_pm_stay_awake(epi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298
1299 /* Notify waiting tasks that events are available */
1300 if (waitqueue_active(&ep->wq))
Matthew Wilcox4a6e9e22007-08-30 16:10:22 -04001301 wake_up_locked(&ep->wq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 if (waitqueue_active(&ep->poll_wait))
1303 pwake++;
1304 }
1305
Davide Libenzic7ea7632007-05-15 01:40:47 -07001306 spin_unlock_irqrestore(&ep->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307
Robin Holt52bd19f72011-01-12 17:00:01 -08001308 atomic_long_inc(&ep->user->epoll_watches);
Davide Libenzi7ef99642008-12-01 13:13:55 -08001309
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310 /* We have to call this outside the lock */
1311 if (pwake)
Davide Libenzi5071f972009-03-31 15:24:10 -07001312 ep_poll_safewake(&ep->poll_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001313
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 return 0;
1315
Jason Baron28d82dc2012-01-12 17:17:43 -08001316error_remove_epi:
1317 spin_lock(&tfile->f_lock);
1318 if (ep_is_linked(&epi->fllink))
1319 list_del_init(&epi->fllink);
1320 spin_unlock(&tfile->f_lock);
1321
1322 rb_erase(&epi->rbn, &ep->rbr);
1323
Davide Libenzi7699acd2007-05-10 22:23:23 -07001324error_unregister:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 ep_unregister_pollwait(ep, epi);
1326
1327 /*
1328	 * We need to do this because an event could have arrived on some
Davide Libenzi67647d02007-05-15 01:40:52 -07001329 * allocated wait queue. Note that we don't care about the ep->ovflist
1330 * list, since that is used/cleaned only inside a section bound by "mtx".
1331 * And ep_insert() is called with "mtx" held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 */
Davide Libenzic7ea7632007-05-15 01:40:47 -07001333 spin_lock_irqsave(&ep->lock, flags);
Pekka Enbergb030a4d2005-06-23 00:10:03 -07001334 if (ep_is_linked(&epi->rdllink))
Davide Libenzi6192bd52007-05-08 00:25:41 -07001335 list_del_init(&epi->rdllink);
Davide Libenzic7ea7632007-05-15 01:40:47 -07001336 spin_unlock_irqrestore(&ep->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
Eric Wongeea1d582013-04-30 15:27:39 -07001338 wakeup_source_unregister(ep_wakeup_source(epi));
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001339
1340error_create_wakeup_source:
Pekka Enbergb030a4d2005-06-23 00:10:03 -07001341 kmem_cache_free(epi_cache, epi);
Davide Libenzi7ef99642008-12-01 13:13:55 -08001342
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343 return error;
1344}
1345
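/*
 * Illustrative standalone sketch, not part of this file: ep_insert() above
 * queues the new item immediately when the file is already ready, so an fd
 * that becomes readable before EPOLL_CTL_ADD is still reported by the very
 * first epoll_wait().  Minimal demo of that assumed behavior:
 */
#include <sys/epoll.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	int pipefd[2], epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN }, out;

	pipe(pipefd);
	write(pipefd[1], "x", 1);	/* readable before it is watched */
	ev.data.fd = pipefd[0];
	epoll_ctl(epfd, EPOLL_CTL_ADD, pipefd[0], &ev);
	/* prints 1 at once: ep_insert() put the item on the ready list */
	printf("%d\n", epoll_wait(epfd, &out, 1, 0));
	return 0;
}
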
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346/*
1347 * Modify the interest event mask and requeue the item as ready if the new
Davide Libenzic7ea7632007-05-15 01:40:47 -07001348 * mask matches the current file status. Must be called with "mtx" held.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 */
1350static int ep_modify(struct eventpoll *ep, struct epitem *epi, struct epoll_event *event)
1351{
1352 int pwake = 0;
1353 unsigned int revents;
Hans Verkuil626cf232012-03-23 15:02:27 -07001354 poll_table pt;
1355
1356 init_poll_funcptr(&pt, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357
1358 /*
Tony Battersbye057e152009-03-31 15:24:15 -07001359 * Set the new event interest mask before calling f_op->poll();
1360 * otherwise we might miss an event that happens between the
1361 * f_op->poll() call and the new event set registering.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 */
Eric Wong128dd172013-01-01 21:20:27 +00001363 epi->event.events = event->events; /* need barrier below */
Hans Verkuil626cf232012-03-23 15:02:27 -07001364 pt._key = event->events;
Tony Battersbye057e152009-03-31 15:24:15 -07001365 epi->event.data = event->data; /* protected by mtx */
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001366 if (epi->event.events & EPOLLWAKEUP) {
Eric Wongeea1d582013-04-30 15:27:39 -07001367 if (!ep_has_wakeup_source(epi))
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001368 ep_create_wakeup_source(epi);
Eric Wongeea1d582013-04-30 15:27:39 -07001369 } else if (ep_has_wakeup_source(epi)) {
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001370 ep_destroy_wakeup_source(epi);
1371 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372
1373 /*
Eric Wong128dd172013-01-01 21:20:27 +00001374 * The following barrier has two effects:
1375 *
1376 * 1) Flush epi changes above to other CPUs. This ensures
1377 * we do not miss events from ep_poll_callback if an
1378 * event occurs immediately after we call f_op->poll().
1379 * We need this because we did not take ep->lock while
1380 * changing epi above (but ep_poll_callback does take
1381 * ep->lock).
1382 *
1383 * 2) We also need to ensure we do not miss _past_ events
1384 * when calling f_op->poll(). This barrier also
1385 * pairs with the barrier in wq_has_sleeper (see
1386 * comments for wq_has_sleeper).
1387 *
1388 * This barrier will now guarantee ep_poll_callback or f_op->poll
1389 * (or both) will notice the readiness of an item.
1390 */
1391 smp_mb();
1392
1393 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 * Get current event bits. We can safely use the file* here because
1395 * its usage count has been increased by the caller of this function.
1396 */
Hans Verkuil626cf232012-03-23 15:02:27 -07001397 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399 /*
Davide Libenzic7ea7632007-05-15 01:40:47 -07001400 * If the item is "hot" and it is not registered inside the ready
Davide Libenzi67647d02007-05-15 01:40:52 -07001401 * list, push it inside.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402 */
Davide Libenzic7ea7632007-05-15 01:40:47 -07001403 if (revents & event->events) {
Tony Battersbye057e152009-03-31 15:24:15 -07001404 spin_lock_irq(&ep->lock);
Davide Libenzic7ea7632007-05-15 01:40:47 -07001405 if (!ep_is_linked(&epi->rdllink)) {
1406 list_add_tail(&epi->rdllink, &ep->rdllist);
Eric Wongeea1d582013-04-30 15:27:39 -07001407 ep_pm_stay_awake(epi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408
Davide Libenzic7ea7632007-05-15 01:40:47 -07001409 /* Notify waiting tasks that events are available */
1410 if (waitqueue_active(&ep->wq))
Matthew Wilcox4a6e9e22007-08-30 16:10:22 -04001411 wake_up_locked(&ep->wq);
Davide Libenzic7ea7632007-05-15 01:40:47 -07001412 if (waitqueue_active(&ep->poll_wait))
1413 pwake++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 }
Tony Battersbye057e152009-03-31 15:24:15 -07001415 spin_unlock_irq(&ep->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417
1418 /* We have to call this outside the lock */
1419 if (pwake)
Davide Libenzi5071f972009-03-31 15:24:10 -07001420 ep_poll_safewake(&ep->poll_wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
1422 return 0;
1423}
1424
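/*
 * Illustrative sketch, not part of this file: once an EPOLLONESHOT event has
 * been delivered, the item is disarmed (see the EP_PRIVATE_BITS masking in
 * ep_send_events_proc() below), and EPOLL_CTL_MOD -- i.e. ep_modify() -- is
 * how userspace re-arms it.  Hypothetical helper under that assumption:
 */
#include <sys/epoll.h>

static int rearm_oneshot(int epfd, int fd)
{
	struct epoll_event ev = {
		.events = EPOLLIN | EPOLLONESHOT, /* fire once, then disarm again */
		.data.fd = fd,
	};

	/* ep_modify() re-polls the file and re-queues it if already ready */
	return epoll_ctl(epfd, EPOLL_CTL_MOD, fd, &ev);
}
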
Davide Libenzi296e2362009-03-31 15:24:11 -07001425static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
1426 void *priv)
Davide Libenzi5071f972009-03-31 15:24:10 -07001427{
1428 struct ep_send_events_data *esed = priv;
1429 int eventcnt;
Davide Libenzi296e2362009-03-31 15:24:11 -07001430 unsigned int revents;
Davide Libenzi5071f972009-03-31 15:24:10 -07001431 struct epitem *epi;
1432 struct epoll_event __user *uevent;
Eric Wongeea1d582013-04-30 15:27:39 -07001433 struct wakeup_source *ws;
Hans Verkuil626cf232012-03-23 15:02:27 -07001434 poll_table pt;
1435
1436 init_poll_funcptr(&pt, NULL);
Davide Libenzi5071f972009-03-31 15:24:10 -07001437
Davide Libenzi296e2362009-03-31 15:24:11 -07001438 /*
Davide Libenzi5071f972009-03-31 15:24:10 -07001439	 * We can loop without the lock because we are passed a task-private list.
1440 * Items cannot vanish during the loop because ep_scan_ready_list() is
1441 * holding "mtx" during this call.
Davide Libenzi296e2362009-03-31 15:24:11 -07001442 */
Davide Libenzi5071f972009-03-31 15:24:10 -07001443 for (eventcnt = 0, uevent = esed->events;
1444 !list_empty(head) && eventcnt < esed->maxevents;) {
1445 epi = list_first_entry(head, struct epitem, rdllink);
1446
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001447 /*
1448 * Activate ep->ws before deactivating epi->ws to prevent
1449		 * triggering auto-suspend here (in case we reactivate epi->ws
1450 * below).
1451 *
1452 * This could be rearranged to delay the deactivation of epi->ws
1453 * instead, but then epi->ws would temporarily be out of sync
1454 * with ep_is_linked().
1455 */
Eric Wongeea1d582013-04-30 15:27:39 -07001456 ws = ep_wakeup_source(epi);
1457 if (ws) {
1458 if (ws->active)
1459 __pm_stay_awake(ep->ws);
1460 __pm_relax(ws);
1461 }
1462
Davide Libenzi5071f972009-03-31 15:24:10 -07001463 list_del_init(&epi->rdllink);
1464
Hans Verkuil626cf232012-03-23 15:02:27 -07001465 pt._key = epi->event.events;
1466 revents = epi->ffd.file->f_op->poll(epi->ffd.file, &pt) &
Davide Libenzi296e2362009-03-31 15:24:11 -07001467 epi->event.events;
Davide Libenzi5071f972009-03-31 15:24:10 -07001468
Davide Libenzi296e2362009-03-31 15:24:11 -07001469 /*
Davide Libenzi5071f972009-03-31 15:24:10 -07001470 * If the event mask intersect the caller-requested one,
1471 * deliver the event to userspace. Again, ep_scan_ready_list()
1472 * is holding "mtx", so no operations coming from userspace
1473 * can change the item.
Davide Libenzi296e2362009-03-31 15:24:11 -07001474 */
1475 if (revents) {
Davide Libenzi5071f972009-03-31 15:24:10 -07001476 if (__put_user(revents, &uevent->events) ||
Tony Battersbyd0305882009-03-31 15:24:14 -07001477 __put_user(epi->event.data, &uevent->data)) {
1478 list_add(&epi->rdllink, head);
Eric Wongeea1d582013-04-30 15:27:39 -07001479 ep_pm_stay_awake(epi);
Davide Libenzi296e2362009-03-31 15:24:11 -07001480 return eventcnt ? eventcnt : -EFAULT;
Tony Battersbyd0305882009-03-31 15:24:14 -07001481 }
Davide Libenzi296e2362009-03-31 15:24:11 -07001482 eventcnt++;
Davide Libenzi5071f972009-03-31 15:24:10 -07001483 uevent++;
Davide Libenzi296e2362009-03-31 15:24:11 -07001484 if (epi->event.events & EPOLLONESHOT)
1485 epi->event.events &= EP_PRIVATE_BITS;
1486 else if (!(epi->event.events & EPOLLET)) {
1487 /*
1488 * If this file has been added with Level
1489				 * Trigger mode, we need to insert it back into
1490				 * the ready list, so that the next call to
1491				 * epoll_wait() will re-check the events'
Lucas De Marchi25985ed2011-03-30 22:57:33 -03001492 * availability. At this point, no one can insert
Davide Libenzi296e2362009-03-31 15:24:11 -07001493 * into ep->rdllist besides us. The epoll_ctl()
1494 * callers are locked out by
1495 * ep_scan_ready_list() holding "mtx" and the
1496 * poll callback will queue them in ep->ovflist.
1497 */
1498 list_add_tail(&epi->rdllink, &ep->rdllist);
Eric Wongeea1d582013-04-30 15:27:39 -07001499 ep_pm_stay_awake(epi);
Davide Libenzi296e2362009-03-31 15:24:11 -07001500 }
1501 }
1502 }
Davide Libenzi5071f972009-03-31 15:24:10 -07001503
1504 return eventcnt;
1505}
1506
Davide Libenzi296e2362009-03-31 15:24:11 -07001507static int ep_send_events(struct eventpoll *ep,
1508 struct epoll_event __user *events, int maxevents)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509{
Davide Libenzi5071f972009-03-31 15:24:10 -07001510 struct ep_send_events_data esed;
Davide Libenzi6192bd52007-05-08 00:25:41 -07001511
Davide Libenzi5071f972009-03-31 15:24:10 -07001512 esed.maxevents = maxevents;
1513 esed.events = events;
Davide Libenzid47de162007-05-15 01:40:41 -07001514
Nelson Elhaged8805e62011-10-31 17:13:14 -07001515 return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516}
1517
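/*
 * Illustrative standalone sketch, not part of this file: the level-trigger
 * re-insert in ep_send_events_proc() above is what makes a still-readable fd
 * show up in every epoll_wait() call, while EPOLLET reports it only once per
 * edge.  Demo of that assumed contrast:
 */
#include <sys/epoll.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	int p[2], n1, n2, epfd = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN }, out; /* level-triggered */

	pipe(p);
	ev.data.fd = p[0];
	epoll_ctl(epfd, EPOLL_CTL_ADD, p[0], &ev);
	write(p[1], "x", 1);
	n1 = epoll_wait(epfd, &out, 1, 0);
	n2 = epoll_wait(epfd, &out, 1, 0);
	printf("LT: %d %d\n", n1, n2);	/* 1 1: re-added to rdllist each time */

	ev.events = EPOLLIN | EPOLLET;	/* switch to edge-triggered */
	epoll_ctl(epfd, EPOLL_CTL_MOD, p[0], &ev);
	n1 = epoll_wait(epfd, &out, 1, 0);
	n2 = epoll_wait(epfd, &out, 1, 0);
	printf("ET: %d %d\n", n1, n2);	/* 1 0: no new edge for the second call */
	return 0;
}
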
Eric Dumazet0781b902011-02-01 15:52:35 -08001518static inline struct timespec ep_set_mstimeout(long ms)
1519{
1520 struct timespec now, ts = {
1521 .tv_sec = ms / MSEC_PER_SEC,
1522 .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
1523 };
1524
1525 ktime_get_ts(&now);
1526 return timespec_add_safe(now, ts);
1527}
1528
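/*
 * Illustrative userspace analog, not part of this file: what
 * ep_set_mstimeout() above does, expressed with clock_gettime() standing in
 * for ktime_get_ts().  Unlike timespec_add_safe(), this sketch does not
 * guard against tv_sec overflow.
 */
#include <time.h>

#define MSEC_PER_SEC	1000L
#define NSEC_PER_MSEC	1000000L
#define NSEC_PER_SEC	1000000000L

static struct timespec set_mstimeout(long ms)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	now.tv_sec += ms / MSEC_PER_SEC;
	now.tv_nsec += NSEC_PER_MSEC * (ms % MSEC_PER_SEC);
	if (now.tv_nsec >= NSEC_PER_SEC) {	/* normalize the nsec carry */
		now.tv_sec++;
		now.tv_nsec -= NSEC_PER_SEC;
	}
	return now;
}
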
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001529/**
1530 * ep_poll - Retrieves ready events, and delivers them to the caller-supplied
1531 *           event buffer.
1532 *
1533 * @ep: Pointer to the eventpoll context.
1534 * @events: Pointer to the userspace buffer where the ready events should be
1535 * stored.
1536 * @maxevents: Size (in terms of number of events) of the caller event buffer.
1537 * @timeout: Maximum timeout for the ready events fetch operation, in
1538 * milliseconds. If the @timeout is zero, the function will not block,
1539 * while if the @timeout is less than zero, the function will block
1540 * until at least one event has been retrieved (or an error
1541 * occurred).
1542 *
1543 * Returns: Returns the number of ready events which have been fetched, or an
1544 *          error code in case of error.
1545 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
1547 int maxevents, long timeout)
1548{
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001549 int res = 0, eavail, timed_out = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 unsigned long flags;
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001551 long slack = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 wait_queue_t wait;
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001553 ktime_t expires, *to = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001555 if (timeout > 0) {
Eric Dumazet0781b902011-02-01 15:52:35 -08001556 struct timespec end_time = ep_set_mstimeout(timeout);
1557
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001558 slack = select_estimate_accuracy(&end_time);
1559 to = &expires;
1560 *to = timespec_to_ktime(end_time);
1561 } else if (timeout == 0) {
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001562 /*
1563		 * Avoid the unnecessary trip to the wait queue loop if the
1564		 * caller specified a non-blocking operation.
1565 */
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001566 timed_out = 1;
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001567 spin_lock_irqsave(&ep->lock, flags);
1568 goto check_events;
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001569 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001571fetch_events:
Davide Libenzic7ea7632007-05-15 01:40:47 -07001572 spin_lock_irqsave(&ep->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001573
Davide Libenzi3fb0e582011-03-22 16:34:46 -07001574 if (!ep_events_available(ep)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001575 /*
1576 * We don't have any available event to return to the caller.
1577		 * We need to sleep here, and we will be woken up by
1578		 * ep_poll_callback() when events become available.
1579 */
1580 init_waitqueue_entry(&wait, current);
Changli Gaoa93d2f12010-05-07 14:33:26 +08001581 __add_wait_queue_exclusive(&ep->wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583 for (;;) {
1584 /*
1585			 * We don't want to sleep if ep_poll_callback() sends us
1586 * a wakeup in between. That's why we set the task state
1587 * to TASK_INTERRUPTIBLE before doing the checks.
1588 */
1589 set_current_state(TASK_INTERRUPTIBLE);
Davide Libenzi3fb0e582011-03-22 16:34:46 -07001590 if (ep_events_available(ep) || timed_out)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 break;
1592 if (signal_pending(current)) {
1593 res = -EINTR;
1594 break;
1595 }
1596
Davide Libenzic7ea7632007-05-15 01:40:47 -07001597 spin_unlock_irqrestore(&ep->lock, flags);
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001598 if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS))
1599 timed_out = 1;
1600
Davide Libenzic7ea7632007-05-15 01:40:47 -07001601 spin_lock_irqsave(&ep->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 }
Davide Libenzi3419b232006-06-25 05:48:14 -07001603 __remove_wait_queue(&ep->wq, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604
1605 set_current_state(TASK_RUNNING);
1606 }
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001607check_events:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608	/* Is it worth trying to dig for events? */
Davide Libenzi3fb0e582011-03-22 16:34:46 -07001609 eavail = ep_events_available(ep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Davide Libenzic7ea7632007-05-15 01:40:47 -07001611 spin_unlock_irqrestore(&ep->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612
1613 /*
1614	 * Try to transfer events to user space. If we get no events and
1615	 * there is still timeout left over, we go back and retry in search
1616	 * of more luck.
1617 */
1618 if (!res && eavail &&
Shawn Bohrer95aac7b2010-10-27 15:34:54 -07001619 !(res = ep_send_events(ep, events, maxevents)) && !timed_out)
Shawn Bohrerf4d93ad2011-03-22 16:34:47 -07001620 goto fetch_events;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621
1622 return res;
1623}
1624
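/*
 * Illustrative sketch, not part of this file: ep_poll() above returns -EINTR
 * when a signal arrives during the sleep, which userspace sees as errno ==
 * EINTR.  A typical caller therefore retries; minimal loop under that
 * assumption:
 */
#include <sys/epoll.h>
#include <errno.h>

static int wait_for_events(int epfd, struct epoll_event *evs, int max)
{
	int n;

	do {
		n = epoll_wait(epfd, evs, max, -1);	/* block indefinitely */
	} while (n < 0 && errno == EINTR);		/* interrupted: retry */
	return n;
}
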
Davide Libenzi22bacca2011-02-25 14:44:12 -08001625/**
1626 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1627 *                      API, to verify that adding an epoll file inside another
1628 *                      epoll structure does not violate the constraints in
1629 *                      terms of closed loops or too-deep chains (which can
1630 * result in excessive stack usage).
1631 *
1632 * @priv: Pointer to the epoll file to be currently checked.
1633 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1634 * data structure pointer.
1635 * @call_nests: Current depth of the @ep_call_nested() call stack.
1636 *
1637 * Returns: Returns zero if adding the epoll @file inside current epoll
1638 * structure @ep does not violate the constraints, or -1 otherwise.
1639 */
1640static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1641{
1642 int error = 0;
1643 struct file *file = priv;
1644 struct eventpoll *ep = file->private_data;
Jason Baron28d82dc2012-01-12 17:17:43 -08001645 struct eventpoll *ep_tovisit;
Davide Libenzi22bacca2011-02-25 14:44:12 -08001646 struct rb_node *rbp;
1647 struct epitem *epi;
1648
Nelson Elhaged8805e62011-10-31 17:13:14 -07001649 mutex_lock_nested(&ep->mtx, call_nests + 1);
Jason Baron28d82dc2012-01-12 17:17:43 -08001650 ep->visited = 1;
1651 list_add(&ep->visited_list_link, &visited_list);
Davide Libenzi22bacca2011-02-25 14:44:12 -08001652 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1653 epi = rb_entry(rbp, struct epitem, rbn);
1654 if (unlikely(is_file_epoll(epi->ffd.file))) {
Jason Baron28d82dc2012-01-12 17:17:43 -08001655 ep_tovisit = epi->ffd.file->private_data;
1656 if (ep_tovisit->visited)
1657 continue;
Davide Libenzi22bacca2011-02-25 14:44:12 -08001658 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
Jason Baron28d82dc2012-01-12 17:17:43 -08001659 ep_loop_check_proc, epi->ffd.file,
1660 ep_tovisit, current);
Davide Libenzi22bacca2011-02-25 14:44:12 -08001661 if (error != 0)
1662 break;
Jason Baron28d82dc2012-01-12 17:17:43 -08001663 } else {
1664 /*
1665 * If we've reached a file that is not associated with
1666 * an ep, then we need to check if the newly added
1667 * links are going to add too many wakeup paths. We do
1668 * this by adding it to the tfile_check_list, if it's
1669 * not already there, and calling reverse_path_check()
1670 * during ep_insert().
1671 */
1672 if (list_empty(&epi->ffd.file->f_tfile_llink))
1673 list_add(&epi->ffd.file->f_tfile_llink,
1674 &tfile_check_list);
Davide Libenzi22bacca2011-02-25 14:44:12 -08001675 }
1676 }
1677 mutex_unlock(&ep->mtx);
1678
1679 return error;
1680}
1681
1682/**
1683 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1684 *                 inside another epoll file (represented by @ep) does not create
1685 *                 closed loops or too-deep chains.
1686 *
1687 * @ep: Pointer to the epoll private data structure.
1688 * @file: Pointer to the epoll file to be checked.
1689 *
1690 * Returns: Returns zero if adding the epoll @file inside current epoll
1691 * structure @ep does not violate the constraints, or -1 otherwise.
1692 */
1693static int ep_loop_check(struct eventpoll *ep, struct file *file)
1694{
Jason Baron28d82dc2012-01-12 17:17:43 -08001695 int ret;
1696 struct eventpoll *ep_cur, *ep_next;
1697
1698 ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
Davide Libenzi22bacca2011-02-25 14:44:12 -08001699 ep_loop_check_proc, file, ep, current);
Jason Baron28d82dc2012-01-12 17:17:43 -08001700 /* clear visited list */
1701 list_for_each_entry_safe(ep_cur, ep_next, &visited_list,
1702 visited_list_link) {
1703 ep_cur->visited = 0;
1704 list_del(&ep_cur->visited_list_link);
1705 }
1706 return ret;
1707}
1708
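/*
 * Illustrative standalone sketch, not part of this file: the check above is
 * what turns a cyclic epoll graph into -ELOOP at EPOLL_CTL_ADD time.  Demo
 * of that assumed outcome:
 */
#include <sys/epoll.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	int a = epoll_create1(0), b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	ev.data.fd = b;
	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);	/* a watches b: allowed */
	ev.data.fd = a;
	/* b watching a would close the loop a -> b -> a */
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0 && errno == ELOOP)
		printf("cycle rejected by the loop check\n");
	return 0;
}
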
1709static void clear_tfile_check_list(void)
1710{
1711 struct file *file;
1712
1713 /* first clear the tfile_check_list */
1714 while (!list_empty(&tfile_check_list)) {
1715 file = list_first_entry(&tfile_check_list, struct file,
1716 f_tfile_llink);
1717 list_del_init(&file->f_tfile_llink);
1718 }
1719 INIT_LIST_HEAD(&tfile_check_list);
Davide Libenzi22bacca2011-02-25 14:44:12 -08001720}
1721
Davide Libenzi7699acd2007-05-10 22:23:23 -07001722/*
Andrew Morton523723b2008-08-12 15:09:01 -07001723 * Open an eventpoll file descriptor.
Davide Libenzi7699acd2007-05-10 22:23:23 -07001724 */
Heiko Carstens5a8a82b2009-01-14 14:14:25 +01001725SYSCALL_DEFINE1(epoll_create1, int, flags)
Davide Libenzi7699acd2007-05-10 22:23:23 -07001726{
Jason Baron28d82dc2012-01-12 17:17:43 -08001727 int error, fd;
Davide Libenzibb57c3e2009-03-31 15:24:12 -07001728 struct eventpoll *ep = NULL;
Jason Baron28d82dc2012-01-12 17:17:43 -08001729 struct file *file;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001730
Ulrich Dreppere38b36f2008-07-23 21:29:42 -07001731 /* Check the EPOLL_* constant for consistency. */
1732 BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);
1733
Davide Libenzi296e2362009-03-31 15:24:11 -07001734 if (flags & ~EPOLL_CLOEXEC)
1735 return -EINVAL;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001736 /*
Davide Libenzibb57c3e2009-03-31 15:24:12 -07001737 * Create the internal data structure ("struct eventpoll").
Davide Libenzi7699acd2007-05-10 22:23:23 -07001738 */
Ulrich Drepper9fe5ad92008-07-23 21:29:43 -07001739 error = ep_alloc(&ep);
Davide Libenzibb57c3e2009-03-31 15:24:12 -07001740 if (error < 0)
1741 return error;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001742 /*
1743	 * Creates all the items needed to set up an eventpoll file. That is,
Al Viro2030a422008-02-23 06:46:49 -05001744 * a file structure and a free file descriptor.
Davide Libenzi7699acd2007-05-10 22:23:23 -07001745 */
Jason Baron28d82dc2012-01-12 17:17:43 -08001746 fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));
1747 if (fd < 0) {
1748 error = fd;
1749 goto out_free_ep;
1750 }
1751 file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,
Roland Dreier628ff7c2009-12-18 09:41:24 -08001752 O_RDWR | (flags & O_CLOEXEC));
Jason Baron28d82dc2012-01-12 17:17:43 -08001753 if (IS_ERR(file)) {
1754 error = PTR_ERR(file);
1755 goto out_free_fd;
1756 }
Jason Baron28d82dc2012-01-12 17:17:43 -08001757 ep->file = file;
Al Viro98022742012-08-17 22:42:36 -04001758 fd_install(fd, file);
Jason Baron28d82dc2012-01-12 17:17:43 -08001759 return fd;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001760
Jason Baron28d82dc2012-01-12 17:17:43 -08001761out_free_fd:
1762 put_unused_fd(fd);
1763out_free_ep:
1764 ep_free(ep);
Davide Libenzibb57c3e2009-03-31 15:24:12 -07001765 return error;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001766}
1767
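/*
 * Illustrative sketch, not part of this file: EPOLL_CLOEXEC is the only flag
 * accepted above, mapping to O_CLOEXEC on the anon inode file; any other bit
 * fails with -EINVAL.  Hypothetical wrapper:
 */
#include <sys/epoll.h>

static int make_epfd(void)
{
	return epoll_create1(EPOLL_CLOEXEC);	/* closed automatically on exec */
}
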
Heiko Carstens5a8a82b2009-01-14 14:14:25 +01001768SYSCALL_DEFINE1(epoll_create, int, size)
Ulrich Dreppera0998b52008-07-23 21:29:27 -07001769{
Davide Libenzibfe38912009-05-12 13:19:44 -07001770 if (size <= 0)
Ulrich Drepper9fe5ad92008-07-23 21:29:43 -07001771 return -EINVAL;
1772
1773 return sys_epoll_create1(0);
Ulrich Dreppera0998b52008-07-23 21:29:27 -07001774}
1775
Davide Libenzi7699acd2007-05-10 22:23:23 -07001776/*
1777 * The following function implements the controller interface for
1778 * the eventpoll file that enables the insertion/removal/change of
Davide Libenzi67647d02007-05-15 01:40:52 -07001779 * file descriptors inside the interest set.
Davide Libenzi7699acd2007-05-10 22:23:23 -07001780 */
Heiko Carstens5a8a82b2009-01-14 14:14:25 +01001781SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1782 struct epoll_event __user *, event)
Davide Libenzi7699acd2007-05-10 22:23:23 -07001783{
1784 int error;
Davide Libenzi22bacca2011-02-25 14:44:12 -08001785 int did_lock_epmutex = 0;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001786 struct file *file, *tfile;
1787 struct eventpoll *ep;
1788 struct epitem *epi;
1789 struct epoll_event epds;
1790
Davide Libenzi7699acd2007-05-10 22:23:23 -07001791 error = -EFAULT;
1792 if (ep_op_has_event(op) &&
1793 copy_from_user(&epds, event, sizeof(struct epoll_event)))
1794 goto error_return;
1795
1796 /* Get the "struct file *" for the eventpoll file */
1797 error = -EBADF;
1798 file = fget(epfd);
1799 if (!file)
1800 goto error_return;
1801
1802 /* Get the "struct file *" for the target file */
1803 tfile = fget(fd);
1804 if (!tfile)
1805 goto error_fput;
1806
1807 /* The target file descriptor must support poll */
1808 error = -EPERM;
1809 if (!tfile->f_op || !tfile->f_op->poll)
1810 goto error_tgt_fput;
1811
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001812 /* Check if EPOLLWAKEUP is allowed */
Michael Kerriskd9914cf2012-07-17 21:37:27 +02001813 if ((epds.events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))
Rafael J. Wysockia8159412012-05-21 21:20:48 +02001814 epds.events &= ~EPOLLWAKEUP;
Arve Hjønnevåg4d7e30d2012-05-01 21:33:34 +02001815
Davide Libenzi7699acd2007-05-10 22:23:23 -07001816 /*
1817 * We have to check that the file structure underneath the file descriptor
1818	 * the user passed to us _is_ an eventpoll file. We also do not permit
1819 * adding an epoll file descriptor inside itself.
1820 */
1821 error = -EINVAL;
1822 if (file == tfile || !is_file_epoll(file))
1823 goto error_tgt_fput;
1824
1825 /*
1826 * At this point it is safe to assume that the "private_data" contains
1827 * our own data structure.
1828 */
1829 ep = file->private_data;
1830
Davide Libenzi22bacca2011-02-25 14:44:12 -08001831 /*
1832	 * When we insert an epoll file descriptor inside another epoll file
1833	 * descriptor, there is the chance of creating closed loops, which are
Jason Baron28d82dc2012-01-12 17:17:43 -08001834	 * better handled here than in more critical paths. While we are
1835 * checking for loops we also determine the list of files reachable
1836 * and hang them on the tfile_check_list, so we can check that we
1837 * haven't created too many possible wakeup paths.
Davide Libenzi22bacca2011-02-25 14:44:12 -08001838 *
Jason Baron28d82dc2012-01-12 17:17:43 -08001839 * We need to hold the epmutex across both ep_insert and ep_remove
1840 * b/c we want to make sure we are looking at a coherent view of
1841 * epoll network.
Davide Libenzi22bacca2011-02-25 14:44:12 -08001842 */
Jason Baron28d82dc2012-01-12 17:17:43 -08001843 if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) {
Davide Libenzi22bacca2011-02-25 14:44:12 -08001844 mutex_lock(&epmutex);
1845 did_lock_epmutex = 1;
Davide Libenzi22bacca2011-02-25 14:44:12 -08001846 }
Jason Baron28d82dc2012-01-12 17:17:43 -08001847 if (op == EPOLL_CTL_ADD) {
1848 if (is_file_epoll(tfile)) {
1849 error = -ELOOP;
Jason Baron13d51802012-04-25 16:01:47 -07001850 if (ep_loop_check(ep, tfile) != 0) {
1851 clear_tfile_check_list();
Jason Baron28d82dc2012-01-12 17:17:43 -08001852 goto error_tgt_fput;
Jason Baron13d51802012-04-25 16:01:47 -07001853 }
Jason Baron28d82dc2012-01-12 17:17:43 -08001854 } else
1855 list_add(&tfile->f_tfile_llink, &tfile_check_list);
1856 }
Davide Libenzi22bacca2011-02-25 14:44:12 -08001857
Nelson Elhaged8805e62011-10-31 17:13:14 -07001858 mutex_lock_nested(&ep->mtx, 0);
Davide Libenzi7699acd2007-05-10 22:23:23 -07001859
Davide Libenzi67647d02007-05-15 01:40:52 -07001860 /*
1861	 * Try to look up the file inside our RB tree. Since we grabbed "mtx"
1862 * above, we can be sure to be able to use the item looked up by
1863 * ep_find() till we release the mutex.
1864 */
Davide Libenzi7699acd2007-05-10 22:23:23 -07001865 epi = ep_find(ep, tfile, fd);
1866
1867 error = -EINVAL;
1868 switch (op) {
1869 case EPOLL_CTL_ADD:
1870 if (!epi) {
1871 epds.events |= POLLERR | POLLHUP;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001872 error = ep_insert(ep, &epds, tfile, fd);
1873 } else
1874 error = -EEXIST;
Jason Baron28d82dc2012-01-12 17:17:43 -08001875 clear_tfile_check_list();
Davide Libenzi7699acd2007-05-10 22:23:23 -07001876 break;
1877 case EPOLL_CTL_DEL:
1878 if (epi)
1879 error = ep_remove(ep, epi);
1880 else
1881 error = -ENOENT;
1882 break;
1883 case EPOLL_CTL_MOD:
1884 if (epi) {
1885 epds.events |= POLLERR | POLLHUP;
1886 error = ep_modify(ep, epi, &epds);
1887 } else
1888 error = -ENOENT;
1889 break;
1890 }
Davide Libenzid47de162007-05-15 01:40:41 -07001891 mutex_unlock(&ep->mtx);
Davide Libenzi7699acd2007-05-10 22:23:23 -07001892
1893error_tgt_fput:
Jason Baron28d82dc2012-01-12 17:17:43 -08001894 if (did_lock_epmutex)
Davide Libenzi22bacca2011-02-25 14:44:12 -08001895 mutex_unlock(&epmutex);
1896
Davide Libenzi7699acd2007-05-10 22:23:23 -07001897 fput(tfile);
1898error_fput:
1899 fput(file);
1900error_return:
Davide Libenzi7699acd2007-05-10 22:23:23 -07001901
1902 return error;
1903}
1904
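/*
 * Illustrative sketch, not part of this file: ep_op_has_event() above is
 * false for EPOLL_CTL_DEL, so the event pointer is never read on removal and
 * may be NULL on kernels of this vintage; portable code still passes a dummy
 * for pre-2.6.9 kernels.  Hypothetical helper:
 */
#include <sys/epoll.h>

static int drop_watch(int epfd, int fd)
{
	struct epoll_event dummy = { 0 };	/* only for old-kernel portability */

	return epoll_ctl(epfd, EPOLL_CTL_DEL, fd, &dummy);
}
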
1905/*
1906 * Implement the event wait interface for the eventpoll file. It is the kernel
1907 * part of the user space epoll_wait(2).
1908 */
Heiko Carstens5a8a82b2009-01-14 14:14:25 +01001909SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,
1910 int, maxevents, int, timeout)
Davide Libenzi7699acd2007-05-10 22:23:23 -07001911{
Al Viro2903ff02012-08-28 12:52:22 -04001912 int error;
1913 struct fd f;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001914 struct eventpoll *ep;
1915
Davide Libenzi7699acd2007-05-10 22:23:23 -07001916	/* The maximum number of events must be greater than zero */
1917 if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)
1918 return -EINVAL;
1919
1920 /* Verify that the area passed by the user is writeable */
Al Viro2903ff02012-08-28 12:52:22 -04001921 if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event)))
1922 return -EFAULT;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001923
1924 /* Get the "struct file *" for the eventpoll file */
Al Viro2903ff02012-08-28 12:52:22 -04001925 f = fdget(epfd);
1926 if (!f.file)
1927 return -EBADF;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001928
1929 /*
1930 * We have to check that the file structure underneath the fd
1931 * the user passed to us _is_ an eventpoll file.
1932 */
1933 error = -EINVAL;
Al Viro2903ff02012-08-28 12:52:22 -04001934 if (!is_file_epoll(f.file))
Davide Libenzi7699acd2007-05-10 22:23:23 -07001935 goto error_fput;
1936
1937 /*
1938 * At this point it is safe to assume that the "private_data" contains
1939 * our own data structure.
1940 */
Al Viro2903ff02012-08-28 12:52:22 -04001941 ep = f.file->private_data;
Davide Libenzi7699acd2007-05-10 22:23:23 -07001942
1943 /* Time to fish for events ... */
1944 error = ep_poll(ep, events, maxevents, timeout);
1945
1946error_fput:
Al Viro2903ff02012-08-28 12:52:22 -04001947 fdput(f);
Davide Libenzi7699acd2007-05-10 22:23:23 -07001948 return error;
1949}
1950
Davide Libenzi7699acd2007-05-10 22:23:23 -07001951/*
1952 * Implement the event wait interface for the eventpoll file. It is the kernel
1953 * part of the user space epoll_pwait(2).
1954 */
Heiko Carstens5a8a82b2009-01-14 14:14:25 +01001955SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
1956 int, maxevents, int, timeout, const sigset_t __user *, sigmask,
1957 size_t, sigsetsize)
Davide Libenzi7699acd2007-05-10 22:23:23 -07001958{
1959 int error;
1960 sigset_t ksigmask, sigsaved;
1961
1962 /*
1963 * If the caller wants a certain signal mask to be set during the wait,
1964 * we apply it here.
1965 */
1966 if (sigmask) {
1967 if (sigsetsize != sizeof(sigset_t))
1968 return -EINVAL;
1969 if (copy_from_user(&ksigmask, sigmask, sizeof(ksigmask)))
1970 return -EFAULT;
1971 sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
1972 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
1973 }
1974
1975 error = sys_epoll_wait(epfd, events, maxevents, timeout);
1976
1977 /*
1978 * If we changed the signal mask, we need to restore the original one.
1979 * In case we've got a signal while waiting, we do not restore the
1980 * signal mask yet, and we allow do_signal() to deliver the signal on
1981 * the way back to userspace, before the signal mask is restored.
1982 */
1983 if (sigmask) {
1984 if (error == -EINTR) {
1985 memcpy(&current->saved_sigmask, &sigsaved,
Davide Libenzic7ea7632007-05-15 01:40:47 -07001986 sizeof(sigsaved));
Roland McGrath4e4c22c2008-04-30 00:53:06 -07001987 set_restore_sigmask();
Davide Libenzi7699acd2007-05-10 22:23:23 -07001988 } else
1989 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1990 }
1991
1992 return error;
1993}
1994
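/*
 * Illustrative sketch, not part of this file: as the sigprocmask() dance
 * above shows, epoll_pwait() installs @sigmask only for the duration of the
 * wait and restores the original mask afterwards.  Sketch that atomically
 * opens a window for all signals (including an otherwise-blocked SIGINT)
 * only while waiting:
 */
#include <sys/epoll.h>
#include <signal.h>

static int wait_with_open_mask(int epfd, struct epoll_event *evs, int max)
{
	sigset_t during;

	sigemptyset(&during);	/* block nothing while inside the wait */
	/* the caller's mask is put back on return, exactly as above */
	return epoll_pwait(epfd, evs, max, -1, &during);
}
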
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995static int __init eventpoll_init(void)
1996{
Davide Libenzi7ef99642008-12-01 13:13:55 -08001997 struct sysinfo si;
1998
1999 si_meminfo(&si);
Davide Libenzi9df04e12009-01-29 14:25:26 -08002000 /*
2001	 * Allows top 4% of lowmem to be allocated for epoll watches (per user).
2002 */
2003 max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
Davide Libenzi7ef99642008-12-01 13:13:55 -08002004 EP_ITEM_COST;
Robin Holt52bd19f72011-01-12 17:00:01 -08002005 BUG_ON(max_user_watches < 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006
Davide Libenzi22bacca2011-02-25 14:44:12 -08002007 /*
2008 * Initialize the structure used to perform epoll file descriptor
2009 * inclusion loops checks.
2010 */
2011 ep_nested_calls_init(&poll_loop_ncalls);
2012
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 /* Initialize the structure used to perform safe poll wait head wake ups */
Davide Libenzi5071f972009-03-31 15:24:10 -07002014 ep_nested_calls_init(&poll_safewake_ncalls);
2015
2016 /* Initialize the structure used to perform file's f_op->poll() calls */
2017 ep_nested_calls_init(&poll_readywalk_ncalls);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018
Eric Wong39732ca2013-04-30 15:27:38 -07002019 /*
2020 * We can have many thousands of epitems, so prevent this from
2021 * using an extra cache line on 64-bit (and smaller) CPUs
2022 */
2023 BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);
2024
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 /* Allocates slab cache used to allocate "struct epitem" items */
2026 epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),
Davide Libenzibb57c3e2009-03-31 15:24:12 -07002027 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028
2029 /* Allocates slab cache used to allocate "struct eppoll_entry" */
2030 pwq_cache = kmem_cache_create("eventpoll_pwq",
Davide Libenzibb57c3e2009-03-31 15:24:12 -07002031 sizeof(struct eppoll_entry), 0, SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002032
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034}
Davide Libenzicea69242007-05-10 22:23:22 -07002035fs_initcall(eventpoll_init);