/*
 *  fs/eventfd.c
 *
 *  Copyright (C) 2007  Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include <linux/file.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/sched/signal.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/anon_inodes.h>
#include <linux/syscalls.h>
#include <linux/export.h>
#include <linux/kref.h>
#include <linux/eventfd.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

DEFINE_PER_CPU(int, eventfd_wake_count);

struct eventfd_ctx {
	struct kref kref;
	wait_queue_head_t wqh;
	/*
	 * Every time that a write(2) is performed on an eventfd, the
	 * value of the __u64 being written is added to "count" and a
	 * wakeup is performed on "wqh". A read(2) will return the "count"
	 * value to userspace, and will reset "count" to zero. The kernel
	 * side eventfd_signal() also adds to the "count" counter and
	 * issues a wakeup.
	 */
	__u64 count;
	unsigned int flags;
};

/**
 * eventfd_signal - Adds @n to the eventfd counter.
 * @ctx: [in] Pointer to the eventfd context.
 * @n: [in] Value to be added to the eventfd internal counter.
 *          The value cannot be negative.
 *
 * This function is supposed to be called by the kernel in paths that do not
 * allow sleeping. In this function we allow the counter to reach the ULLONG_MAX
 * value, and we signal this as an overflow condition by returning EPOLLERR
 * to poll(2).
 *
 * Returns the amount by which the counter was incremented. This will be less
 * than @n if the counter has overflowed.
 */
__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
{
	unsigned long flags;

	/*
	 * Deadlock or stack overflow issues can happen if we recurse here
	 * through waitqueue wakeup handlers. If the caller uses potentially
	 * nested waitqueues with custom wakeup handlers, then it should
	 * check eventfd_signal_count() before calling this function. If
	 * it returns true, the eventfd_signal() call should be deferred to a
	 * safe context.
	 */
	if (WARN_ON_ONCE(this_cpu_read(eventfd_wake_count)))
		return 0;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	this_cpu_inc(eventfd_wake_count);
	if (ULLONG_MAX - ctx->count < n)
		n = ULLONG_MAX - ctx->count;
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	this_cpu_dec(eventfd_wake_count);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return n;
}
EXPORT_SYMBOL_GPL(eventfd_signal);
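
/*
 * Usage sketch (illustrative, not part of this file): a driver signaling
 * an eventfd from a hard-irq path. eventfd_signal() never sleeps, so it
 * is safe here; "struct my_dev" and my_dev_irq() are hypothetical names.
 *
 *	static irqreturn_t my_dev_irq(int irq, void *data)
 *	{
 *		struct my_dev *dev = data;
 *
 *		if (dev->trigger_ctx)
 *			eventfd_signal(dev->trigger_ctx, 1);
 *		return IRQ_HANDLED;
 *	}
 */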

static void eventfd_free_ctx(struct eventfd_ctx *ctx)
{
	kfree(ctx);
}

static void eventfd_free(struct kref *kref)
{
	struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref);

	eventfd_free_ctx(ctx);
}

/**
 * eventfd_ctx_put - Releases a reference to the internal eventfd context.
 * @ctx: [in] Pointer to eventfd context.
 *
 * The eventfd context reference must have been previously acquired either
 * with eventfd_ctx_fdget() or eventfd_ctx_fileget().
 */
void eventfd_ctx_put(struct eventfd_ctx *ctx)
{
	kref_put(&ctx->kref, eventfd_free);
}
EXPORT_SYMBOL_GPL(eventfd_ctx_put);

static int eventfd_release(struct inode *inode, struct file *file)
{
	struct eventfd_ctx *ctx = file->private_data;

	wake_up_poll(&ctx->wqh, EPOLLHUP);
	eventfd_ctx_put(ctx);
	return 0;
}

static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
	struct eventfd_ctx *ctx = file->private_data;
	__poll_t events = 0;
	u64 count;

	poll_wait(file, &ctx->wqh, wait);

	/*
	 * All writes to ctx->count occur within ctx->wqh.lock. This read
	 * can be done outside ctx->wqh.lock because we know that poll_wait
	 * takes that lock (through add_wait_queue) if our caller will sleep.
	 *
	 * The read _can_ therefore seep into add_wait_queue's critical
	 * section, but cannot move above it! add_wait_queue's spin_lock acts
	 * as an acquire barrier and ensures that the read is ordered properly
	 * against the writes. The following CAN happen and is safe:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     count = ctx->count
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        if (waitqueue_active)
	 *                                          wake_up_locked_poll
	 *                                        unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 *
	 * but the following, which would miss a wakeup, cannot happen:
	 *
	 *     poll                               write
	 *     -----------------                  ------------
	 *     count = ctx->count (INVALID!)
	 *                                        lock ctx->wqh.lock
	 *                                        ctx->count += n
	 *                                        **waitqueue_active is false**
	 *                                        **no wake_up_locked_poll!**
	 *                                        unlock ctx->wqh.lock
	 *     lock ctx->wqh.lock (in poll_wait)
	 *     __add_wait_queue
	 *     unlock ctx->wqh.lock
	 *     eventfd_poll returns 0
	 */
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;
	if (count == ULLONG_MAX)
		events |= EPOLLERR;
	if (ULLONG_MAX - 1 > count)
		events |= EPOLLOUT;

	return events;
}

static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
{
	*cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
	ctx->count -= *cnt;
}

/**
 * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
 * @ctx: [in] Pointer to eventfd context.
 * @wait: [in] Wait queue entry to be removed.
 * @cnt: [out] Pointer to the 64-bit counter value.
 *
 * Returns %0 if successful, or the following error code:
 *
 * -EAGAIN      : The operation would have blocked.
 *
 * This is used to atomically remove a wait queue entry from the eventfd wait
 * queue head, and read/reset the counter value.
 */
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
				  __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	eventfd_ctx_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
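
/*
 * Usage sketch (illustrative, not part of this file): tearing down a
 * consumer that had registered a custom wait queue entry on the eventfd,
 * in the style of KVM's irqfd shutdown. The "my_consumer" structure and
 * its fields are hypothetical.
 *
 *	static void my_consumer_shutdown(struct my_consumer *c)
 *	{
 *		__u64 cnt;
 *
 *		// Atomically unhook c->wait and drain the counter, so no
 *		// further wakeups reach this consumer.
 *		eventfd_ctx_remove_wait_queue(c->ctx, &c->wait, &cnt);
 *		eventfd_ctx_put(c->ctx);
 *	}
 */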

static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
			    loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt = 0;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;

	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ctx->count > 0)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ctx->count > 0) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		eventfd_ctx_do_read(ctx, &ucnt);
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	if (res > 0 && put_user(ucnt, (__u64 __user *)buf))
		return -EFAULT;

	return res;
}

static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
			     loff_t *ppos)
{
	struct eventfd_ctx *ctx = file->private_data;
	ssize_t res;
	__u64 ucnt;
	DECLARE_WAITQUEUE(wait, current);

	if (count < sizeof(ucnt))
		return -EINVAL;
	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
		return -EFAULT;
	if (ucnt == ULLONG_MAX)
		return -EINVAL;
	spin_lock_irq(&ctx->wqh.lock);
	res = -EAGAIN;
	if (ULLONG_MAX - ctx->count > ucnt)
		res = sizeof(ucnt);
	else if (!(file->f_flags & O_NONBLOCK)) {
		__add_wait_queue(&ctx->wqh, &wait);
		for (res = 0;;) {
			set_current_state(TASK_INTERRUPTIBLE);
			if (ULLONG_MAX - ctx->count > ucnt) {
				res = sizeof(ucnt);
				break;
			}
			if (signal_pending(current)) {
				res = -ERESTARTSYS;
				break;
			}
			spin_unlock_irq(&ctx->wqh.lock);
			schedule();
			spin_lock_irq(&ctx->wqh.lock);
		}
		__remove_wait_queue(&ctx->wqh, &wait);
		__set_current_state(TASK_RUNNING);
	}
	if (likely(res > 0)) {
		ctx->count += ucnt;
		if (waitqueue_active(&ctx->wqh))
			wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	}
	spin_unlock_irq(&ctx->wqh.lock);

	return res;
}

#ifdef CONFIG_PROC_FS
static void eventfd_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct eventfd_ctx *ctx = f->private_data;

	spin_lock_irq(&ctx->wqh.lock);
	seq_printf(m, "eventfd-count: %16llx\n",
		   (unsigned long long)ctx->count);
	spin_unlock_irq(&ctx->wqh.lock);
}
#endif

static const struct file_operations eventfd_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= eventfd_show_fdinfo,
#endif
	.release	= eventfd_release,
	.poll		= eventfd_poll,
	.read		= eventfd_read,
	.write		= eventfd_write,
	.llseek		= noop_llseek,
};
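
/*
 * Userspace view of the file operations above (illustrative sketch, not
 * part of this file): each write(2) adds its 8-byte value to the counter
 * and wakes readers; each read(2) returns the counter and resets it.
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	uint64_t val = 3;
 *
 *	write(efd, &val, sizeof(val));	// counter = 3, readers woken
 *	read(efd, &val, sizeof(val));	// val = 3, counter reset to 0
 */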

/**
 * eventfd_fget - Acquires a reference to an eventfd file descriptor.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the eventfd file structure in case of success, or the
 * following error pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct file *eventfd_fget(int fd)
{
	struct file *file;

	file = fget(fd);
	if (!file)
		return ERR_PTR(-EBADF);
	if (file->f_op != &eventfd_fops) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file;
}
EXPORT_SYMBOL_GPL(eventfd_fget);

/**
 * eventfd_ctx_fdget - Acquires a reference to the internal eventfd context.
 * @fd: [in] Eventfd file descriptor.
 *
 * Returns a pointer to the internal eventfd context, otherwise an error
 * pointer:
 *
 * -EBADF    : Invalid @fd file descriptor.
 * -EINVAL   : The @fd file descriptor is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fdget(int fd)
{
	struct eventfd_ctx *ctx;
	struct fd f = fdget(fd);
	if (!f.file)
		return ERR_PTR(-EBADF);
	ctx = eventfd_ctx_fileget(f.file);
	fdput(f);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fdget);
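
/*
 * Usage sketch (illustrative, not part of this file): a driver ioctl
 * accepting an eventfd from userspace and holding on to its context.
 * The "my_dev" structure and its trigger_ctx field are hypothetical.
 *
 *	static int my_dev_set_trigger(struct my_dev *dev, int fd)
 *	{
 *		struct eventfd_ctx *ctx = eventfd_ctx_fdget(fd);
 *
 *		if (IS_ERR(ctx))
 *			return PTR_ERR(ctx);
 *		dev->trigger_ctx = ctx;	// released later via eventfd_ctx_put()
 *		return 0;
 *	}
 */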

/**
 * eventfd_ctx_fileget - Acquires a reference to the internal eventfd context.
 * @file: [in] Eventfd file pointer.
 *
 * Returns a pointer to the internal eventfd context, otherwise the error
 * pointer:
 *
 * -EINVAL   : The @file pointer is not an eventfd file.
 */
struct eventfd_ctx *eventfd_ctx_fileget(struct file *file)
{
	struct eventfd_ctx *ctx;

	if (file->f_op != &eventfd_fops)
		return ERR_PTR(-EINVAL);

	ctx = file->private_data;
	kref_get(&ctx->kref);
	return ctx;
}
EXPORT_SYMBOL_GPL(eventfd_ctx_fileget);

static int do_eventfd(unsigned int count, int flags)
{
	struct eventfd_ctx *ctx;
	int fd;

	/* Check the EFD_* constants for consistency.  */
	BUILD_BUG_ON(EFD_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(EFD_NONBLOCK != O_NONBLOCK);

	if (flags & ~EFD_FLAGS_SET)
		return -EINVAL;

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	kref_init(&ctx->kref);
	init_waitqueue_head(&ctx->wqh);
	ctx->count = count;
	ctx->flags = flags;

	fd = anon_inode_getfd("[eventfd]", &eventfd_fops, ctx,
			      O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS));
	if (fd < 0)
		eventfd_free_ctx(ctx);

	return fd;
}

SYSCALL_DEFINE2(eventfd2, unsigned int, count, int, flags)
{
	return do_eventfd(count, flags);
}

SYSCALL_DEFINE1(eventfd, unsigned int, count)
{
	return do_eventfd(count, 0);
}
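
/*
 * Userspace view of the syscalls above (illustrative sketch, not part of
 * this file): glibc's eventfd() wrapper typically invokes eventfd2. With
 * EFD_SEMAPHORE, each read(2) returns 1 and decrements the counter by 1
 * instead of resetting it to zero.
 *
 *	#include <sys/eventfd.h>
 *	#include <stdint.h>
 *	#include <unistd.h>
 *
 *	int sem = eventfd(2, EFD_SEMAPHORE);	// counter starts at 2
 *	uint64_t v;
 *
 *	read(sem, &v, sizeof(v));	// v = 1, counter now 1
 *	read(sem, &v, sizeof(v));	// v = 1, counter now 0
 *	read(sem, &v, sizeof(v));	// blocks (no O_NONBLOCK) until a write
 */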