/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc.  All Rights Reserved.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>

#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>

#include <asm/kmap_types.h>
#include <asm/uaccess.h>

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */
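
/*
 * The aio_ring above sits at the start of the mmap'ed region whose
 * address is handed back to userspace as the aio_context_t.  A minimal
 * userspace sketch of peeking at the mapped header (illustrative only,
 * not built here; assumes the layout above and a ctx_id obtained from
 * io_setup()):
 *
 *	struct aio_ring *ring = (struct aio_ring *)(unsigned long)ctx_id;
 *
 *	if (ring->magic == AIO_RING_MAGIC &&
 *	    ring->incompat_features == AIO_RING_INCOMPAT_FEATURES)
 *		pending = (ring->tail - ring->head + ring->nr) % ring->nr;
 */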

#define AIO_RING_PAGES	8
struct aio_ring_info {
	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	spinlock_t		ring_lock;
	long			nr_pages;

	unsigned		nr, tail;

	struct page		*internal_pages[AIO_RING_PAGES];
};

static inline unsigned aio_ring_avail(struct aio_ring_info *info,
					struct aio_ring *ring)
{
	return (ring->head + info->nr - 1 - ring->tail) % info->nr;
}
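
/*
 * Worked example of the computation above: with info->nr == 128,
 * head == 10 and tail == 20, ten events are in flight and
 * (10 + 128 - 1 - 20) % 128 == 117 slots remain free.  One slot is
 * always sacrificed so that head == tail unambiguously means "empty"
 * rather than "full".
 */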

struct kioctx {
	atomic_t		users;
	atomic_t		dead;

	/* This needs improving */
	unsigned long		user_id;
	struct hlist_node	list;

	wait_queue_head_t	wait;

	spinlock_t		ctx_lock;

	atomic_t		reqs_active;
	struct list_head	active_reqs;	/* used for cancellation */

	/* sys_io_setup currently limits this to an unsigned int */
	unsigned		max_reqs;

	struct aio_ring_info	ring_info;

	struct rcu_head		rcu_head;
	struct work_struct	rcu_work;
};

/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);

	pr_debug("sizeof(struct page) = %zu\n", sizeof(struct page));

	return 0;
}
__initcall(aio_setup);

static void aio_free_ring(struct kioctx *ctx)
{
	struct aio_ring_info *info = &ctx->ring_info;
	long i;

	for (i = 0; i < info->nr_pages; i++)
		put_page(info->ring_pages[i]);

	if (info->mmap_size) {
		vm_munmap(info->mmap_base, info->mmap_size);
	}

	if (info->ring_pages && info->ring_pages != info->internal_pages)
		kfree(info->ring_pages);
	info->ring_pages = NULL;
	info->nr = 0;
}

static int aio_setup_ring(struct kioctx *ctx)
{
	struct aio_ring *ring;
	struct aio_ring_info *info = &ctx->ring_info;
	unsigned nr_events = ctx->max_reqs;
	struct mm_struct *mm = current->mm;
	unsigned long size, populate;
	int nr_pages;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;
	nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;

	if (nr_pages < 0)
		return -EINVAL;

	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);

	info->nr = 0;
	info->ring_pages = info->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		info->ring_pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
		if (!info->ring_pages)
			return -ENOMEM;
	}

	info->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", info->mmap_size);
	down_write(&mm->mmap_sem);
	info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
					PROT_READ|PROT_WRITE,
					MAP_ANONYMOUS|MAP_PRIVATE, 0,
					&populate);
	if (IS_ERR((void *)info->mmap_base)) {
		up_write(&mm->mmap_sem);
		info->mmap_size = 0;
		aio_free_ring(ctx);
		return -EAGAIN;
	}

	pr_debug("mmap address: 0x%08lx\n", info->mmap_base);
	info->nr_pages = get_user_pages(current, mm, info->mmap_base, nr_pages,
					1, 0, info->ring_pages, NULL);
	up_write(&mm->mmap_sem);

	if (unlikely(info->nr_pages != nr_pages)) {
		aio_free_ring(ctx);
		return -EAGAIN;
	}
	if (populate)
		mm_populate(info->mmap_base, populate);

	ctx->user_id = info->mmap_base;

	info->nr = nr_events;	/* trusted copy */

	ring = kmap_atomic(info->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ctx->user_id;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);

	return 0;
}
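
/*
 * Sizing example for the arithmetic above (a sketch; assumes
 * PAGE_SIZE == 4096, a 32-byte struct io_event, and
 * sizeof(struct aio_ring) == 32 on this build): a request for 128
 * events becomes 130 after the overlap compensation, so
 * size = 32 + 32 * 130 = 4192 bytes and nr_pages = 2.  nr_events is
 * then rounded back up to everything that fits:
 * (2 * 4096 - 32) / 32 = 255 usable event slots.
 */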


/* aio_ring_event: returns a pointer to the event at the given index from
 * kmap_atomic().  Release the pointer with put_aio_ring_event();
 */
#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

#define aio_ring_event(info, nr) ({					\
	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
	struct io_event *__event;					\
	__event = kmap_atomic(						\
			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]);	\
	__event += pos % AIO_EVENTS_PER_PAGE;				\
	__event;							\
})

#define put_aio_ring_event(event) do {		\
	struct io_event *__event = (event);	\
	(void)__event;				\
	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
} while(0)
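
/*
 * Index arithmetic example (same assumptions as the sizing sketch above:
 * 4096-byte pages, 32-byte events, 32-byte header): AIO_EVENTS_PER_PAGE
 * is 128, AIO_EVENTS_FIRST_PAGE is (4096 - 32) / 32 = 127, so
 * AIO_EVENTS_OFFSET is 1.  Event index 126 maps to pos 127, the last
 * slot of ring_pages[0]; index 127 maps to pos 128, the first slot of
 * ring_pages[1].
 */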

static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb,
			struct io_event *res)
{
	int (*cancel)(struct kiocb *, struct io_event *);
	int ret = -EINVAL;

	cancel = kiocb->ki_cancel;
	kiocbSetCancelled(kiocb);
	if (cancel) {
		atomic_inc(&kiocb->ki_users);
		spin_unlock_irq(&ctx->ctx_lock);

		memset(res, 0, sizeof(*res));
		res->obj = (u64)(unsigned long)kiocb->ki_obj.user;
		res->data = kiocb->ki_user_data;
		ret = cancel(kiocb, res);

		spin_lock_irq(&ctx->ctx_lock);
	}

	return ret;
}
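
/*
 * Drivers opt in to cancellation by pointing kiocb->ki_cancel at a
 * function with the signature used above.  kiocb_cancel() zeroes *res
 * and fills res->obj/res->data before the call, and holds an extra
 * ki_users reference across it.  A hypothetical sketch (the callback
 * name and my_hw_abort() are illustrative, not from this file):
 *
 *	static int my_driver_cancel(struct kiocb *iocb, struct io_event *ev)
 *	{
 *		ev->res = -EINTR;
 *		return my_hw_abort(iocb->private) ? -EAGAIN : 0;
 *	}
 *
 *	req->ki_cancel = my_driver_cancel;
 */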

static void free_ioctx_rcu(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
	kmem_cache_free(kioctx_cachep, ctx);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx(struct kioctx *ctx)
{
	struct io_event res;
	struct kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct kiocb, ki_list);

		list_del_init(&req->ki_list);
		kiocb_cancel(ctx, req, &res);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	wait_event(ctx->wait, !atomic_read(&ctx->reqs_active));

	aio_free_ring(ctx);

	spin_lock(&aio_nr_lock);
	BUG_ON(aio_nr - ctx->max_reqs > aio_nr);
	aio_nr -= ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	pr_debug("freeing %p\n", ctx);

	/*
	 * Here the call_rcu() is between the wait_event() for reqs_active to
	 * hit 0, and freeing the ioctx.
	 *
	 * aio_complete() decrements reqs_active, but it has to touch the ioctx
	 * after to issue a wakeup so we use rcu.
	 */
	call_rcu(&ctx->rcu_head, free_ioctx_rcu);
}

static void put_ioctx(struct kioctx *ctx)
{
	if (unlikely(atomic_dec_and_test(&ctx->users)))
		free_ioctx(ctx);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/* Prevent overflows */
	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
	    (nr_events > (0x10000000U / sizeof(struct kiocb)))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = nr_events;

	atomic_set(&ctx->users, 2);
	atomic_set(&ctx->dead, 0);
	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->ring_info.ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (aio_setup_ring(ctx) < 0)
		goto out_freectx;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + nr_events > aio_max_nr ||
	    aio_nr + nr_events < aio_nr) {
		spin_unlock(&aio_nr_lock);
		goto out_cleanup;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	/* now link into global list. */
	spin_lock(&mm->ioctx_lock);
	hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
	spin_unlock(&mm->ioctx_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->ring_info.nr);
	return ctx;

out_cleanup:
	err = -EAGAIN;
	aio_free_ring(ctx);
out_freectx:
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

static void kill_ioctx_work(struct work_struct *work)
{
	struct kioctx *ctx = container_of(work, struct kioctx, rcu_work);

	wake_up_all(&ctx->wait);
	put_ioctx(ctx);
}

static void kill_ioctx_rcu(struct rcu_head *head)
{
	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);

	INIT_WORK(&ctx->rcu_work, kill_ioctx_work);
	schedule_work(&ctx->rcu_work);
}

/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context.  Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static void kill_ioctx(struct kioctx *ctx)
{
	if (!atomic_xchg(&ctx->dead, 1)) {
		hlist_del_rcu(&ctx->list);
		/* Between hlist_del_rcu() and dropping the initial ref */
		synchronize_rcu();

		/*
		 * We can't punt to workqueue here because put_ioctx() ->
		 * free_ioctx() will unmap the ringbuffer, and that has to be
		 * done in the original process's context. kill_ioctx_rcu/work()
		 * exist for exit_aio(), as in that path free_ioctx() won't do
		 * the unmap.
		 */
		kill_ioctx_work(&ctx->rcu_work);
	}
}

/* wait_on_sync_kiocb:
 *	Waits on the given sync kiocb to complete.
 */
ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
{
	while (atomic_read(&iocb->ki_users)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (!atomic_read(&iocb->ki_users))
			break;
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
	return iocb->ki_user_data;
}
EXPORT_SYMBOL(wait_on_sync_kiocb);

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx *ctx;
	struct hlist_node *n;

	hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) {
		if (1 != atomic_read(&ctx->users))
			printk(KERN_DEBUG
				"exit_aio:ioctx still alive: %d %d %d\n",
				atomic_read(&ctx->users),
				atomic_read(&ctx->dead),
				atomic_read(&ctx->reqs_active));
		/*
		 * We don't need to bother with munmap() here -
		 * exit_mmap(mm) is coming and it'll unmap everything.
		 * Since aio_free_ring() uses non-zero ->mmap_size
		 * as indicator that it needs to unmap the area,
		 * just set it to 0; aio_free_ring() is the only
		 * place that uses ->mmap_size, so it's safe.
		 */
		ctx->ring_info.mmap_size = 0;

		if (!atomic_xchg(&ctx->dead, 1)) {
			hlist_del_rcu(&ctx->list);
			call_rcu(&ctx->rcu_head, kill_ioctx_rcu);
		}
	}
}

/* aio_get_req
 *	Allocate a slot for an aio request.  Increments the ki_users count
 *	of the kioctx so that the kioctx stays around until all requests are
 *	complete.  Returns NULL if no requests are free.
 *
 *	Returns with kiocb->ki_users set to 2.  The io submit code path holds
 *	an extra reference while submitting the i/o.
 *	This prevents races between the aio code path referencing the
 *	req (after submitting it) and aio_complete() freeing the req.
 */
static struct kiocb *__aio_get_req(struct kioctx *ctx)
{
	struct kiocb *req = NULL;

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
	if (unlikely(!req))
		return NULL;

	req->ki_flags = 0;
	atomic_set(&req->ki_users, 2);
	req->ki_key = 0;
	req->ki_ctx = ctx;
	req->ki_cancel = NULL;
	req->ki_retry = NULL;
	req->ki_dtor = NULL;
	req->private = NULL;
	req->ki_iovec = NULL;
	req->ki_eventfd = NULL;

	return req;
}

/*
 * struct kiocb's are allocated in batches to reduce the number of
 * times the ctx lock is acquired and released.
 */
#define KIOCB_BATCH_SIZE	32L
struct kiocb_batch {
	struct list_head head;
	long count; /* number of requests left to allocate */
};

static void kiocb_batch_init(struct kiocb_batch *batch, long total)
{
	INIT_LIST_HEAD(&batch->head);
	batch->count = total;
}

static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
{
	struct kiocb *req, *n;

	if (list_empty(&batch->head))
		return;

	spin_lock_irq(&ctx->ctx_lock);
	list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
		list_del(&req->ki_batch);
		list_del(&req->ki_list);
		kmem_cache_free(kiocb_cachep, req);
		atomic_dec(&ctx->reqs_active);
	}
	spin_unlock_irq(&ctx->ctx_lock);
}

/*
 * Allocate a batch of kiocbs.  This avoids taking and dropping the
 * context lock a lot during setup.
 */
static int kiocb_batch_refill(struct kioctx *ctx, struct kiocb_batch *batch)
{
	unsigned short allocated, to_alloc;
	long avail;
	struct kiocb *req, *n;
	struct aio_ring *ring;

	to_alloc = min(batch->count, KIOCB_BATCH_SIZE);
	for (allocated = 0; allocated < to_alloc; allocated++) {
		req = __aio_get_req(ctx);
		if (!req)
			/* allocation failed, go with what we've got */
			break;
		list_add(&req->ki_batch, &batch->head);
	}

	if (allocated == 0)
		goto out;

	spin_lock_irq(&ctx->ctx_lock);
	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);

	avail = aio_ring_avail(&ctx->ring_info, ring) -
				atomic_read(&ctx->reqs_active);
	BUG_ON(avail < 0);
	if (avail < allocated) {
		/* Trim back the number of requests. */
		list_for_each_entry_safe(req, n, &batch->head, ki_batch) {
			list_del(&req->ki_batch);
			kmem_cache_free(kiocb_cachep, req);
			if (--allocated <= avail)
				break;
		}
	}

	batch->count -= allocated;
	list_for_each_entry(req, &batch->head, ki_batch) {
		list_add(&req->ki_list, &ctx->active_reqs);
		atomic_inc(&ctx->reqs_active);
	}

	kunmap_atomic(ring);
	spin_unlock_irq(&ctx->ctx_lock);

out:
	return allocated;
}

static inline struct kiocb *aio_get_req(struct kioctx *ctx,
					struct kiocb_batch *batch)
{
	struct kiocb *req;

	if (list_empty(&batch->head))
		if (kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
	req = list_first_entry(&batch->head, struct kiocb, ki_batch);
	list_del(&req->ki_batch);
	return req;
}

static void kiocb_free(struct kiocb *req)
{
	if (req->ki_filp)
		fput(req->ki_filp);
	if (req->ki_eventfd != NULL)
		eventfd_ctx_put(req->ki_eventfd);
	if (req->ki_dtor)
		req->ki_dtor(req);
	if (req->ki_iovec != &req->ki_inline_vec)
		kfree(req->ki_iovec);
	kmem_cache_free(kiocb_cachep, req);
}

void aio_put_req(struct kiocb *req)
{
	if (atomic_dec_and_test(&req->ki_users))
		kiocb_free(req);
}
EXPORT_SYMBOL(aio_put_req);

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;

	rcu_read_lock();

	hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) {
		if (ctx->user_id == ctx_id) {
			atomic_inc(&ctx->users);
			ret = ctx;
			break;
		}
	}

	rcu_read_unlock();
	return ret;
}

/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
void aio_complete(struct kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring_info	*info;
	struct aio_ring	*ring;
	struct io_event	*event;
	unsigned long	flags;
	unsigned long	tail;

	/*
	 * Special case handling for sync iocbs:
	 *  - events go directly into the iocb for fast handling
	 *  - the sync task with the iocb in its stack holds the single iocb
	 *    ref, no other paths have a way to get another ref
	 *  - the sync task helpfully left a reference to itself in the iocb
	 */
	if (is_sync_kiocb(iocb)) {
		BUG_ON(atomic_read(&iocb->ki_users) != 1);
		iocb->ki_user_data = res;
		atomic_set(&iocb->ki_users, 0);
		wake_up_process(iocb->ki_obj.tsk);
		return;
	}

	info = &ctx->ring_info;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->ctx_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 *
	 * Take rcu_read_lock() in case the kioctx is being destroyed, as we
	 * need to issue a wakeup after decrementing reqs_active.
	 */
	rcu_read_lock();
	spin_lock_irqsave(&ctx->ctx_lock, flags);

	list_del(&iocb->ki_list); /* remove from active_reqs */

	/*
	 * cancelled requests don't get events, userland was given one
	 * when the event got cancelled.
	 */
	if (kiocbIsCancelled(iocb))
		goto put_rq;

	ring = kmap_atomic(info->ring_pages[0]);

	tail = info->tail;
	event = aio_ring_event(info, tail);
	if (++tail >= info->nr)
		tail = 0;

	event->obj = (u64)(unsigned long)iocb->ki_obj.user;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	pr_debug("%p[%lu]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_obj.user, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	info->tail = tail;
	ring->tail = tail;

	put_aio_ring_event(event);
	kunmap_atomic(ring);

	pr_debug("added to ring %p at [%lu]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd != NULL)
		eventfd_signal(iocb->ki_eventfd, 1);

put_rq:
	/* everything turned out well, dispose of the aiocb. */
	aio_put_req(iocb);
	atomic_dec(&ctx->reqs_active);

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);

	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
	rcu_read_unlock();
}
EXPORT_SYMBOL(aio_complete);

/* aio_read_evt
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched (0 or 1 ;-)
 *	FIXME: make this use cmpxchg.
 *	TODO: make the ringbuffer user mmap()able (requires FIXME).
 */
static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
{
	struct aio_ring_info *info = &ioctx->ring_info;
	struct aio_ring *ring;
	unsigned long head;
	int ret = 0;

	ring = kmap_atomic(info->ring_pages[0]);
	pr_debug("h%u t%u m%u\n", ring->head, ring->tail, ring->nr);

	if (ring->head == ring->tail)
		goto out;

	spin_lock(&info->ring_lock);

	head = ring->head % info->nr;
	if (head != ring->tail) {
		struct io_event *evp = aio_ring_event(info, head);
		*ent = *evp;
		head = (head + 1) % info->nr;
		smp_mb(); /* finish reading the event before updating the head */
		ring->head = head;
		ret = 1;
		put_aio_ring_event(evp);
	}
	spin_unlock(&info->ring_lock);

out:
	kunmap_atomic(ring);
	pr_debug("%d  h%u t%u\n", ret, ring->head, ring->tail);
	return ret;
}

struct aio_timeout {
	struct timer_list	timer;
	int			timed_out;
	struct task_struct	*p;
};

static void timeout_func(unsigned long data)
{
	struct aio_timeout *to = (struct aio_timeout *)data;

	to->timed_out = 1;
	wake_up_process(to->p);
}

static inline void init_timeout(struct aio_timeout *to)
{
	setup_timer_on_stack(&to->timer, timeout_func, (unsigned long) to);
	to->timed_out = 0;
	to->p = current;
}

static inline void set_timeout(long start_jiffies, struct aio_timeout *to,
			       const struct timespec *ts)
{
	to->timer.expires = start_jiffies + timespec_to_jiffies(ts);
	if (time_after(to->timer.expires, jiffies))
		add_timer(&to->timer);
	else
		to->timed_out = 1;
}

static inline void clear_timeout(struct aio_timeout *to)
{
	del_singleshot_timer_sync(&to->timer);
}

static int read_events(struct kioctx *ctx,
			long min_nr, long nr,
			struct io_event __user *event,
			struct timespec __user *timeout)
{
	long			start_jiffies = jiffies;
	struct task_struct	*tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);
	int			ret;
	int			i = 0;
	struct io_event		ent;
	struct aio_timeout	to;

	/* needed to zero any padding within an entry (there shouldn't be
	 * any, but C is fun!)
	 */
	memset(&ent, 0, sizeof(ent));
	ret = 0;
	while (likely(i < nr)) {
		ret = aio_read_evt(ctx, &ent);
		if (unlikely(ret <= 0))
			break;

		pr_debug("%Lx %Lx %Lx %Lx\n",
			 ent.data, ent.obj, ent.res, ent.res2);

		/* Could we split the check in two? */
		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			pr_debug("lost an event due to EFAULT.\n");
			break;
		}
		ret = 0;

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (min_nr <= i)
		return i;
	if (ret)
		return ret;

	/* End fast path */

	init_timeout(&to);
	if (timeout) {
		struct timespec	ts;
		ret = -EFAULT;
		if (unlikely(copy_from_user(&ts, timeout, sizeof(ts))))
			goto out;

		set_timeout(start_jiffies, &to, &ts);
	}

	while (likely(i < nr)) {
		add_wait_queue_exclusive(&ctx->wait, &wait);
		do {
			set_task_state(tsk, TASK_INTERRUPTIBLE);
			ret = aio_read_evt(ctx, &ent);
			if (ret)
				break;
			if (min_nr <= i)
				break;
			if (unlikely(atomic_read(&ctx->dead))) {
				ret = -EINVAL;
				break;
			}
			if (to.timed_out)	/* Only check after read evt */
				break;
			/* Try to only show up in io wait if there are ops
			 * in flight */
			if (atomic_read(&ctx->reqs_active))
				io_schedule();
			else
				schedule();
			if (signal_pending(tsk)) {
				ret = -EINTR;
				break;
			}
			/*ret = aio_read_evt(ctx, &ent);*/
		} while (1);

		set_task_state(tsk, TASK_RUNNING);
		remove_wait_queue(&ctx->wait, &wait);

		if (unlikely(ret <= 0))
			break;

		ret = -EFAULT;
		if (unlikely(copy_to_user(event, &ent, sizeof(ent)))) {
			pr_debug("lost an event due to EFAULT.\n");
			break;
		}

		/* Good, event copied to userland, update counts. */
		event++;
		i++;
	}

	if (timeout)
		clear_timeout(&to);
out:
	destroy_timer_on_stack(&to.timer);
	return i ? i : ret;
}

/* sys_io_setup:
 *	Create an aio_context capable of receiving at least nr_events.
 *	ctxp must not point to an aio_context that already exists, and
 *	must be initialized to 0 prior to the call.  On successful
 *	creation of the aio_context, *ctxp is filled in with the resulting
 *	handle.  May fail with -EINVAL if *ctxp is not initialized,
 *	if the specified nr_events exceeds internal limits.  May fail
 *	with -EAGAIN if the specified nr_events exceeds the user's limit
 *	of available events.  May fail with -ENOMEM if insufficient kernel
 *	resources are available.  May fail with -EFAULT if an invalid
 *	pointer is passed for ctxp.  Will fail with -ENOSYS if not
 *	implemented.
 */
SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
{
	struct kioctx *ioctx = NULL;
	unsigned long ctx;
	long ret;

	ret = get_user(ctx, ctxp);
	if (unlikely(ret))
		goto out;

	ret = -EINVAL;
	if (unlikely(ctx || nr_events == 0)) {
		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
			 ctx, nr_events);
		goto out;
	}

	ioctx = ioctx_alloc(nr_events);
	ret = PTR_ERR(ioctx);
	if (!IS_ERR(ioctx)) {
		ret = put_user(ioctx->user_id, ctxp);
		if (ret)
			kill_ioctx(ioctx);
		put_ioctx(ioctx);
	}

out:
	return ret;
}
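
/*
 * Minimal userspace sketch of driving this syscall (illustrative only;
 * raw syscall(2) is used instead of libaio, and the context word must
 * start out zeroed as documented above):
 *
 *	#include <linux/aio_abi.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */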

/* sys_io_destroy:
 *	Destroy the aio_context specified.  May cancel any outstanding
 *	AIOs and block on completion.  Will fail with -ENOSYS if not
 *	implemented.  May fail with -EINVAL if the context pointed to
 *	is invalid.
 */
SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
{
	struct kioctx *ioctx = lookup_ioctx(ctx);
	if (likely(NULL != ioctx)) {
		kill_ioctx(ioctx);
		put_ioctx(ioctx);
		return 0;
	}
	pr_debug("EINVAL: io_destroy: invalid context id\n");
	return -EINVAL;
}

static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret)
{
	struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg];

	BUG_ON(ret <= 0);

	while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) {
		ssize_t this = min((ssize_t)iov->iov_len, ret);
		iov->iov_base += this;
		iov->iov_len -= this;
		iocb->ki_left -= this;
		ret -= this;
		if (iov->iov_len == 0) {
			iocb->ki_cur_seg++;
			iov++;
		}
	}

	/* the caller should not have done more io than what fit in
	 * the remaining iovecs */
	BUG_ON(ret > 0 && iocb->ki_left == 0);
}
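
/*
 * Worked example: with two iovecs of 100 and 200 bytes and a partial
 * transfer of ret == 150, the loop consumes all 100 bytes of segment 0
 * (advancing ki_cur_seg to 1) and then 50 bytes of segment 1, leaving
 * that iov_base bumped by 50, iov_len at 150 and ki_left reduced by
 * 150, so the retry loop below resumes exactly where the short
 * transfer stopped.
 */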

static ssize_t aio_rw_vect_retry(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	ssize_t (*rw_op)(struct kiocb *, const struct iovec *,
			 unsigned long, loff_t);
	ssize_t ret = 0;
	unsigned short opcode;

	if ((iocb->ki_opcode == IOCB_CMD_PREADV) ||
	    (iocb->ki_opcode == IOCB_CMD_PREAD)) {
		rw_op = file->f_op->aio_read;
		opcode = IOCB_CMD_PREADV;
	} else {
		rw_op = file->f_op->aio_write;
		opcode = IOCB_CMD_PWRITEV;
	}

	/* This matches the pread()/pwrite() logic */
	if (iocb->ki_pos < 0)
		return -EINVAL;

	if (opcode == IOCB_CMD_PWRITEV)
		file_start_write(file);
	do {
		ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg],
			    iocb->ki_nr_segs - iocb->ki_cur_seg,
			    iocb->ki_pos);
		if (ret > 0)
			aio_advance_iovec(iocb, ret);

	/* retry all partial writes.  retry partial reads as long as it's a
	 * regular file. */
	} while (ret > 0 && iocb->ki_left > 0 &&
		 (opcode == IOCB_CMD_PWRITEV ||
		  (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode))));
	if (opcode == IOCB_CMD_PWRITEV)
		file_end_write(file);

	/* This means we must have transferred all that we could */
	/* No need to retry anymore */
	if ((ret == 0) || (iocb->ki_left == 0))
		ret = iocb->ki_nbytes - iocb->ki_left;

	/* If we managed to write some out we return that, rather than
	 * the eventual error. */
	if (opcode == IOCB_CMD_PWRITEV
	    && ret < 0 && ret != -EIOCBQUEUED
	    && iocb->ki_nbytes - iocb->ki_left)
		ret = iocb->ki_nbytes - iocb->ki_left;

	return ret;
}

static ssize_t aio_fdsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 1);
	return ret;
}

static ssize_t aio_fsync(struct kiocb *iocb)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret = -EINVAL;

	if (file->f_op->aio_fsync)
		ret = file->f_op->aio_fsync(iocb, 0);
	return ret;
}

static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
{
	ssize_t ret;

#ifdef CONFIG_COMPAT
	if (compat)
		ret = compat_rw_copy_check_uvector(type,
				(struct compat_iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	else
#endif
		ret = rw_copy_check_uvector(type,
				(struct iovec __user *)kiocb->ki_buf,
				kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
				&kiocb->ki_iovec);
	if (ret < 0)
		goto out;

	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
	if (ret < 0)
		goto out;

	kiocb->ki_nr_segs = kiocb->ki_nbytes;
	kiocb->ki_cur_seg = 0;
	/* ki_nbytes/left now reflect bytes instead of segs */
	kiocb->ki_nbytes = ret;
	kiocb->ki_left = ret;

	ret = 0;
out:
	return ret;
}

static ssize_t aio_setup_single_vector(int type, struct file *file, struct kiocb *kiocb)
{
	int bytes;

	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
	if (bytes < 0)
		return bytes;

	kiocb->ki_iovec = &kiocb->ki_inline_vec;
	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
	kiocb->ki_iovec->iov_len = bytes;
	kiocb->ki_nr_segs = 1;
	kiocb->ki_cur_seg = 0;
	return 0;
}

/*
 * aio_setup_iocb:
 *	Performs the initial checks and aio retry method
 *	setup for the kiocb at the time of io submission.
 */
static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
{
	struct file *file = kiocb->ki_filp;
	ssize_t ret = 0;

	switch (kiocb->ki_opcode) {
	case IOCB_CMD_PREAD:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(READ, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITE:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = -EFAULT;
		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
			kiocb->ki_left)))
			break;
		ret = aio_setup_single_vector(WRITE, file, kiocb);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PREADV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_READ)))
			break;
		ret = aio_setup_vectored_rw(READ, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_read)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_PWRITEV:
		ret = -EBADF;
		if (unlikely(!(file->f_mode & FMODE_WRITE)))
			break;
		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
		if (ret)
			break;
		ret = -EINVAL;
		if (file->f_op->aio_write)
			kiocb->ki_retry = aio_rw_vect_retry;
		break;
	case IOCB_CMD_FDSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fdsync;
		break;
	case IOCB_CMD_FSYNC:
		ret = -EINVAL;
		if (file->f_op->aio_fsync)
			kiocb->ki_retry = aio_fsync;
		break;
	default:
		pr_debug("EINVAL: no operation provided\n");
		ret = -EINVAL;
	}

	if (!kiocb->ki_retry)
		return ret;

	return 0;
}

static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
			 struct iocb *iocb, struct kiocb_batch *batch,
			 bool compat)
{
	struct kiocb *req;
	ssize_t ret;

	/* enforce forwards compatibility on users */
	if (unlikely(iocb->aio_reserved1 || iocb->aio_reserved2)) {
		pr_debug("EINVAL: reserve field set\n");
		return -EINVAL;
	}

	/* prevent overflows */
	if (unlikely(
	    (iocb->aio_buf != (unsigned long)iocb->aio_buf) ||
	    (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
	    ((ssize_t)iocb->aio_nbytes < 0)
	   )) {
		pr_debug("EINVAL: io_submit: overflow check\n");
		return -EINVAL;
	}

	req = aio_get_req(ctx, batch);  /* returns with 2 references to req */
	if (unlikely(!req))
		return -EAGAIN;

	req->ki_filp = fget(iocb->aio_fildes);
	if (unlikely(!req->ki_filp)) {
		ret = -EBADF;
		goto out_put_req;
	}

	if (iocb->aio_flags & IOCB_FLAG_RESFD) {
		/*
		 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
		 * instance of the file* now. The file descriptor must be
		 * an eventfd() fd, and will be signaled for each completed
		 * event using the eventfd_signal() function.
		 */
		req->ki_eventfd = eventfd_ctx_fdget((int) iocb->aio_resfd);
		if (IS_ERR(req->ki_eventfd)) {
			ret = PTR_ERR(req->ki_eventfd);
			req->ki_eventfd = NULL;
			goto out_put_req;
		}
	}

	ret = put_user(req->ki_key, &user_iocb->aio_key);
	if (unlikely(ret)) {
		pr_debug("EFAULT: aio_key\n");
		goto out_put_req;
	}

	req->ki_obj.user = user_iocb;
	req->ki_user_data = iocb->aio_data;
	req->ki_pos = iocb->aio_offset;

	req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
	req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
	req->ki_opcode = iocb->aio_lio_opcode;

	ret = aio_setup_iocb(req, compat);

	if (ret)
		goto out_put_req;

	if (unlikely(kiocbIsCancelled(req)))
		ret = -EINTR;
	else
		ret = req->ki_retry(req);

	if (ret != -EIOCBQUEUED) {
		/*
		 * There's no easy way to restart the syscall since other AIO's
		 * may be already running. Just fail this IO with EINTR.
		 */
		if (unlikely(ret == -ERESTARTSYS || ret == -ERESTARTNOINTR ||
			     ret == -ERESTARTNOHAND ||
			     ret == -ERESTART_RESTARTBLOCK))
			ret = -EINTR;
		aio_complete(req, ret, 0);
	}

	aio_put_req(req);	/* drop extra ref to req */
	return 0;

out_put_req:
	spin_lock_irq(&ctx->ctx_lock);
	list_del(&req->ki_list);
	spin_unlock_irq(&ctx->ctx_lock);

	atomic_dec(&ctx->reqs_active);
	aio_put_req(req);	/* drop extra ref to req */
	aio_put_req(req);	/* drop i/o ref to req */
	return ret;
}

long do_io_submit(aio_context_t ctx_id, long nr,
		  struct iocb __user *__user *iocbpp, bool compat)
{
	struct kioctx *ctx;
	long ret = 0;
	int i = 0;
	struct blk_plug plug;
	struct kiocb_batch batch;

	if (unlikely(nr < 0))
		return -EINVAL;

	if (unlikely(nr > LONG_MAX/sizeof(*iocbpp)))
		nr = LONG_MAX/sizeof(*iocbpp);

	if (unlikely(!access_ok(VERIFY_READ, iocbpp, (nr*sizeof(*iocbpp)))))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx)) {
		pr_debug("EINVAL: invalid context id\n");
		return -EINVAL;
	}

	kiocb_batch_init(&batch, nr);

	blk_start_plug(&plug);

	/*
	 * AKPM: should this return a partial result if some of the IOs were
	 * successfully submitted?
	 */
	for (i = 0; i < nr; i++) {
		struct iocb __user *user_iocb;
		struct iocb tmp;

		if (unlikely(__get_user(user_iocb, iocbpp + i))) {
			ret = -EFAULT;
			break;
		}

		if (unlikely(copy_from_user(&tmp, user_iocb, sizeof(tmp)))) {
			ret = -EFAULT;
			break;
		}

		ret = io_submit_one(ctx, user_iocb, &tmp, &batch, compat);
		if (ret)
			break;
	}
	blk_finish_plug(&plug);

	kiocb_batch_free(ctx, &batch);
	put_ioctx(ctx);
	return i ? i : ret;
}

/* sys_io_submit:
 *	Queue the nr iocbs pointed to by iocbpp for processing.  Returns
 *	the number of iocbs queued.  May return -EINVAL if the aio_context
 *	specified by ctx_id is invalid, if nr is < 0, if the iocb at
 *	*iocbpp[0] is not properly initialized, if the operation specified
 *	is invalid for the file descriptor in the iocb.  May fail with
 *	-EFAULT if any of the data structures point to invalid data.  May
 *	fail with -EBADF if the file descriptor specified in the first
 *	iocb is invalid.  May fail with -EAGAIN if insufficient resources
 *	are available to queue any iocbs.  Will return 0 if nr is 0.  Will
 *	fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
		struct iocb __user * __user *, iocbpp)
{
	return do_io_submit(ctx_id, nr, iocbpp, 0);
}
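
/*
 * Userspace sketch of submitting one read through this entry point
 * (illustrative only; ctx comes from the io_setup() sketch earlier, and
 * fd/buf are assumptions with error handling elided):
 *
 *	struct iocb cb, *cbs[1] = { &cb };
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.aio_lio_opcode = IOCB_CMD_PREAD;
 *	cb.aio_fildes     = fd;
 *	cb.aio_buf        = (__u64)(unsigned long)buf;
 *	cb.aio_nbytes     = 4096;
 *	cb.aio_offset     = 0;
 *	if (syscall(__NR_io_submit, ctx, 1, cbs) != 1)
 *		perror("io_submit");
 */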

/* lookup_kiocb
 *	Finds a given iocb for cancellation.
 */
static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb,
				  u32 key)
{
	struct list_head *pos;

	assert_spin_locked(&ctx->ctx_lock);

	/* TODO: use a hash or array, this sucks. */
	list_for_each(pos, &ctx->active_reqs) {
		struct kiocb *kiocb = list_kiocb(pos);
		if (kiocb->ki_obj.user == iocb && kiocb->ki_key == key)
			return kiocb;
	}
	return NULL;
}

/* sys_io_cancel:
 *	Attempts to cancel an iocb previously passed to io_submit.  If
 *	the operation is successfully cancelled, the resulting event is
 *	copied into the memory pointed to by result without being placed
 *	into the completion queue and 0 is returned.  May fail with
 *	-EFAULT if any of the data structures pointed to are invalid.
 *	May fail with -EINVAL if aio_context specified by ctx_id is
 *	invalid.  May fail with -EAGAIN if the iocb specified was not
 *	cancelled.  Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
		struct io_event __user *, result)
{
	struct io_event res;
	struct kioctx *ctx;
	struct kiocb *kiocb;
	u32 key;
	int ret;

	ret = get_user(key, &iocb->aio_key);
	if (unlikely(ret))
		return -EFAULT;

	ctx = lookup_ioctx(ctx_id);
	if (unlikely(!ctx))
		return -EINVAL;

	spin_lock_irq(&ctx->ctx_lock);

	kiocb = lookup_kiocb(ctx, iocb, key);
	if (kiocb)
		ret = kiocb_cancel(ctx, kiocb, &res);
	else
		ret = -EINVAL;

	spin_unlock_irq(&ctx->ctx_lock);

	if (!ret) {
		/* Cancellation succeeded -- copy the result
		 * into the user's buffer.
		 */
		if (copy_to_user(result, &res, sizeof(res)))
			ret = -EFAULT;
	}

	put_ioctx(ctx);

	return ret;
}

/* io_getevents:
 *	Attempts to read at least min_nr events and up to nr events from
 *	the completion queue for the aio_context specified by ctx_id. If
 *	it succeeds, the number of read events is returned. May fail with
 *	-EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
 *	out of range, if timeout is out of range.  May fail with -EFAULT
 *	if any of the memory specified is invalid.  May return 0 or
 *	< min_nr if the timeout specified by timeout has elapsed
 *	before sufficient events are available, where timeout == NULL
 *	specifies an infinite timeout. Note that the timeout pointed to by
 *	timeout is relative and will be updated if not NULL and the
 *	operation blocks. Will fail with -ENOSYS if not implemented.
 */
SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
		long, min_nr,
		long, nr,
		struct io_event __user *, events,
		struct timespec __user *, timeout)
{
	struct kioctx *ioctx = lookup_ioctx(ctx_id);
	long ret = -EINVAL;

	if (likely(ioctx)) {
		if (likely(min_nr <= nr && min_nr >= 0))
			ret = read_events(ioctx, min_nr, nr, events, timeout);
		put_ioctx(ioctx);
	}
	return ret;
}
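
/*
 * Userspace sketch of reaping the completion submitted in the
 * io_submit() sketch above (illustrative only; pairs with that example
 * and waits at most one second):
 *
 *	struct io_event ev;
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	if (syscall(__NR_io_getevents, ctx, 1, 1, &ev, &ts) == 1)
 *		printf("res=%lld res2=%lld\n",
 *		       (long long)ev.res, (long long)ev.res2);
 */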