/*
 *	An async IO implementation for Linux
 *	Written by Benjamin LaHaise <bcrl@kvack.org>
 *
 *	Implements an efficient asynchronous io interface.
 *
 *	Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
 *	Copyright 2018 Christoph Hellwig.
 *
 *	See ../COPYING for licensing terms.
 */
#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/time.h>
#include <linux/aio_abi.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/uio.h>

#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/aio.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/security.h>
#include <linux/eventfd.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/migrate.h>
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
#include <linux/mount.h>

#include <asm/kmap_types.h>
#include <linux/uaccess.h>
#include <linux/nospec.h>

#include "internal.h"

#define KIOCB_KEY		0

#define AIO_RING_MAGIC			0xa10a10a1
#define AIO_RING_COMPAT_FEATURES	1
#define AIO_RING_INCOMPAT_FEATURES	0
struct aio_ring {
	unsigned	id;	/* kernel internal index number */
	unsigned	nr;	/* number of io_events */
	unsigned	head;	/* Written to by userland or under ring_lock
				 * mutex by aio_read_events_ring(). */
	unsigned	tail;

	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of aio_ring */


	struct io_event		io_events[0];
}; /* 128 bytes + ring size */

#define AIO_RING_PAGES	8

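/*
 * Illustrative note (not part of the original source): the aio_context_t
 * handle returned by io_setup() is the userspace address at which this ring
 * is mapped (ctx->user_id below; lookup_ioctx() relies on this by casting
 * the handle back to a struct aio_ring __user *).  A userspace reaper can
 * therefore, in principle, consume completions without a syscall, roughly:
 *
 *	struct aio_ring *ring = (struct aio_ring *)ctx_id;
 *	while (ring->head != ring->tail) {
 *		struct io_event ev = ring->io_events[ring->head];
 *		ring->head = (ring->head + 1) % ring->nr;
 *		/@ consume ev @/
 *	}
 *
 * This sketch omits the memory barriers and the AIO_RING_MAGIC /
 * incompat_features checks a real reaper would need; the kernel-side
 * consumer below (aio_read_events_ring()) is the authoritative reader.
 */
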
struct kioctx_table {
	struct rcu_head		rcu;
	unsigned		nr;
	struct kioctx __rcu	*table[];
};

struct kioctx_cpu {
	unsigned		reqs_available;
};

struct ctx_rq_wait {
	struct completion comp;
	atomic_t count;
};

struct kioctx {
	struct percpu_ref	users;
	atomic_t		dead;

	struct percpu_ref	reqs;

	unsigned long		user_id;

	struct __percpu kioctx_cpu *cpu;

	/*
	 * For percpu reqs_available, number of slots we move to/from global
	 * counter at a time:
	 */
	unsigned		req_batch;
	/*
	 * This is what userspace passed to io_setup(), it's not used for
	 * anything but counting against the global max_reqs quota.
	 *
	 * The real limit is nr_events - 1, which will be larger (see
	 * aio_setup_ring())
	 */
	unsigned		max_reqs;

	/* Size of ringbuffer, in units of struct io_event */
	unsigned		nr_events;

	unsigned long		mmap_base;
	unsigned long		mmap_size;

	struct page		**ring_pages;
	long			nr_pages;

	struct rcu_work		free_rwork;	/* see free_ioctx() */

	/*
	 * signals when all in-flight requests are done
	 */
	struct ctx_rq_wait	*rq_wait;

	struct {
		/*
		 * This counts the number of available slots in the ringbuffer,
		 * so we avoid overflowing it: it's decremented (if positive)
		 * when allocating a kiocb and incremented when the resulting
		 * io_event is pulled off the ringbuffer.
		 *
		 * We batch accesses to it with a percpu version.
		 */
		atomic_t	reqs_available;
	} ____cacheline_aligned_in_smp;

	struct {
		spinlock_t	ctx_lock;
		struct list_head active_reqs;	/* used for cancellation */
	} ____cacheline_aligned_in_smp;

	struct {
		struct mutex	ring_lock;
		wait_queue_head_t wait;
	} ____cacheline_aligned_in_smp;

	struct {
		unsigned	tail;
		unsigned	completed_events;
		spinlock_t	completion_lock;
	} ____cacheline_aligned_in_smp;

	struct page		*internal_pages[AIO_RING_PAGES];
	struct file		*aio_ring_file;

	unsigned		id;
};

struct fsync_iocb {
	struct work_struct	work;
	struct file		*file;
	bool			datasync;
};

struct poll_iocb {
	struct file		*file;
	struct wait_queue_head	*head;
	__poll_t		events;
	bool			woken;
	bool			cancelled;
	struct wait_queue_entry	wait;
	struct work_struct	work;
};

struct aio_kiocb {
	union {
		struct kiocb		rw;
		struct fsync_iocb	fsync;
		struct poll_iocb	poll;
	};

	struct kioctx		*ki_ctx;
	kiocb_cancel_fn		*ki_cancel;

	struct iocb __user	*ki_user_iocb;	/* user's aiocb */
	__u64			ki_user_data;	/* user's data for completion */

	struct list_head	ki_list;	/* the aio core uses this
						 * for cancellation */
	refcount_t		ki_refcnt;

	/*
	 * If the aio_resfd field of the userspace iocb is not zero,
	 * this is the underlying eventfd context to deliver events to.
	 */
	struct eventfd_ctx	*ki_eventfd;
};

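/*
 * Illustrative note (not part of the original source): exactly one member of
 * the anonymous union in struct aio_kiocb is live for any given request,
 * selected by the submitted opcode -- 'rw' for the read/write commands,
 * 'fsync' for IOCB_CMD_FSYNC/FDSYNC, and 'poll' for IOCB_CMD_POLL.  The
 * fields below the union are common to every request type.
 */
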
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr;		/* current system wide number of aio requests */
unsigned long aio_max_nr = 0x10000; /* system wide maximum number of aio requests */
/*----end sysctl variables---*/

static struct kmem_cache	*kiocb_cachep;
static struct kmem_cache	*kioctx_cachep;

static struct vfsmount *aio_mnt;

static const struct file_operations aio_ring_fops;
static const struct address_space_operations aio_ctx_aops;

static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(aio_mnt->mnt_sb);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_mapping->a_ops = &aio_ctx_aops;
	inode->i_mapping->private_data = ctx;
	inode->i_size = PAGE_SIZE * nr_pages;

	file = alloc_file_pseudo(inode, aio_mnt, "[aio]",
				 O_RDWR, &aio_ring_fops);
	if (IS_ERR(file))
		iput(inode);
	return file;
}

static struct dentry *aio_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	struct dentry *root = mount_pseudo(fs_type, "aio:", NULL, NULL,
					   AIO_RING_MAGIC);

	if (!IS_ERR(root))
		root->d_sb->s_iflags |= SB_I_NOEXEC;
	return root;
}

/* aio_setup
 *	Creates the slab caches used by the aio routines, panic on
 *	failure as this is done early during the boot sequence.
 */
static int __init aio_setup(void)
{
	static struct file_system_type aio_fs = {
		.name		= "aio",
		.mount		= aio_mount,
		.kill_sb	= kill_anon_super,
	};
	aio_mnt = kern_mount(&aio_fs);
	if (IS_ERR(aio_mnt))
		panic("Failed to create aio fs mount.");

	kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	kioctx_cachep = KMEM_CACHE(kioctx, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
	return 0;
}
__initcall(aio_setup);

static void put_aio_ring_file(struct kioctx *ctx)
{
	struct file *aio_ring_file = ctx->aio_ring_file;
	struct address_space *i_mapping;

	if (aio_ring_file) {
		truncate_setsize(file_inode(aio_ring_file), 0);

		/* Prevent further access to the kioctx from migratepages */
		i_mapping = aio_ring_file->f_mapping;
		spin_lock(&i_mapping->private_lock);
		i_mapping->private_data = NULL;
		ctx->aio_ring_file = NULL;
		spin_unlock(&i_mapping->private_lock);

		fput(aio_ring_file);
	}
}

static void aio_free_ring(struct kioctx *ctx)
{
	int i;

	/* Disconnect the kioctx from the ring file.  This prevents future
	 * accesses to the kioctx from page migration.
	 */
	put_aio_ring_file(ctx);

	for (i = 0; i < ctx->nr_pages; i++) {
		struct page *page;
		pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
			 page_count(ctx->ring_pages[i]));
		page = ctx->ring_pages[i];
		if (!page)
			continue;
		ctx->ring_pages[i] = NULL;
		put_page(page);
	}

	if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) {
		kfree(ctx->ring_pages);
		ctx->ring_pages = NULL;
	}
}

static int aio_ring_mremap(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct mm_struct *mm = vma->vm_mm;
	struct kioctx_table *table;
	int i, res = -EINVAL;

	spin_lock(&mm->ioctx_lock);
	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);
	for (i = 0; i < table->nr; i++) {
		struct kioctx *ctx;

		ctx = rcu_dereference(table->table[i]);
		if (ctx && ctx->aio_ring_file == file) {
			if (!atomic_read(&ctx->dead)) {
				ctx->user_id = ctx->mmap_base = vma->vm_start;
				res = 0;
			}
			break;
		}
	}

	rcu_read_unlock();
	spin_unlock(&mm->ioctx_lock);
	return res;
}

static const struct vm_operations_struct aio_ring_vm_ops = {
	.mremap		= aio_ring_mremap,
#if IS_ENABLED(CONFIG_MMU)
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
#endif
};

static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_ops = &aio_ring_vm_ops;
	return 0;
}

static const struct file_operations aio_ring_fops = {
	.mmap = aio_ring_mmap,
};

#if IS_ENABLED(CONFIG_MIGRATION)
static int aio_migratepage(struct address_space *mapping, struct page *new,
			   struct page *old, enum migrate_mode mode)
{
	struct kioctx *ctx;
	unsigned long flags;
	pgoff_t idx;
	int rc;

	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the ctx->completion_lock. That does not work with the
	 * migration workflow of MIGRATE_SYNC_NO_COPY.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	rc = 0;

	/* mapping->private_lock here protects against the kioctx teardown. */
	spin_lock(&mapping->private_lock);
	ctx = mapping->private_data;
	if (!ctx) {
		rc = -EINVAL;
		goto out;
	}

	/* The ring_lock mutex.  This prevents aio_read_events() from writing
	 * to the ring's head, and prevents page migration from mucking in
	 * a partially initialized kioctx.
	 */
	if (!mutex_trylock(&ctx->ring_lock)) {
		rc = -EAGAIN;
		goto out;
	}

	idx = old->index;
	if (idx < (pgoff_t)ctx->nr_pages) {
		/* Make sure the old page hasn't already been changed */
		if (ctx->ring_pages[idx] != old)
			rc = -EAGAIN;
	} else
		rc = -EINVAL;

	if (rc != 0)
		goto out_unlock;

	/* Writeback must be complete */
	BUG_ON(PageWriteback(old));
	get_page(new);

	rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
	if (rc != MIGRATEPAGE_SUCCESS) {
		put_page(new);
		goto out_unlock;
	}

	/* Take completion_lock to prevent other writes to the ring buffer
	 * while the old page is copied to the new.  This prevents new
	 * events from being lost.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);
	migrate_page_copy(new, old);
	BUG_ON(ctx->ring_pages[idx] != old);
	ctx->ring_pages[idx] = new;
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	/* The old page is no longer accessible. */
	put_page(old);

out_unlock:
	mutex_unlock(&ctx->ring_lock);
out:
	spin_unlock(&mapping->private_lock);
	return rc;
}
#endif

static const struct address_space_operations aio_ctx_aops = {
	.set_page_dirty = __set_page_dirty_no_writeback,
#if IS_ENABLED(CONFIG_MIGRATION)
	.migratepage	= aio_migratepage,
#endif
};

static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events)
{
	struct aio_ring *ring;
	struct mm_struct *mm = current->mm;
	unsigned long size, unused;
	int nr_pages;
	int i;
	struct file *file;

	/* Compensate for the ring buffer's head/tail overlap entry */
	nr_events += 2;	/* 1 is required, 2 for good luck */

	size = sizeof(struct aio_ring);
	size += sizeof(struct io_event) * nr_events;

	nr_pages = PFN_UP(size);
	if (nr_pages < 0)
		return -EINVAL;

	file = aio_private_file(ctx, nr_pages);
	if (IS_ERR(file)) {
		ctx->aio_ring_file = NULL;
		return -ENOMEM;
	}

	ctx->aio_ring_file = file;
	nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
			/ sizeof(struct io_event);

	ctx->ring_pages = ctx->internal_pages;
	if (nr_pages > AIO_RING_PAGES) {
		ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
					  GFP_KERNEL);
		if (!ctx->ring_pages) {
			put_aio_ring_file(ctx);
			return -ENOMEM;
		}
	}

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		page = find_or_create_page(file->f_mapping,
					   i, GFP_HIGHUSER | __GFP_ZERO);
		if (!page)
			break;
		pr_debug("pid(%d) page[%d]->count=%d\n",
			 current->pid, i, page_count(page));
		SetPageUptodate(page);
		unlock_page(page);

		ctx->ring_pages[i] = page;
	}
	ctx->nr_pages = i;

	if (unlikely(i != nr_pages)) {
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	ctx->mmap_size = nr_pages * PAGE_SIZE;
	pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size);

	if (down_write_killable(&mm->mmap_sem)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -EINTR;
	}

	ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
				       PROT_READ | PROT_WRITE,
				       MAP_SHARED, 0, &unused, NULL);
	up_write(&mm->mmap_sem);
	if (IS_ERR((void *)ctx->mmap_base)) {
		ctx->mmap_size = 0;
		aio_free_ring(ctx);
		return -ENOMEM;
	}

	pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);

	ctx->user_id = ctx->mmap_base;
	ctx->nr_events = nr_events; /* trusted copy */

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->nr = nr_events;	/* user copy */
	ring->id = ~0U;
	ring->head = ring->tail = 0;
	ring->magic = AIO_RING_MAGIC;
	ring->compat_features = AIO_RING_COMPAT_FEATURES;
	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
	ring->header_length = sizeof(struct aio_ring);
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	return 0;
}

#define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
#define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
#define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

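/*
 * Illustrative note (not part of the original source): with 4 KiB pages and
 * the 32-byte struct io_event, AIO_EVENTS_PER_PAGE is 128.  The first ring
 * page also carries the struct aio_ring header, so it holds fewer events,
 * and AIO_EVENTS_OFFSET is the resulting gap.  Ring slot N therefore lives at
 *
 *	page  = ring_pages[(N + AIO_EVENTS_OFFSET) / AIO_EVENTS_PER_PAGE];
 *	event = (struct io_event *)kmap(page)
 *			+ (N + AIO_EVENTS_OFFSET) % AIO_EVENTS_PER_PAGE;
 *
 * which is the indexing used by aio_complete() and aio_read_events_ring().
 */
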
void kiocb_set_cancel_fn(struct kiocb *iocb, kiocb_cancel_fn *cancel)
{
	struct aio_kiocb *req = container_of(iocb, struct aio_kiocb, rw);
	struct kioctx *ctx = req->ki_ctx;
	unsigned long flags;

	if (WARN_ON_ONCE(!list_empty(&req->ki_list)))
		return;

	spin_lock_irqsave(&ctx->ctx_lock, flags);
	list_add_tail(&req->ki_list, &ctx->active_reqs);
	req->ki_cancel = cancel;
	spin_unlock_irqrestore(&ctx->ctx_lock, flags);
}
EXPORT_SYMBOL(kiocb_set_cancel_fn);

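/*
 * Illustrative note (not part of the original source): kiocb_set_cancel_fn()
 * is exported for drivers that complete kiocbs asynchronously and want their
 * in-flight requests to be cancellable (the USB gadget code is a long-time
 * in-tree user).  Once registered, the cancel callback may be invoked from
 * free_ioctx_users() below with ctx->ctx_lock held; it is expected to
 * terminate the underlying I/O so that the normal completion path still
 * runs, and must not free the request itself.
 */
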
/*
 * free_ioctx() should be RCU delayed to synchronize against the RCU
 * protected lookup_ioctx() and also needs process context to call
 * aio_free_ring().  Use rcu_work.
 */
static void free_ioctx(struct work_struct *work)
{
	struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx,
					  free_rwork);
	pr_debug("freeing %p\n", ctx);

	aio_free_ring(ctx);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
}

static void free_ioctx_reqs(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, reqs);

	/* At this point we know that there are no in-flight requests */
	if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
		complete(&ctx->rq_wait->comp);

	/* Synchronize against RCU protected table->table[] dereferences */
	INIT_RCU_WORK(&ctx->free_rwork, free_ioctx);
	queue_rcu_work(system_wq, &ctx->free_rwork);
}

/*
 * When this function runs, the kioctx has been removed from the "hash table"
 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
 * now it's safe to cancel any that need to be.
 */
static void free_ioctx_users(struct percpu_ref *ref)
{
	struct kioctx *ctx = container_of(ref, struct kioctx, users);
	struct aio_kiocb *req;

	spin_lock_irq(&ctx->ctx_lock);

	while (!list_empty(&ctx->active_reqs)) {
		req = list_first_entry(&ctx->active_reqs,
				       struct aio_kiocb, ki_list);
		req->ki_cancel(&req->rw);
		list_del_init(&req->ki_list);
	}

	spin_unlock_irq(&ctx->ctx_lock);

	percpu_ref_kill(&ctx->reqs);
	percpu_ref_put(&ctx->reqs);
}

static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
	unsigned i, new_nr;
	struct kioctx_table *table, *old;
	struct aio_ring *ring;

	spin_lock(&mm->ioctx_lock);
	table = rcu_dereference_raw(mm->ioctx_table);

	while (1) {
		if (table)
			for (i = 0; i < table->nr; i++)
				if (!rcu_access_pointer(table->table[i])) {
					ctx->id = i;
					rcu_assign_pointer(table->table[i], ctx);
					spin_unlock(&mm->ioctx_lock);

					/* While kioctx setup is in progress,
					 * we are protected from page migration
					 * changing ring_pages by ->ring_lock.
					 */
					ring = kmap_atomic(ctx->ring_pages[0]);
					ring->id = ctx->id;
					kunmap_atomic(ring);
					return 0;
				}

		new_nr = (table ? table->nr : 1) * 4;
		spin_unlock(&mm->ioctx_lock);

		table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) *
				new_nr, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		table->nr = new_nr;

		spin_lock(&mm->ioctx_lock);
		old = rcu_dereference_raw(mm->ioctx_table);

		if (!old) {
			rcu_assign_pointer(mm->ioctx_table, table);
		} else if (table->nr > old->nr) {
			memcpy(table->table, old->table,
			       old->nr * sizeof(struct kioctx *));

			rcu_assign_pointer(mm->ioctx_table, table);
			kfree_rcu(old, rcu);
		} else {
			kfree(table);
			table = old;
		}
	}
}

static void aio_nr_sub(unsigned nr)
{
	spin_lock(&aio_nr_lock);
	if (WARN_ON(aio_nr - nr > aio_nr))
		aio_nr = 0;
	else
		aio_nr -= nr;
	spin_unlock(&aio_nr_lock);
}

/* ioctx_alloc
 *	Allocates and initializes an ioctx.  Returns an ERR_PTR if it failed.
 */
static struct kioctx *ioctx_alloc(unsigned nr_events)
{
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx;
	int err = -ENOMEM;

	/*
	 * Store the original nr_events -- what userspace passed to io_setup(),
	 * for counting against the global limit -- before it changes.
	 */
	unsigned int max_reqs = nr_events;

	/*
	 * We keep track of the number of available ringbuffer slots, to prevent
	 * overflow (reqs_available), and we also use percpu counters for this.
	 *
	 * So since up to half the slots might be on other cpu's percpu counters
	 * and unavailable, double nr_events so userspace sees what they
	 * expected: additionally, we move req_batch slots to/from percpu
	 * counters at a time, so make sure that isn't 0:
	 */
	nr_events = max(nr_events, num_possible_cpus() * 4);
	nr_events *= 2;

	/* Prevent overflows */
	if (nr_events > (0x10000000U / sizeof(struct io_event))) {
		pr_debug("ENOMEM: nr_events too high\n");
		return ERR_PTR(-EINVAL);
	}

	if (!nr_events || (unsigned long)max_reqs > aio_max_nr)
		return ERR_PTR(-EAGAIN);

	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	ctx->max_reqs = max_reqs;

	spin_lock_init(&ctx->ctx_lock);
	spin_lock_init(&ctx->completion_lock);
	mutex_init(&ctx->ring_lock);
	/* Protect against page migration throughout kioctx setup by keeping
	 * the ring_lock mutex held until setup is complete. */
	mutex_lock(&ctx->ring_lock);
	init_waitqueue_head(&ctx->wait);

	INIT_LIST_HEAD(&ctx->active_reqs);

	if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL))
		goto err;

	if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL))
		goto err;

	ctx->cpu = alloc_percpu(struct kioctx_cpu);
	if (!ctx->cpu)
		goto err;

	err = aio_setup_ring(ctx, nr_events);
	if (err < 0)
		goto err;

	atomic_set(&ctx->reqs_available, ctx->nr_events - 1);
	ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4);
	if (ctx->req_batch < 1)
		ctx->req_batch = 1;

	/* limit the number of system wide aios */
	spin_lock(&aio_nr_lock);
	if (aio_nr + ctx->max_reqs > aio_max_nr ||
	    aio_nr + ctx->max_reqs < aio_nr) {
		spin_unlock(&aio_nr_lock);
		err = -EAGAIN;
		goto err_ctx;
	}
	aio_nr += ctx->max_reqs;
	spin_unlock(&aio_nr_lock);

	percpu_ref_get(&ctx->users);	/* io_setup() will drop this ref */
	percpu_ref_get(&ctx->reqs);	/* free_ioctx_users() will drop this */

	err = ioctx_add_table(ctx, mm);
	if (err)
		goto err_cleanup;

	/* Release the ring_lock mutex now that all setup is complete. */
	mutex_unlock(&ctx->ring_lock);

	pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
		 ctx, ctx->user_id, mm, ctx->nr_events);
	return ctx;

err_cleanup:
	aio_nr_sub(ctx->max_reqs);
err_ctx:
	atomic_set(&ctx->dead, 1);
	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);
	aio_free_ring(ctx);
err:
	mutex_unlock(&ctx->ring_lock);
	free_percpu(ctx->cpu);
	percpu_ref_exit(&ctx->reqs);
	percpu_ref_exit(&ctx->users);
	kmem_cache_free(kioctx_cachep, ctx);
	pr_debug("error allocating ioctx %d\n", err);
	return ERR_PTR(err);
}

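/*
 * Illustrative note (not part of the original source): a concrete sizing
 * example for ioctx_alloc().  On a machine with 4 possible CPUs, io_setup(64)
 * gives max_reqs = 64 and nr_events = max(64, 16) * 2 = 128; aio_setup_ring()
 * then adds the 2 slack entries and rounds the allocation up to whole pages,
 * so the ring ends up with room for well over the 64 events userspace asked
 * for, while only 64 are charged against the aio_max_nr quota.
 */
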
/* kill_ioctx
 *	Cancels all outstanding aio requests on an aio context. Used
 *	when the processes owning a context have all exited to encourage
 *	the rapid destruction of the kioctx.
 */
static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
		      struct ctx_rq_wait *wait)
{
	struct kioctx_table *table;

	spin_lock(&mm->ioctx_lock);
	if (atomic_xchg(&ctx->dead, 1)) {
		spin_unlock(&mm->ioctx_lock);
		return -EINVAL;
	}

	table = rcu_dereference_raw(mm->ioctx_table);
	WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id]));
	RCU_INIT_POINTER(table->table[ctx->id], NULL);
	spin_unlock(&mm->ioctx_lock);

	/* free_ioctx_reqs() will do the necessary RCU synchronization */
	wake_up_all(&ctx->wait);

	/*
	 * It'd be more correct to do this in free_ioctx(), after all
	 * the outstanding kiocbs have finished - but by then io_destroy
	 * has already returned, so io_setup() could potentially return
	 * -EAGAIN with no ioctxs actually in use (as far as userspace
	 * could tell).
	 */
	aio_nr_sub(ctx->max_reqs);

	if (ctx->mmap_size)
		vm_munmap(ctx->mmap_base, ctx->mmap_size);

	ctx->rq_wait = wait;
	percpu_ref_kill(&ctx->users);
	return 0;
}

/*
 * exit_aio: called when the last user of mm goes away.  At this point, there is
 * no way for any new requests to be submitted or any of the io_* syscalls to be
 * called on the context.
 *
 * There may be outstanding kiocbs, but free_ioctx() will explicitly wait on
 * them.
 */
void exit_aio(struct mm_struct *mm)
{
	struct kioctx_table *table = rcu_dereference_raw(mm->ioctx_table);
	struct ctx_rq_wait wait;
	int i, skipped;

	if (!table)
		return;

	atomic_set(&wait.count, table->nr);
	init_completion(&wait.comp);

	skipped = 0;
	for (i = 0; i < table->nr; ++i) {
		struct kioctx *ctx =
			rcu_dereference_protected(table->table[i], true);

		if (!ctx) {
			skipped++;
			continue;
		}

		/*
		 * We don't need to bother with munmap() here - exit_mmap(mm)
		 * is coming and it'll unmap everything. And we simply can't,
		 * this is not necessarily our ->mm.
		 * Since kill_ioctx() uses non-zero ->mmap_size as indicator
		 * that it needs to unmap the area, just set it to 0.
		 */
		ctx->mmap_size = 0;
		kill_ioctx(mm, ctx, &wait);
	}

	if (!atomic_sub_and_test(skipped, &wait.count)) {
		/* Wait until all IO for the context are done. */
		wait_for_completion(&wait.comp);
	}

	RCU_INIT_POINTER(mm->ioctx_table, NULL);
	kfree(table);
}

static void put_reqs_available(struct kioctx *ctx, unsigned nr)
{
	struct kioctx_cpu *kcpu;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	kcpu->reqs_available += nr;

	while (kcpu->reqs_available >= ctx->req_batch * 2) {
		kcpu->reqs_available -= ctx->req_batch;
		atomic_add(ctx->req_batch, &ctx->reqs_available);
	}

	local_irq_restore(flags);
}

static bool get_reqs_available(struct kioctx *ctx)
{
	struct kioctx_cpu *kcpu;
	bool ret = false;
	unsigned long flags;

	local_irq_save(flags);
	kcpu = this_cpu_ptr(ctx->cpu);
	if (!kcpu->reqs_available) {
		int old, avail = atomic_read(&ctx->reqs_available);

		do {
			if (avail < ctx->req_batch)
				goto out;

			old = avail;
			avail = atomic_cmpxchg(&ctx->reqs_available,
					       avail, avail - ctx->req_batch);
		} while (avail != old);

		kcpu->reqs_available += ctx->req_batch;
	}

	ret = true;
	kcpu->reqs_available--;
out:
	local_irq_restore(flags);
	return ret;
}

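/*
 * Illustrative note (not part of the original source): the two helpers above
 * amortize contention on ctx->reqs_available by giving each CPU a small local
 * pool.  For example, with req_batch = 32 a submitting CPU that finds its
 * kioctx_cpu pool empty grabs 32 slots from the global counter in one cmpxchg
 * and then hands them out locally; a completing CPU returns slots locally and
 * only flushes 32 back to the global counter once it has accumulated 64
 * (req_batch * 2).  The shared atomic is therefore touched roughly once per
 * batch rather than once per request.
 */
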
/* refill_reqs_available
 *	Updates the reqs_available reference counts used for tracking the
 *	number of free slots in the completion ring. This can be called
 *	from aio_complete() (to optimistically update reqs_available) or
 *	from aio_get_req() (the "we're out of events" case). It must be
 *	called holding ctx->completion_lock.
 */
static void refill_reqs_available(struct kioctx *ctx, unsigned head,
				  unsigned tail)
{
	unsigned events_in_ring, completed;

	/* Clamp head since userland can write to it. */
	head %= ctx->nr_events;
	if (head <= tail)
		events_in_ring = tail - head;
	else
		events_in_ring = ctx->nr_events - (head - tail);

	completed = ctx->completed_events;
	if (events_in_ring < completed)
		completed -= events_in_ring;
	else
		completed = 0;

	if (!completed)
		return;

	ctx->completed_events -= completed;
	put_reqs_available(ctx, completed);
}

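/*
 * Illustrative note (not part of the original source): a worked example of
 * the arithmetic above.  Suppose nr_events = 128, the kernel has completed 20
 * events since the last refill (completed_events = 20), head = 120 and
 * tail = 12.  Then events_in_ring = 128 - (120 - 12) = 20: every completed
 * event is still sitting unread in the ring, so nothing can be recycled and
 * completed stays 0.  Once userspace advances head to 4, events_in_ring
 * drops to 8, so 20 - 8 = 12 slots are returned via put_reqs_available().
 */
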
/* user_refill_reqs_available
 *	Called to refill reqs_available when aio_get_req() runs out of space
 *	in the completion ring.
 */
static void user_refill_reqs_available(struct kioctx *ctx)
{
	spin_lock_irq(&ctx->completion_lock);
	if (ctx->completed_events) {
		struct aio_ring *ring;
		unsigned head;

		/* Access of ring->head may race with aio_read_events_ring()
		 * here, but that's okay since whether we read the old version
		 * or the new version, either will be valid. The important
		 * part is that head cannot pass tail since we prevent
		 * aio_complete() from updating tail by holding
		 * ctx->completion_lock. Even if head is invalid, the check
		 * against ctx->completed_events below will make sure we do the
		 * safe/right thing.
		 */
		ring = kmap_atomic(ctx->ring_pages[0]);
		head = ring->head;
		kunmap_atomic(ring);

		refill_reqs_available(ctx, head, ctx->tail);
	}

	spin_unlock_irq(&ctx->completion_lock);
}

/* aio_get_req
 *	Allocate a slot for an aio request.
 *	Returns NULL if no requests are free.
 */
static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx)
{
	struct aio_kiocb *req;

	if (!get_reqs_available(ctx)) {
		user_refill_reqs_available(ctx);
		if (!get_reqs_available(ctx))
			return NULL;
	}

	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO);
	if (unlikely(!req))
		goto out_put;

	percpu_ref_get(&ctx->reqs);
	INIT_LIST_HEAD(&req->ki_list);
	refcount_set(&req->ki_refcnt, 0);
	req->ki_ctx = ctx;
	return req;
out_put:
	put_reqs_available(ctx, 1);
	return NULL;
}

static struct kioctx *lookup_ioctx(unsigned long ctx_id)
{
	struct aio_ring __user *ring = (void __user *)ctx_id;
	struct mm_struct *mm = current->mm;
	struct kioctx *ctx, *ret = NULL;
	struct kioctx_table *table;
	unsigned id;

	if (get_user(id, &ring->id))
		return NULL;

	rcu_read_lock();
	table = rcu_dereference(mm->ioctx_table);

	if (!table || id >= table->nr)
		goto out;

	id = array_index_nospec(id, table->nr);
	ctx = rcu_dereference(table->table[id]);
	if (ctx && ctx->user_id == ctx_id) {
		if (percpu_ref_tryget_live(&ctx->users))
			ret = ctx;
	}
out:
	rcu_read_unlock();
	return ret;
}

static inline void iocb_put(struct aio_kiocb *iocb)
{
	if (refcount_read(&iocb->ki_refcnt) == 0 ||
	    refcount_dec_and_test(&iocb->ki_refcnt)) {
		percpu_ref_put(&iocb->ki_ctx->reqs);
		kmem_cache_free(kiocb_cachep, iocb);
	}
}

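/*
 * Illustrative note (not part of the original source): ki_refcnt is
 * initialized to 0 in aio_get_req(), so in the common case of a single
 * completion path the first iocb_put() frees the request immediately via the
 * refcount_read() == 0 shortcut.  A request type that can be reached by two
 * paths concurrently can bump the count after allocation; each path then
 * calls iocb_put(), and refcount_dec_and_test() only frees the request once
 * the count has dropped back to zero.
 */
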
/* aio_complete
 *	Called when the io request on the given iocb is complete.
 */
static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
{
	struct kioctx	*ctx = iocb->ki_ctx;
	struct aio_ring	*ring;
	struct io_event	*ev_page, *event;
	unsigned tail, pos, head;
	unsigned long	flags;

	/*
	 * Add a completion event to the ring buffer. Must be done holding
	 * ctx->completion_lock to prevent other code from messing with the tail
	 * pointer since we might be called from irq context.
	 */
	spin_lock_irqsave(&ctx->completion_lock, flags);

	tail = ctx->tail;
	pos = tail + AIO_EVENTS_OFFSET;

	if (++tail >= ctx->nr_events)
		tail = 0;

	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
	event = ev_page + pos % AIO_EVENTS_PER_PAGE;

	event->obj = (u64)(unsigned long)iocb->ki_user_iocb;
	event->data = iocb->ki_user_data;
	event->res = res;
	event->res2 = res2;

	kunmap_atomic(ev_page);
	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);

	pr_debug("%p[%u]: %p: %p %Lx %lx %lx\n",
		 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data,
		 res, res2);

	/* after flagging the request as done, we
	 * must never even look at it again
	 */
	smp_wmb();	/* make event visible before updating tail */

	ctx->tail = tail;

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	ring->tail = tail;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	ctx->completed_events++;
	if (ctx->completed_events > 1)
		refill_reqs_available(ctx, head, tail);
	spin_unlock_irqrestore(&ctx->completion_lock, flags);

	pr_debug("added to ring %p at [%u]\n", iocb, tail);

	/*
	 * Check if the user asked us to deliver the result through an
	 * eventfd. The eventfd_signal() function is safe to be called
	 * from IRQ context.
	 */
	if (iocb->ki_eventfd) {
		eventfd_signal(iocb->ki_eventfd, 1);
		eventfd_ctx_put(iocb->ki_eventfd);
	}

	/*
	 * We have to order our ring_info tail store above and test
	 * of the wait list below outside the wait lock.  This is
	 * like in wake_up_bit() where clearing a bit has to be
	 * ordered with the unlocked test.
	 */
	smp_mb();

	if (waitqueue_active(&ctx->wait))
		wake_up(&ctx->wait);
	iocb_put(iocb);
}

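/*
 * Illustrative note (not part of the original source): the two barriers in
 * aio_complete() pair with the consumer side.  The smp_wmb() before storing
 * ctx->tail / ring->tail makes the io_event contents visible before the new
 * tail is, and is matched by the smp_rmb() after reading ring->tail in
 * aio_read_events_ring() below.  The later smp_mb() orders the tail store
 * against the unlocked waitqueue_active() test, the usual pattern for a
 * waker racing with a sleeper that sets its task state before re-checking
 * the wait condition.
 */
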
/* aio_read_events_ring
 *	Pull an event off of the ioctx's event ring.  Returns the number of
 *	events fetched
 */
static long aio_read_events_ring(struct kioctx *ctx,
				 struct io_event __user *event, long nr)
{
	struct aio_ring *ring;
	unsigned head, tail, pos;
	long ret = 0;
	int copy_ret;

	/*
	 * The mutex can block and wake us up and that will cause
	 * wait_event_interruptible_hrtimeout() to schedule without sleeping
	 * and repeat. This should be rare enough that it doesn't cause
	 * performance issues. See the comment in read_events() for more detail.
	 */
	sched_annotate_sleep();
	mutex_lock(&ctx->ring_lock);

	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;
	kunmap_atomic(ring);

	/*
	 * Ensure that once we've read the current tail pointer, that
	 * we also see the events that were stored up to the tail.
	 */
	smp_rmb();

	pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events);

	if (head == tail)
		goto out;

	head %= ctx->nr_events;
	tail %= ctx->nr_events;

	while (ret < nr) {
		long avail;
		struct io_event *ev;
		struct page *page;

		avail = (head <= tail ? tail : ctx->nr_events) - head;
		if (head == tail)
			break;

		pos = head + AIO_EVENTS_OFFSET;
		page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE];
		pos %= AIO_EVENTS_PER_PAGE;

		avail = min(avail, nr - ret);
		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);

		ev = kmap(page);
		copy_ret = copy_to_user(event + ret, ev + pos,
					sizeof(*ev) * avail);
		kunmap(page);

		if (unlikely(copy_ret)) {
			ret = -EFAULT;
			goto out;
		}

		ret += avail;
		head += avail;
		head %= ctx->nr_events;
	}

	ring = kmap_atomic(ctx->ring_pages[0]);
	ring->head = head;
	kunmap_atomic(ring);
	flush_dcache_page(ctx->ring_pages[0]);

	pr_debug("%li h%u t%u\n", ret, head, tail);
out:
	mutex_unlock(&ctx->ring_lock);

	return ret;
}

static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr,
			    struct io_event __user *event, long *i)
{
	long ret = aio_read_events_ring(ctx, event + *i, nr - *i);

	if (ret > 0)
		*i += ret;

	if (unlikely(atomic_read(&ctx->dead)))
		ret = -EINVAL;

	if (!*i)
		*i = ret;

	return ret < 0 || *i >= min_nr;
}

static long read_events(struct kioctx *ctx, long min_nr, long nr,
			struct io_event __user *event,
			ktime_t until)
{
	long ret = 0;

	/*
	 * Note that aio_read_events() is being called as the conditional - i.e.
	 * we're calling it after prepare_to_wait() has set task state to
	 * TASK_INTERRUPTIBLE.
	 *
	 * But aio_read_events() can block, and if it blocks it's going to flip
1257 * the task state back to TASK_RUNNING.
1258 *
1259 * This should be ok, provided it doesn't flip the state back to
1260 * TASK_RUNNING and return 0 too much - that causes us to spin. That
1261 * will only happen if the mutex_lock() call blocks, and we then find
1262 * the ringbuffer empty. So in practice we should be ok, but it's
1263 * something to be aware of when touching this code.
1264 */
Thomas Gleixner2456e852016-12-25 11:38:40 +01001265 if (until == 0)
Fam Zheng5f785de2014-11-06 20:44:36 +08001266 aio_read_events(ctx, min_nr, nr, event, &ret);
1267 else
1268 wait_event_interruptible_hrtimeout(ctx->wait,
1269 aio_read_events(ctx, min_nr, nr, event, &ret),
1270 until);
Kent Overstreeta31ad382013-05-07 16:18:45 -07001271 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272}
1273
Linus Torvalds1da177e2005-04-16 15:20:36 -07001274/* sys_io_setup:
1275 * Create an aio_context capable of receiving at least nr_events.
1276 * ctxp must not point to an aio_context that already exists, and
1277 * must be initialized to 0 prior to the call. On successful
1278 * creation of the aio_context, *ctxp is filled in with the resulting
1279 * handle. May fail with -EINVAL if *ctxp is not initialized, or
1280 * if the specified nr_events exceeds internal limits. May fail
1281 * with -EAGAIN if the specified nr_events exceeds the user's limit
1282 * of available events. May fail with -ENOMEM if insufficient kernel
1283 * resources are available. May fail with -EFAULT if an invalid
1284 * pointer is passed for ctxp. Will fail with -ENOSYS if not
1285 * implemented.
1286 */
Heiko Carstens002c8972009-01-14 14:14:18 +01001287SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001288{
1289 struct kioctx *ioctx = NULL;
1290 unsigned long ctx;
1291 long ret;
1292
1293 ret = get_user(ctx, ctxp);
1294 if (unlikely(ret))
1295 goto out;
1296
1297 ret = -EINVAL;
Zach Brownd55b5fd2005-11-07 00:59:31 -08001298 if (unlikely(ctx || nr_events == 0)) {
Kinglong Meeacd88d42015-02-04 21:15:59 +08001299 pr_debug("EINVAL: ctx %lu nr_events %u\n",
Zach Brownd55b5fd2005-11-07 00:59:31 -08001300 ctx, nr_events);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 goto out;
1302 }
1303
1304 ioctx = ioctx_alloc(nr_events);
1305 ret = PTR_ERR(ioctx);
1306 if (!IS_ERR(ioctx)) {
1307 ret = put_user(ioctx->user_id, ctxp);
Al Viroa2e18592012-03-20 16:27:57 -04001308 if (ret)
Anatol Pomozove02ba722014-04-15 11:31:33 -07001309 kill_ioctx(current->mm, ioctx, NULL);
Kent Overstreet723be6e2013-05-28 15:14:48 -07001310 percpu_ref_put(&ioctx->users);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 }
1312
1313out:
1314 return ret;
1315}
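/*
 * Illustrative userspace sketch (not part of the kernel source) of the
 * contract documented above: the context word must be zeroed before the call
 * and receives the handle on success.  Raw syscall numbers are used here;
 * most programs would go through libaio instead.
 *
 *	#include <linux/aio_abi.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	aio_context_t ctx = 0;			/* must start out as 0 */
 *
 *	if (syscall(__NR_io_setup, 128, &ctx) < 0)
 *		perror("io_setup");
 */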
1316
Al Viroc00d2c72016-12-20 07:04:57 -05001317#ifdef CONFIG_COMPAT
1318COMPAT_SYSCALL_DEFINE2(io_setup, unsigned, nr_events, u32 __user *, ctx32p)
1319{
1320 struct kioctx *ioctx = NULL;
1321 unsigned long ctx;
1322 long ret;
1323
1324 ret = get_user(ctx, ctx32p);
1325 if (unlikely(ret))
1326 goto out;
1327
1328 ret = -EINVAL;
1329 if (unlikely(ctx || nr_events == 0)) {
1330 pr_debug("EINVAL: ctx %lu nr_events %u\n",
1331 ctx, nr_events);
1332 goto out;
1333 }
1334
1335 ioctx = ioctx_alloc(nr_events);
1336 ret = PTR_ERR(ioctx);
1337 if (!IS_ERR(ioctx)) {
1338 /* truncating is ok because it's a user address */
1339 ret = put_user((u32)ioctx->user_id, ctx32p);
1340 if (ret)
1341 kill_ioctx(current->mm, ioctx, NULL);
1342 percpu_ref_put(&ioctx->users);
1343 }
1344
1345out:
1346 return ret;
1347}
1348#endif
1349
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350/* sys_io_destroy:
1351 * Destroy the aio_context specified. May cancel any outstanding
1352 * AIOs and block on completion. Will fail with -ENOSYS if not
Satoru Takeuchi642b5122010-08-05 11:23:11 -07001353 * implemented. May fail with -EINVAL if the context pointed to
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 * is invalid.
1355 */
Heiko Carstens002c8972009-01-14 14:14:18 +01001356SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357{
1358 struct kioctx *ioctx = lookup_ioctx(ctx);
1359 if (likely(NULL != ioctx)) {
Jens Axboedc48e562015-04-15 11:17:23 -06001360 struct ctx_rq_wait wait;
Benjamin LaHaisefb2d4482014-04-29 12:45:17 -04001361 int ret;
Anatol Pomozove02ba722014-04-15 11:31:33 -07001362
Jens Axboedc48e562015-04-15 11:17:23 -06001363 init_completion(&wait.comp);
1364 atomic_set(&wait.count, 1);
1365
Anatol Pomozove02ba722014-04-15 11:31:33 -07001366 /* Pass the wait structure to kill_ioctx() where it can be set
1367 * in a thread-safe way. If we tried to set it here we would have
1368 * a race condition if two io_destroy() calls ran simultaneously.
1369 */
Jens Axboedc48e562015-04-15 11:17:23 -06001370 ret = kill_ioctx(current->mm, ioctx, &wait);
Kent Overstreet723be6e2013-05-28 15:14:48 -07001371 percpu_ref_put(&ioctx->users);
Anatol Pomozove02ba722014-04-15 11:31:33 -07001372
1373 /* Wait until all IO for the context are done. Otherwise kernel
1374 * keep using user-space buffers even if user thinks the context
1375 * is destroyed.
1376 */
Benjamin LaHaisefb2d4482014-04-29 12:45:17 -04001377 if (!ret)
Jens Axboedc48e562015-04-15 11:17:23 -06001378 wait_for_completion(&wait.comp);
Anatol Pomozove02ba722014-04-15 11:31:33 -07001379
Benjamin LaHaisefb2d4482014-04-29 12:45:17 -04001380 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 }
Kinglong Meeacd88d42015-02-04 21:15:59 +08001382 pr_debug("EINVAL: invalid context id\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 return -EINVAL;
1384}
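/*
 * Illustrative userspace sketch (not part of the kernel source), continuing
 * the io_setup() sketch above: as the comments in this function note, the
 * call only returns once outstanding IO has stopped touching the caller's
 * buffers.
 *
 *	if (syscall(__NR_io_destroy, ctx) < 0)
 *		perror("io_destroy");
 */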
1385
Al Viro3c96c7f2018-05-28 13:37:43 -04001386static void aio_remove_iocb(struct aio_kiocb *iocb)
1387{
1388 struct kioctx *ctx = iocb->ki_ctx;
1389 unsigned long flags;
1390
1391 spin_lock_irqsave(&ctx->ctx_lock, flags);
1392 list_del(&iocb->ki_list);
1393 spin_unlock_irqrestore(&ctx->ctx_lock, flags);
1394}
1395
Christoph Hellwig54843f82018-05-02 19:57:21 +02001396static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
1397{
1398 struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
1399
Al Viro3c96c7f2018-05-28 13:37:43 -04001400 if (!list_empty_careful(&iocb->ki_list))
1401 aio_remove_iocb(iocb);
1402
Christoph Hellwig54843f82018-05-02 19:57:21 +02001403 if (kiocb->ki_flags & IOCB_WRITE) {
1404 struct inode *inode = file_inode(kiocb->ki_filp);
1405
1406 /*
1407 * Tell lockdep we inherited freeze protection from the
1408 * submission thread.
1409 */
1410 if (S_ISREG(inode->i_mode))
1411 __sb_writers_acquired(inode->i_sb, SB_FREEZE_WRITE);
1412 file_end_write(kiocb->ki_filp);
1413 }
1414
1415 fput(kiocb->ki_filp);
1416 aio_complete(iocb, res, res2);
1417}
1418
1419static int aio_prep_rw(struct kiocb *req, struct iocb *iocb)
1420{
1421 int ret;
1422
1423 req->ki_filp = fget(iocb->aio_fildes);
1424 if (unlikely(!req->ki_filp))
1425 return -EBADF;
1426 req->ki_complete = aio_complete_rw;
1427 req->ki_pos = iocb->aio_offset;
1428 req->ki_flags = iocb_flags(req->ki_filp);
1429 if (iocb->aio_flags & IOCB_FLAG_RESFD)
1430 req->ki_flags |= IOCB_EVENTFD;
Adam Manzanaresfc287242018-05-22 10:52:18 -07001431 req->ki_hint = ki_hint_validate(file_write_hint(req->ki_filp));
Adam Manzanaresd9a08a92018-05-22 10:52:19 -07001432 if (iocb->aio_flags & IOCB_FLAG_IOPRIO) {
1433 /*
1434 * If the IOCB_FLAG_IOPRIO flag of aio_flags is set, then
1435 * aio_reqprio is interpreted as an I/O scheduling
1436 * class and priority.
1437 */
1438 ret = ioprio_check_cap(iocb->aio_reqprio);
1439 if (ret) {
Adam Manzanares9a6d9a62018-06-04 10:59:57 -07001440 pr_debug("aio ioprio check cap error: %d\n", ret);
Jens Axboedf66ef62018-11-17 07:43:42 -07001441 fput(req->ki_filp);
Adam Manzanares9a6d9a62018-06-04 10:59:57 -07001442 return ret;
Adam Manzanaresd9a08a92018-05-22 10:52:19 -07001443 }
1444
1445 req->ki_ioprio = iocb->aio_reqprio;
1446 } else
1447 req->ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
1448
Christoph Hellwig54843f82018-05-02 19:57:21 +02001449 ret = kiocb_set_rw_flags(req, iocb->aio_rw_flags);
1450 if (unlikely(ret))
1451 fput(req->ki_filp);
1452 return ret;
1453}
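/*
 * Illustrative userspace sketch (not part of the kernel source) for the
 * IOCB_FLAG_IOPRIO path handled above.  The MY_* constants are defined here
 * only for the example and assume the usual ioprio encoding of
 * (class << 13) | data; IOPRIO_CLASS_RT is normally restricted to
 * CAP_SYS_ADMIN, which is what ioprio_check_cap() enforces.
 *
 *	#define MY_IOPRIO_CLASS_SHIFT	13
 *	#define MY_IOPRIO_CLASS_RT	1
 *
 *	cb.aio_flags   |= IOCB_FLAG_IOPRIO;
 *	cb.aio_reqprio  = MY_IOPRIO_CLASS_RT << MY_IOPRIO_CLASS_SHIFT;
 */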
1454
Christoph Hellwig89319d312016-10-30 11:42:03 -05001455static int aio_setup_rw(int rw, struct iocb *iocb, struct iovec **iovec,
1456 bool vectored, bool compat, struct iov_iter *iter)
Badari Pulavartyeed4e512006-09-30 23:28:49 -07001457{
Christoph Hellwig89319d312016-10-30 11:42:03 -05001458 void __user *buf = (void __user *)(uintptr_t)iocb->aio_buf;
1459 size_t len = iocb->aio_nbytes;
1460
1461 if (!vectored) {
1462 ssize_t ret = import_single_range(rw, buf, len, *iovec, iter);
1463 *iovec = NULL;
1464 return ret;
1465 }
Jeff Moyer9d85cba2010-05-26 14:44:26 -07001466#ifdef CONFIG_COMPAT
1467 if (compat)
Christoph Hellwig89319d312016-10-30 11:42:03 -05001468 return compat_import_iovec(rw, buf, len, UIO_FASTIOV, iovec,
1469 iter);
Jeff Moyer9d85cba2010-05-26 14:44:26 -07001470#endif
Christoph Hellwig89319d312016-10-30 11:42:03 -05001471 return import_iovec(rw, buf, len, UIO_FASTIOV, iovec, iter);
Badari Pulavartyeed4e512006-09-30 23:28:49 -07001472}
1473
Al Viro9061d142018-05-26 19:11:40 -04001474static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475{
Christoph Hellwig89319d312016-10-30 11:42:03 -05001476 switch (ret) {
1477 case -EIOCBQUEUED:
Al Viro9061d142018-05-26 19:11:40 -04001478 break;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001479 case -ERESTARTSYS:
1480 case -ERESTARTNOINTR:
1481 case -ERESTARTNOHAND:
1482 case -ERESTART_RESTARTBLOCK:
Kent Overstreet41ef4eb2013-05-07 16:19:11 -07001483 /*
1484 * There's no easy way to restart the syscall since other AIO's
1485 * may be already running. Just fail this IO with EINTR.
1486 */
Christoph Hellwig89319d312016-10-30 11:42:03 -05001487 ret = -EINTR;
1488 /*FALLTHRU*/
1489 default:
Christoph Hellwig54843f82018-05-02 19:57:21 +02001490 aio_complete_rw(req, ret, 0);
Kent Overstreet41ef4eb2013-05-07 16:19:11 -07001491 }
Christoph Hellwig89319d312016-10-30 11:42:03 -05001492}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493
Christoph Hellwig89319d312016-10-30 11:42:03 -05001494static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored,
1495 bool compat)
1496{
Christoph Hellwig89319d312016-10-30 11:42:03 -05001497 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1498 struct iov_iter iter;
Christoph Hellwig54843f82018-05-02 19:57:21 +02001499 struct file *file;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001500 ssize_t ret;
1501
Christoph Hellwig54843f82018-05-02 19:57:21 +02001502 ret = aio_prep_rw(req, iocb);
1503 if (ret)
1504 return ret;
1505 file = req->ki_filp;
1506
1507 ret = -EBADF;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001508 if (unlikely(!(file->f_mode & FMODE_READ)))
Christoph Hellwig54843f82018-05-02 19:57:21 +02001509 goto out_fput;
1510 ret = -EINVAL;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001511 if (unlikely(!file->f_op->read_iter))
Christoph Hellwig54843f82018-05-02 19:57:21 +02001512 goto out_fput;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001513
1514 ret = aio_setup_rw(READ, iocb, &iovec, vectored, compat, &iter);
1515 if (ret)
Christoph Hellwig54843f82018-05-02 19:57:21 +02001516 goto out_fput;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001517 ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter));
1518 if (!ret)
Al Viro9061d142018-05-26 19:11:40 -04001519 aio_rw_done(req, call_read_iter(file, req, &iter));
Christoph Hellwig89319d312016-10-30 11:42:03 -05001520 kfree(iovec);
Christoph Hellwig54843f82018-05-02 19:57:21 +02001521out_fput:
Al Viro9061d142018-05-26 19:11:40 -04001522 if (unlikely(ret))
Christoph Hellwig54843f82018-05-02 19:57:21 +02001523 fput(file);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001524 return ret;
1525}
1526
1527static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
1528 bool compat)
1529{
Christoph Hellwig89319d312016-10-30 11:42:03 -05001530 struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
1531 struct iov_iter iter;
Christoph Hellwig54843f82018-05-02 19:57:21 +02001532 struct file *file;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001533 ssize_t ret;
1534
Christoph Hellwig54843f82018-05-02 19:57:21 +02001535 ret = aio_prep_rw(req, iocb);
1536 if (ret)
1537 return ret;
1538 file = req->ki_filp;
1539
1540 ret = -EBADF;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001541 if (unlikely(!(file->f_mode & FMODE_WRITE)))
Christoph Hellwig54843f82018-05-02 19:57:21 +02001542 goto out_fput;
1543 ret = -EINVAL;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001544 if (unlikely(!file->f_op->write_iter))
Christoph Hellwig54843f82018-05-02 19:57:21 +02001545 goto out_fput;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001546
1547 ret = aio_setup_rw(WRITE, iocb, &iovec, vectored, compat, &iter);
1548 if (ret)
Christoph Hellwig54843f82018-05-02 19:57:21 +02001549 goto out_fput;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001550 ret = rw_verify_area(WRITE, file, &req->ki_pos, iov_iter_count(&iter));
1551 if (!ret) {
Jan Kara70fe2f42016-10-30 11:42:04 -05001552 /*
Christoph Hellwig92ce4722018-04-06 09:28:17 +02001553 * Open-code file_start_write here to grab freeze protection,
Christoph Hellwig54843f82018-05-02 19:57:21 +02001554 * which will be released by another thread in
1555 * aio_complete_rw(). Fool lockdep by telling it the lock got
1556 * released so that it doesn't complain about the held lock when
1557 * we return to userspace.
Jan Kara70fe2f42016-10-30 11:42:04 -05001558 */
Christoph Hellwig92ce4722018-04-06 09:28:17 +02001559 if (S_ISREG(file_inode(file)->i_mode)) {
1560 __sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
Shaohua Lia12f1ae2016-12-13 12:09:56 -08001561 __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
Christoph Hellwig92ce4722018-04-06 09:28:17 +02001562 }
1563 req->ki_flags |= IOCB_WRITE;
Al Viro9061d142018-05-26 19:11:40 -04001564 aio_rw_done(req, call_write_iter(file, req, &iter));
Christoph Hellwig89319d312016-10-30 11:42:03 -05001565 }
1566 kfree(iovec);
Christoph Hellwig54843f82018-05-02 19:57:21 +02001567out_fput:
Al Viro9061d142018-05-26 19:11:40 -04001568 if (unlikely(ret))
Christoph Hellwig54843f82018-05-02 19:57:21 +02001569 fput(file);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001570 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571}
1572
Christoph Hellwiga3c0d432018-03-27 19:18:57 +02001573static void aio_fsync_work(struct work_struct *work)
1574{
1575 struct fsync_iocb *req = container_of(work, struct fsync_iocb, work);
1576 int ret;
1577
1578 ret = vfs_fsync(req->file, req->datasync);
1579 fput(req->file);
1580 aio_complete(container_of(req, struct aio_kiocb, fsync), ret, 0);
1581}
1582
1583static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
1584{
1585 if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
1586 iocb->aio_rw_flags))
1587 return -EINVAL;
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07001588
Christoph Hellwiga3c0d432018-03-27 19:18:57 +02001589 req->file = fget(iocb->aio_fildes);
1590 if (unlikely(!req->file))
1591 return -EBADF;
1592 if (unlikely(!req->file->f_op->fsync)) {
1593 fput(req->file);
1594 return -EINVAL;
1595 }
1596
1597 req->datasync = datasync;
1598 INIT_WORK(&req->work, aio_fsync_work);
1599 schedule_work(&req->work);
Al Viro9061d142018-05-26 19:11:40 -04001600 return 0;
Christoph Hellwiga3c0d432018-03-27 19:18:57 +02001601}
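/*
 * Illustrative userspace sketch (not part of the kernel source): the fsync
 * opcodes take no buffer, length, offset or rw flags, which is exactly what
 * the checks above enforce.  fd and ctx are assumed from the earlier sketches.
 *
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_FDSYNC,	/* or IOCB_CMD_FSYNC */
 *		.aio_fildes	= fd,
 *	};
 *	struct iocb *cbs[1] = { &cb };
 *
 *	syscall(__NR_io_submit, ctx, 1, cbs);
 */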
1602
Christoph Hellwigbfe40372018-07-16 09:08:20 +02001603static inline void aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
1604{
1605 struct file *file = iocb->poll.file;
1606
1607 aio_complete(iocb, mangle_poll(mask), 0);
1608 fput(file);
1609}
1610
1611static void aio_poll_complete_work(struct work_struct *work)
1612{
1613 struct poll_iocb *req = container_of(work, struct poll_iocb, work);
1614 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
1615 struct poll_table_struct pt = { ._key = req->events };
1616 struct kioctx *ctx = iocb->ki_ctx;
1617 __poll_t mask = 0;
1618
1619 if (!READ_ONCE(req->cancelled))
1620 mask = vfs_poll(req->file, &pt) & req->events;
1621
1622 /*
1623 * Note that ->ki_cancel callers also delete iocb from active_reqs after
1624 * calling ->ki_cancel. We need the ctx_lock roundtrip here to
1625 * synchronize with them. In the cancellation case the list_del_init
1626 * itself is not actually needed, but harmless so we keep it in to
1627 * avoid further branches in the fast path.
1628 */
1629 spin_lock_irq(&ctx->ctx_lock);
1630 if (!mask && !READ_ONCE(req->cancelled)) {
1631 add_wait_queue(req->head, &req->wait);
1632 spin_unlock_irq(&ctx->ctx_lock);
1633 return;
1634 }
1635 list_del_init(&iocb->ki_list);
1636 spin_unlock_irq(&ctx->ctx_lock);
1637
1638 aio_poll_complete(iocb, mask);
1639}
1640
1641/* assumes we are called with irqs disabled */
1642static int aio_poll_cancel(struct kiocb *iocb)
1643{
1644 struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
1645 struct poll_iocb *req = &aiocb->poll;
1646
1647 spin_lock(&req->head->lock);
1648 WRITE_ONCE(req->cancelled, true);
1649 if (!list_empty(&req->wait.entry)) {
1650 list_del_init(&req->wait.entry);
1651 schedule_work(&aiocb->poll.work);
1652 }
1653 spin_unlock(&req->head->lock);
1654
1655 return 0;
1656}
1657
1658static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
1659 void *key)
1660{
1661 struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
Christoph Hellwige8693bc2018-07-16 12:25:17 +02001662 struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
Christoph Hellwigbfe40372018-07-16 09:08:20 +02001663 __poll_t mask = key_to_poll(key);
Bart Van Asschef5e66cd2019-02-08 16:59:49 -08001664 unsigned long flags;
Christoph Hellwigbfe40372018-07-16 09:08:20 +02001665
1666 req->woken = true;
1667
1668 /* for instances that support it, check for an event match first: */
Christoph Hellwige8693bc2018-07-16 12:25:17 +02001669 if (mask) {
1670 if (!(mask & req->events))
1671 return 0;
1672
Bart Van Asschef5e66cd2019-02-08 16:59:49 -08001673 /*
1674 * Try to complete the iocb inline if we can. Use
1675 * irqsave/irqrestore because not all filesystems (e.g. fuse)
1676 * call this function with IRQs disabled and because IRQs
1677 * have to be disabled before ctx_lock is obtained.
1678 */
1679 if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
Christoph Hellwige8693bc2018-07-16 12:25:17 +02001680 list_del(&iocb->ki_list);
Bart Van Asschef5e66cd2019-02-08 16:59:49 -08001681 spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
Christoph Hellwige8693bc2018-07-16 12:25:17 +02001682
1683 list_del_init(&req->wait.entry);
1684 aio_poll_complete(iocb, mask);
1685 return 1;
1686 }
1687 }
Christoph Hellwigbfe40372018-07-16 09:08:20 +02001688
1689 list_del_init(&req->wait.entry);
1690 schedule_work(&req->work);
1691 return 1;
1692}
1693
1694struct aio_poll_table {
1695 struct poll_table_struct pt;
1696 struct aio_kiocb *iocb;
1697 int error;
1698};
1699
1700static void
1701aio_poll_queue_proc(struct file *file, struct wait_queue_head *head,
1702 struct poll_table_struct *p)
1703{
1704 struct aio_poll_table *pt = container_of(p, struct aio_poll_table, pt);
1705
1706 /* multiple wait queues per file are not supported */
1707 if (unlikely(pt->iocb->poll.head)) {
1708 pt->error = -EINVAL;
1709 return;
1710 }
1711
1712 pt->error = 0;
1713 pt->iocb->poll.head = head;
1714 add_wait_queue(head, &pt->iocb->poll.wait);
1715}
1716
1717static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
1718{
1719 struct kioctx *ctx = aiocb->ki_ctx;
1720 struct poll_iocb *req = &aiocb->poll;
1721 struct aio_poll_table apt;
1722 __poll_t mask;
1723
1724 /* reject any unknown events outside the normal event mask. */
1725 if ((u16)iocb->aio_buf != iocb->aio_buf)
1726 return -EINVAL;
1727 /* reject fields that are not defined for poll */
1728 if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
1729 return -EINVAL;
1730
1731 INIT_WORK(&req->work, aio_poll_complete_work);
1732 req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
1733 req->file = fget(iocb->aio_fildes);
1734 if (unlikely(!req->file))
1735 return -EBADF;
1736
1737 apt.pt._qproc = aio_poll_queue_proc;
1738 apt.pt._key = req->events;
1739 apt.iocb = aiocb;
1740 apt.error = -EINVAL; /* same as no support for IOCB_CMD_POLL */
1741
1742 /* initialize the list so that we can do list_empty checks */
1743 INIT_LIST_HEAD(&req->wait.entry);
1744 init_waitqueue_func_entry(&req->wait, aio_poll_wake);
1745
1746 /* one for removal from waitqueue, one for this function */
1747 refcount_set(&aiocb->ki_refcnt, 2);
1748
1749 mask = vfs_poll(req->file, &apt.pt) & req->events;
1750 if (unlikely(!req->head)) {
1751 /* we did not manage to set up a waitqueue, done */
1752 goto out;
1753 }
1754
1755 spin_lock_irq(&ctx->ctx_lock);
1756 spin_lock(&req->head->lock);
1757 if (req->woken) {
1758 /* wake_up context handles the rest */
1759 mask = 0;
1760 apt.error = 0;
1761 } else if (mask || apt.error) {
1762 /* if we get an error or a mask we are done */
1763 WARN_ON_ONCE(list_empty(&req->wait.entry));
1764 list_del_init(&req->wait.entry);
1765 } else {
1766 /* actually waiting for an event */
1767 list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
1768 aiocb->ki_cancel = aio_poll_cancel;
1769 }
1770 spin_unlock(&req->head->lock);
1771 spin_unlock_irq(&ctx->ctx_lock);
1772
1773out:
1774 if (unlikely(apt.error)) {
1775 fput(req->file);
1776 return apt.error;
1777 }
1778
1779 if (mask)
1780 aio_poll_complete(aiocb, mask);
1781 iocb_put(aiocb);
1782 return 0;
1783}
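/*
 * Illustrative userspace sketch (not part of the kernel source): for
 * IOCB_CMD_POLL the requested event mask travels in aio_buf, which is what
 * the demangle_poll() call above consumes, and the resulting mask comes back
 * in the io_event's res field.
 *
 *	#include <poll.h>
 *
 *	struct iocb cb = {
 *		.aio_lio_opcode	= IOCB_CMD_POLL,
 *		.aio_fildes	= sockfd,		/* any pollable fd */
 *		.aio_buf	= POLLIN,
 *	};
 *	struct iocb *cbs[1] = { &cb };
 *
 *	syscall(__NR_io_submit, ctx, 1, cbs);
 */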
1784
Adrian Bunkd5470b52008-04-29 00:58:57 -07001785static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
Al Viro95af8492018-05-26 19:43:16 -04001786 bool compat)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787{
Christoph Hellwig04b2fa92015-02-02 14:49:06 +01001788 struct aio_kiocb *req;
Al Viro95af8492018-05-26 19:43:16 -04001789 struct iocb iocb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 ssize_t ret;
1791
Al Viro95af8492018-05-26 19:43:16 -04001792 if (unlikely(copy_from_user(&iocb, user_iocb, sizeof(iocb))))
1793 return -EFAULT;
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 /* enforce forwards compatibility on users */
Al Viro95af8492018-05-26 19:43:16 -04001796 if (unlikely(iocb.aio_reserved2)) {
Kent Overstreetcaf41672013-05-07 16:18:35 -07001797 pr_debug("EINVAL: reserve field set\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 return -EINVAL;
1799 }
1800
1801 /* prevent overflows */
1802 if (unlikely(
Al Viro95af8492018-05-26 19:43:16 -04001803 (iocb.aio_buf != (unsigned long)iocb.aio_buf) ||
1804 (iocb.aio_nbytes != (size_t)iocb.aio_nbytes) ||
1805 ((ssize_t)iocb.aio_nbytes < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 )) {
Kinglong Meeacd88d42015-02-04 21:15:59 +08001807 pr_debug("EINVAL: overflow check\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 return -EINVAL;
1809 }
1810
Kent Overstreet41ef4eb2013-05-07 16:19:11 -07001811 req = aio_get_req(ctx);
Kent Overstreet1d98ebf2013-05-07 16:18:37 -07001812 if (unlikely(!req))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 return -EAGAIN;
Kent Overstreet1d98ebf2013-05-07 16:18:37 -07001814
Al Viro95af8492018-05-26 19:43:16 -04001815 if (iocb.aio_flags & IOCB_FLAG_RESFD) {
Davide Libenzi9c3060b2007-05-10 22:23:21 -07001816 /*
1817 * If the IOCB_FLAG_RESFD flag of aio_flags is set, get an
1818 * instance of the file* now. The file descriptor must be
1819 * an eventfd() fd, and will be signaled for each completed
1820 * event using the eventfd_signal() function.
1821 */
Al Viro95af8492018-05-26 19:43:16 -04001822 req->ki_eventfd = eventfd_ctx_fdget((int) iocb.aio_resfd);
Hirofumi Nakagawa801678c2008-04-29 01:03:09 -07001823 if (IS_ERR(req->ki_eventfd)) {
Davide Libenzi9c3060b2007-05-10 22:23:21 -07001824 ret = PTR_ERR(req->ki_eventfd);
Davide Libenzi87c3a862009-03-18 17:04:19 -07001825 req->ki_eventfd = NULL;
Davide Libenzi9c3060b2007-05-10 22:23:21 -07001826 goto out_put_req;
1827 }
Goldwyn Rodrigues9830f4b2017-06-20 07:05:42 -05001828 }
1829
Kent Overstreet8a660892013-05-07 16:19:10 -07001830 ret = put_user(KIOCB_KEY, &user_iocb->aio_key);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 if (unlikely(ret)) {
Kent Overstreetcaf41672013-05-07 16:18:35 -07001832 pr_debug("EFAULT: aio_key\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833 goto out_put_req;
1834 }
1835
Christoph Hellwig04b2fa92015-02-02 14:49:06 +01001836 req->ki_user_iocb = user_iocb;
Al Viro95af8492018-05-26 19:43:16 -04001837 req->ki_user_data = iocb.aio_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838
Al Viro95af8492018-05-26 19:43:16 -04001839 switch (iocb.aio_lio_opcode) {
Christoph Hellwig89319d312016-10-30 11:42:03 -05001840 case IOCB_CMD_PREAD:
Al Viro95af8492018-05-26 19:43:16 -04001841 ret = aio_read(&req->rw, &iocb, false, compat);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001842 break;
1843 case IOCB_CMD_PWRITE:
Al Viro95af8492018-05-26 19:43:16 -04001844 ret = aio_write(&req->rw, &iocb, false, compat);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001845 break;
1846 case IOCB_CMD_PREADV:
Al Viro95af8492018-05-26 19:43:16 -04001847 ret = aio_read(&req->rw, &iocb, true, compat);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001848 break;
1849 case IOCB_CMD_PWRITEV:
Al Viro95af8492018-05-26 19:43:16 -04001850 ret = aio_write(&req->rw, &iocb, true, compat);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001851 break;
Christoph Hellwiga3c0d432018-03-27 19:18:57 +02001852 case IOCB_CMD_FSYNC:
Al Viro95af8492018-05-26 19:43:16 -04001853 ret = aio_fsync(&req->fsync, &iocb, false);
Christoph Hellwiga3c0d432018-03-27 19:18:57 +02001854 break;
1855 case IOCB_CMD_FDSYNC:
Al Viro95af8492018-05-26 19:43:16 -04001856 ret = aio_fsync(&req->fsync, &iocb, true);
Christoph Hellwigac060cb2018-05-28 07:19:49 +02001857 break;
Christoph Hellwigbfe40372018-07-16 09:08:20 +02001858 case IOCB_CMD_POLL:
1859 ret = aio_poll(req, &iocb);
1860 break;
Christoph Hellwig89319d312016-10-30 11:42:03 -05001861 default:
Al Viro95af8492018-05-26 19:43:16 -04001862 pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
Christoph Hellwig89319d312016-10-30 11:42:03 -05001863 ret = -EINVAL;
1864 break;
1865 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001866
Christoph Hellwig92ce4722018-04-06 09:28:17 +02001867 /*
Al Viro9061d142018-05-26 19:11:40 -04001868 * If ret is 0, we'd either done aio_complete() ourselves or have
1869 * arranged for that to be done asynchronously. Anything non-zero
1870 * means that we need to destroy req ourselves.
Christoph Hellwig92ce4722018-04-06 09:28:17 +02001871 */
Al Viro9061d142018-05-26 19:11:40 -04001872 if (ret)
Christoph Hellwig89319d312016-10-30 11:42:03 -05001873 goto out_put_req;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875out_put_req:
Kent Overstreete1bdd5f2013-04-26 10:58:39 +10001876 put_reqs_available(ctx, 1);
Kent Overstreete34ecee2013-10-10 19:31:47 -07001877 percpu_ref_put(&ctx->reqs);
Christoph Hellwig54843f82018-05-02 19:57:21 +02001878 if (req->ki_eventfd)
1879 eventfd_ctx_put(req->ki_eventfd);
1880 kmem_cache_free(kiocb_cachep, req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001881 return ret;
1882}
1883
Jeff Moyer9d85cba2010-05-26 14:44:26 -07001884/* sys_io_submit:
1885 * Queue the nr iocbs pointed to by iocbpp for processing. Returns
1886 * the number of iocbs queued. May return -EINVAL if the aio_context
1887 * specified by ctx_id is invalid, if nr is < 0, if the iocb at
1888 * *iocbpp[0] is not properly initialized, or if the operation specified
1889 * is invalid for the file descriptor in the iocb. May fail with
1890 * -EFAULT if any of the data structures point to invalid data. May
1891 * fail with -EBADF if the file descriptor specified in the first
1892 * iocb is invalid. May fail with -EAGAIN if insufficient resources
1893 * are available to queue any iocbs. Will return 0 if nr is 0. Will
1894 * fail with -ENOSYS if not implemented.
1895 */
1896SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
1897 struct iocb __user * __user *, iocbpp)
1898{
Al Viro67ba0492018-05-26 20:10:07 -04001899 struct kioctx *ctx;
1900 long ret = 0;
1901 int i = 0;
1902 struct blk_plug plug;
1903
1904 if (unlikely(nr < 0))
1905 return -EINVAL;
1906
Al Viro67ba0492018-05-26 20:10:07 -04001907 ctx = lookup_ioctx(ctx_id);
1908 if (unlikely(!ctx)) {
1909 pr_debug("EINVAL: invalid context id\n");
1910 return -EINVAL;
1911 }
1912
Al Viro1da92772018-05-26 20:10:07 -04001913 if (nr > ctx->nr_events)
1914 nr = ctx->nr_events;
1915
Al Viro67ba0492018-05-26 20:10:07 -04001916 blk_start_plug(&plug);
1917 for (i = 0; i < nr; i++) {
1918 struct iocb __user *user_iocb;
1919
1920 if (unlikely(get_user(user_iocb, iocbpp + i))) {
1921 ret = -EFAULT;
1922 break;
1923 }
1924
1925 ret = io_submit_one(ctx, user_iocb, false);
1926 if (ret)
1927 break;
1928 }
1929 blk_finish_plug(&plug);
1930
1931 percpu_ref_put(&ctx->users);
1932 return i ? i : ret;
Jeff Moyer9d85cba2010-05-26 14:44:26 -07001933}
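/*
 * Illustrative userspace sketch (not part of the kernel source): a single
 * IOCB_CMD_PREAD submitted through the syscall above.  ctx comes from the
 * io_setup() sketch earlier, fd is any readable descriptor, and error
 * handling is omitted.
 *
 *	#include <linux/aio_abi.h>
 *	#include <stdint.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	char buf[4096];
 *	struct iocb cb = {
 *		.aio_data	= 0x42,		/* echoed back in io_event.data */
 *		.aio_lio_opcode	= IOCB_CMD_PREAD,
 *		.aio_fildes	= fd,
 *		.aio_buf	= (uint64_t)(uintptr_t)buf,
 *		.aio_nbytes	= sizeof(buf),
 *		.aio_offset	= 0,
 *	};
 *	struct iocb *cbs[1] = { &cb };
 *
 *	int submitted = syscall(__NR_io_submit, ctx, 1, cbs);	/* 1 on success */
 */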
1934
Al Viroc00d2c72016-12-20 07:04:57 -05001935#ifdef CONFIG_COMPAT
Al Viroc00d2c72016-12-20 07:04:57 -05001936COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
Al Viro67ba0492018-05-26 20:10:07 -04001937 int, nr, compat_uptr_t __user *, iocbpp)
Al Viroc00d2c72016-12-20 07:04:57 -05001938{
Al Viro67ba0492018-05-26 20:10:07 -04001939 struct kioctx *ctx;
1940 long ret = 0;
1941 int i = 0;
1942 struct blk_plug plug;
Al Viroc00d2c72016-12-20 07:04:57 -05001943
1944 if (unlikely(nr < 0))
1945 return -EINVAL;
1946
Al Viro67ba0492018-05-26 20:10:07 -04001947 ctx = lookup_ioctx(ctx_id);
1948 if (unlikely(!ctx)) {
1949 pr_debug("EINVAL: invalid context id\n");
1950 return -EINVAL;
1951 }
1952
Al Viro1da92772018-05-26 20:10:07 -04001953 if (nr > ctx->nr_events)
1954 nr = ctx->nr_events;
1955
Al Viro67ba0492018-05-26 20:10:07 -04001956 blk_start_plug(&plug);
1957 for (i = 0; i < nr; i++) {
1958 compat_uptr_t user_iocb;
1959
1960 if (unlikely(get_user(user_iocb, iocbpp + i))) {
1961 ret = -EFAULT;
1962 break;
1963 }
1964
1965 ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
1966 if (ret)
1967 break;
1968 }
1969 blk_finish_plug(&plug);
1970
1971 percpu_ref_put(&ctx->users);
1972 return i ? i : ret;
Al Viroc00d2c72016-12-20 07:04:57 -05001973}
1974#endif
1975
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976/* lookup_kiocb
1977 * Finds a given iocb for cancellation.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 */
Christoph Hellwig04b2fa92015-02-02 14:49:06 +01001979static struct aio_kiocb *
Christoph Hellwigf3a27522018-03-30 11:19:25 +02001980lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981{
Christoph Hellwig04b2fa92015-02-02 14:49:06 +01001982 struct aio_kiocb *kiocb;
Zach Brownd00689a2005-11-13 16:07:34 -08001983
1984 assert_spin_locked(&ctx->ctx_lock);
1985
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 /* TODO: use a hash or array, this sucks. */
Christoph Hellwig04b2fa92015-02-02 14:49:06 +01001987 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) {
1988 if (kiocb->ki_user_iocb == iocb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 return kiocb;
1990 }
1991 return NULL;
1992}
1993
1994/* sys_io_cancel:
1995 * Attempts to cancel an iocb previously passed to io_submit. If
1996 * the operation is successfully cancelled, the resulting event is
1997 * copied into the memory pointed to by result without being placed
1998 * into the completion queue and 0 is returned. May fail with
1999 * -EFAULT if any of the data structures pointed to are invalid.
2000 * May fail with -EINVAL if aio_context specified by ctx_id is
2001 * invalid. May fail with -EAGAIN if the iocb specified was not
2002 * cancelled. Will fail with -ENOSYS if not implemented.
2003 */
Heiko Carstens002c8972009-01-14 14:14:18 +01002004SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
2005 struct io_event __user *, result)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 struct kioctx *ctx;
Christoph Hellwig04b2fa92015-02-02 14:49:06 +01002008 struct aio_kiocb *kiocb;
Christoph Hellwig888933f2018-05-23 14:11:02 +02002009 int ret = -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 u32 key;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011
Christoph Hellwigf3a27522018-03-30 11:19:25 +02002012 if (unlikely(get_user(key, &iocb->aio_key)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013 return -EFAULT;
Christoph Hellwigf3a27522018-03-30 11:19:25 +02002014 if (unlikely(key != KIOCB_KEY))
2015 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
2017 ctx = lookup_ioctx(ctx_id);
2018 if (unlikely(!ctx))
2019 return -EINVAL;
2020
2021 spin_lock_irq(&ctx->ctx_lock);
Christoph Hellwigf3a27522018-03-30 11:19:25 +02002022 kiocb = lookup_kiocb(ctx, iocb);
Christoph Hellwig888933f2018-05-23 14:11:02 +02002023 if (kiocb) {
2024 ret = kiocb->ki_cancel(&kiocb->rw);
2025 list_del_init(&kiocb->ki_list);
2026 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 spin_unlock_irq(&ctx->ctx_lock);
2028
Kent Overstreet906b9732013-05-07 16:18:31 -07002029 if (!ret) {
Kent Overstreetbec68faa2013-05-13 14:45:08 -07002030 /*
2031 * The result argument is no longer used - the io_event is
2032 * always delivered via the ring buffer. -EINPROGRESS indicates
2033 * cancellation is in progress:
Kent Overstreet906b9732013-05-07 16:18:31 -07002034 */
Kent Overstreetbec68faa2013-05-13 14:45:08 -07002035 ret = -EINPROGRESS;
Kent Overstreet906b9732013-05-07 16:18:31 -07002036 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
Kent Overstreet723be6e2013-05-28 15:14:48 -07002038 percpu_ref_put(&ctx->users);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039
2040 return ret;
2041}
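/*
 * Illustrative userspace sketch (not part of the kernel source): attempting
 * to cancel a previously submitted iocb.  With this implementation only
 * operations that registered a cancel callback (e.g. IOCB_CMD_POLL) are found
 * on active_reqs; in-flight reads and writes typically just get -EINVAL here,
 * and the result argument is no longer filled in.
 *
 *	struct io_event unused;
 *
 *	if (syscall(__NR_io_cancel, ctx, &cb, &unused) < 0)
 *		perror("io_cancel");
 */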
2042
Deepa Dinamanifa2e62a2017-08-04 21:12:32 -07002043static long do_io_getevents(aio_context_t ctx_id,
2044 long min_nr,
2045 long nr,
2046 struct io_event __user *events,
2047 struct timespec64 *ts)
2048{
2049 ktime_t until = ts ? timespec64_to_ktime(*ts) : KTIME_MAX;
2050 struct kioctx *ioctx = lookup_ioctx(ctx_id);
2051 long ret = -EINVAL;
2052
2053 if (likely(ioctx)) {
2054 if (likely(min_nr <= nr && min_nr >= 0))
2055 ret = read_events(ioctx, min_nr, nr, events, until);
2056 percpu_ref_put(&ioctx->users);
2057 }
2058
2059 return ret;
2060}
2061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062/* io_getevents:
2063 * Attempts to read at least min_nr events and up to nr events from
Satoru Takeuchi642b5122010-08-05 11:23:11 -07002064 * the completion queue for the aio_context specified by ctx_id. If
2065 * it succeeds, the number of read events is returned. May fail with
2066 * -EINVAL if ctx_id is invalid, if min_nr is out of range, if nr is
2067 * out of range, if timeout is out of range. May fail with -EFAULT
2068 * if any of the memory specified is invalid. May return 0 or
2069 * < min_nr if the timeout specified by timeout has elapsed
2070 * before sufficient events are available, where timeout == NULL
2071 * specifies an infinite timeout. Note that the timeout pointed to by
Jeff Moyer69008072013-05-24 15:55:24 -07002072 * timeout is relative. Will fail with -ENOSYS if not implemented.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 */
Heiko Carstens002c8972009-01-14 14:14:18 +01002074SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
2075 long, min_nr,
2076 long, nr,
2077 struct io_event __user *, events,
2078 struct timespec __user *, timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079{
Deepa Dinamanifa2e62a2017-08-04 21:12:32 -07002080 struct timespec64 ts;
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002081 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002082
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002083 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2084 return -EFAULT;
2085
2086 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2087 if (!ret && signal_pending(current))
2088 ret = -EINTR;
2089 return ret;
2090}
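/*
 * Illustrative userspace sketch (not part of the kernel source): reaping the
 * completion of the submission sketched earlier, waiting up to one second.
 * The casts are only there to keep the printf formats portable.
 *
 *	#include <linux/aio_abi.h>
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	struct io_event events[1];
 *	struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	int n = syscall(__NR_io_getevents, ctx, 1, 1, events, &ts);
 *	if (n == 1)
 *		printf("data=%llx res=%lld\n",
 *		       (unsigned long long)events[0].data,
 *		       (long long)events[0].res);
 */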
2091
Christoph Hellwig9ba546c2018-07-11 15:48:46 +02002092struct __aio_sigset {
2093 const sigset_t __user *sigmask;
2094 size_t sigsetsize;
2095};
2096
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002097SYSCALL_DEFINE6(io_pgetevents,
2098 aio_context_t, ctx_id,
2099 long, min_nr,
2100 long, nr,
2101 struct io_event __user *, events,
2102 struct timespec __user *, timeout,
2103 const struct __aio_sigset __user *, usig)
2104{
2105 struct __aio_sigset ksig = { NULL, };
2106 sigset_t ksigmask, sigsaved;
2107 struct timespec64 ts;
2108 int ret;
2109
2110 if (timeout && unlikely(get_timespec64(&ts, timeout)))
2111 return -EFAULT;
2112
2113 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2114 return -EFAULT;
2115
2116 if (ksig.sigmask) {
2117 if (ksig.sigsetsize != sizeof(sigset_t))
2118 return -EINVAL;
2119 if (copy_from_user(&ksigmask, ksig.sigmask, sizeof(ksigmask)))
Deepa Dinamanifa2e62a2017-08-04 21:12:32 -07002120 return -EFAULT;
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002121 sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2122 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 }
Deepa Dinamanifa2e62a2017-08-04 21:12:32 -07002124
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002125 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &ts : NULL);
2126 if (signal_pending(current)) {
2127 if (ksig.sigmask) {
2128 current->saved_sigmask = sigsaved;
2129 set_restore_sigmask();
2130 }
2131
2132 if (!ret)
2133 ret = -ERESTARTNOHAND;
2134 } else {
2135 if (ksig.sigmask)
2136 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2137 }
2138
2139 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140}
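/*
 * Illustrative userspace sketch (not part of the kernel source): io_pgetevents
 * atomically installs a signal mask for the duration of the wait, much like
 * pselect().  Note that the size check above compares against the kernel's
 * sigset size (_NSIG / 8, i.e. 8 bytes on x86-64), not glibc's much larger
 * sigset_t, and that __NR_io_pgetevents only exists on kernels that provide
 * this syscall.
 *
 *	#include <signal.h>
 *
 *	struct {
 *		const sigset_t *sigmask;
 *		size_t sigsetsize;
 *	} usig;
 *	sigset_t mask;
 *
 *	sigfillset(&mask);
 *	sigdelset(&mask, SIGINT);	/* let SIGINT interrupt the wait */
 *	usig.sigmask = &mask;
 *	usig.sigsetsize = 8;		/* kernel sigset size, not sizeof(sigset_t) */
 *
 *	syscall(__NR_io_pgetevents, ctx, 1, 1, events, NULL, &usig);
 */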
Al Viroc00d2c72016-12-20 07:04:57 -05002141
2142#ifdef CONFIG_COMPAT
2143COMPAT_SYSCALL_DEFINE5(io_getevents, compat_aio_context_t, ctx_id,
2144 compat_long_t, min_nr,
2145 compat_long_t, nr,
2146 struct io_event __user *, events,
2147 struct compat_timespec __user *, timeout)
2148{
Deepa Dinamanifa2e62a2017-08-04 21:12:32 -07002149 struct timespec64 t;
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002150 int ret;
Al Viroc00d2c72016-12-20 07:04:57 -05002151
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002152 if (timeout && compat_get_timespec64(&t, timeout))
2153 return -EFAULT;
2154
2155 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2156 if (!ret && signal_pending(current))
2157 ret = -EINTR;
2158 return ret;
2159}
2160
2161
2162struct __compat_aio_sigset {
2163 compat_sigset_t __user *sigmask;
2164 compat_size_t sigsetsize;
2165};
2166
2167COMPAT_SYSCALL_DEFINE6(io_pgetevents,
2168 compat_aio_context_t, ctx_id,
2169 compat_long_t, min_nr,
2170 compat_long_t, nr,
2171 struct io_event __user *, events,
2172 struct compat_timespec __user *, timeout,
2173 const struct __compat_aio_sigset __user *, usig)
2174{
2175 struct __compat_aio_sigset ksig = { NULL, };
2176 sigset_t ksigmask, sigsaved;
2177 struct timespec64 t;
2178 int ret;
2179
2180 if (timeout && compat_get_timespec64(&t, timeout))
2181 return -EFAULT;
2182
2183 if (usig && copy_from_user(&ksig, usig, sizeof(ksig)))
2184 return -EFAULT;
2185
2186 if (ksig.sigmask) {
2187 if (ksig.sigsetsize != sizeof(compat_sigset_t))
2188 return -EINVAL;
2189 if (get_compat_sigset(&ksigmask, ksig.sigmask))
Al Viroc00d2c72016-12-20 07:04:57 -05002190 return -EFAULT;
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002191 sigdelsetmask(&ksigmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
2192 sigprocmask(SIG_SETMASK, &ksigmask, &sigsaved);
Al Viroc00d2c72016-12-20 07:04:57 -05002193 }
Deepa Dinamanifa2e62a2017-08-04 21:12:32 -07002194
Christoph Hellwig7a074e92018-05-02 19:51:00 +02002195 ret = do_io_getevents(ctx_id, min_nr, nr, events, timeout ? &t : NULL);
2196 if (signal_pending(current)) {
2197 if (ksig.sigmask) {
2198 current->saved_sigmask = sigsaved;
2199 set_restore_sigmask();
2200 }
2201 if (!ret)
2202 ret = -ERESTARTNOHAND;
2203 } else {
2204 if (ksig.sigmask)
2205 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2206 }
2207
2208 return ret;
Al Viroc00d2c72016-12-20 07:04:57 -05002209}
2210#endif