/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm-core.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
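
/*
 * Illustrative lifecycle sketch (not part of this file): a target
 * typically creates one client up front, e.g. in its constructor,
 * and releases it in its destructor:
 *
 *	client = dm_io_client_create();
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */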

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
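
/*
 * Worked example (illustrative, assuming a 64-bit build where
 * DM_IO_MAX_REGIONS == 64): a 64-byte-aligned io at 0x...9c0 has its
 * low six bits clear, so storing region 5 yields
 * bi_private == 0x...9c5; masking with -64UL (i.e. ~63UL) and with 63
 * recovers the pointer and the region respectively.
 */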

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	int error;

	if (bio->bi_error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_error;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
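
/*
 * Consumer contract sketch (illustrative; this is the pattern
 * do_region() below follows):
 *
 *	dp->get_page(dp, &page, &len, &offset);	inspect current page
 *	...consume up to 'len' bytes at 'offset'...
 *	dp->next_page(dp);			advance the cursor
 *
 * get_page() is idempotent; only next_page() advances and resets the
 * intra-page offset (context_u) to zero.
 */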

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = offset_in_page(data);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int op, int op_flags, unsigned region,
		      struct dm_io_region *where, struct dpages *dp,
		      struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (op == REQ_OP_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (op == REQ_OP_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((op == REQ_OP_DISCARD || op == REQ_OP_WRITE_SAME) &&
	    special_cmd_max_sectors == 0) {
		atomic_inc(&io->count);
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if op holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((op == REQ_OP_DISCARD) || (op == REQ_OP_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, BIO_MAX_PAGES,
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio_set_op_attrs(bio, op, op_flags);
		store_io_and_region_in_bio(bio, io, region);

		if (op == REQ_OP_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (op == REQ_OP_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(bio);
	} while (remaining);
}
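
/*
 * Worked example (illustrative): a REQ_OP_DISCARD over a 20-sector
 * region on a queue with max_discard_sectors == 8 makes the loop
 * above issue three bios of 8, 8 and 4 sectors; an ordinary write of
 * the same size instead packs as many pages per bio as bio_add_page()
 * will accept.
 */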

static void dispatch_io(int op, int op_flags, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		op_flags |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (op_flags & REQ_PREFLUSH))
			do_region(op, op_flags, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int op, int op_flags,
		   struct dpages *dp, unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int op, int op_flags,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && !op_is_write(op)) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(op, op_flags, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if (io_req->bi_op == REQ_OP_READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_op_flags. If you fail to do one of these, the IO will be
 * submitted to the disk after q->unplug_delay, which defaults to 3ms in
 * blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_op, io_req->bi_op_flags, &dp,
			       sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_op,
			io_req->bi_op_flags, &dp, io_req->notify.fn,
			io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
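
/*
 * Usage sketch (illustrative; 'bdev' and 'buf' are assumed to be a
 * block device and a kernel buffer owned by the caller). A
 * synchronous read of the first eight 512-byte sectors:
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_op		= REQ_OP_READ,
 *		.bi_op_flags	= 0,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buf,
 *		.notify.fn	= NULL,		(NULL selects sync_io)
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *
 *	r = dm_io(&req, 1, &where, &error_bits);
 *
 * Setting notify.fn instead makes dm_io() return immediately and
 * deliver the per-region error bits to the callback.
 */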

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}