/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

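/*
 * Each region gets one bit in the unsigned long 'error_bits' of struct
 * io below, so an io can span at most BITS_PER_LONG regions.
 */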
#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
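
/*
 * Worked example of the encoding above, assuming a 64-bit build:
 * DM_IO_MAX_REGIONS is 64, so an aligned 'struct io' pointer has its
 * six low bits clear.  A region number (0..63) is OR'd into those
 * bits; '& -64' (i.e. ~63UL) masks it off again to recover the
 * pointer, and '& 63' recovers the region.
 */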

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

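	/*
	 * Everything needed from 'io' was saved above: the callback
	 * runs after the object has been returned to the mempool.
	 */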
	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
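
/*
 * A consumer walks a dpages iterator like this (see do_region() below):
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	[use up to 'len' bytes of 'page' starting at 'offset']
 *	dp->next_page(dp);
 */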

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

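/*
 * The bio's iterator may have partially consumed its current bvec:
 * bi_iter.bi_bvec_done is the number of bytes already completed in
 * that bvec, and is carried in context_u so bio_get_page() starts at
 * the right offset.
 */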
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
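/*
 * Unlike the VMA variant above, this path uses virt_to_page(), which is
 * only valid for the kernel's linear mapping (i.e. kmalloc'd buffers);
 * vmalloc'd memory must go through DM_IO_VMA instead.
 */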
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.  Discard and write-same
		 * bios carry little or no payload, so a single bvec is
		 * enough for them.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.  If
			 * bio_add_page() fails, the bio is full: submit
			 * it and let the outer loop allocate another.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

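/*
 * The synchronous path reuses the asynchronous machinery: sync_io()
 * installs sync_io_complete() as the notify callback and then sleeps
 * on a completion until it fires.
 */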
struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw.  If you fail to do one of these, the IO will be submitted
 * to the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 * (Note: this advice predates the on-stack plugging rework; blk_unplug()
 * and q->unplug_delay no longer exist.)
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
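
/*
 * Example of calling dm_io() (a sketch, not part of this file): read the
 * first eight sectors of a device synchronously into a kmalloc'd buffer.
 * 'client', 'bdev' and 'data' are assumed to have been set up by the
 * caller; a NULL notify.fn selects the synchronous path above.
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = data,
 *		.notify.fn = NULL,
 *		.client = client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 */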

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}