/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
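
/*
 * Illustrative usage sketch (an assumed caller, not part of the original
 * file): a target would typically create one client in its constructor and
 * destroy it in its destructor:
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */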

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}
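
/*
 * Worked example: with BITS_PER_LONG == 64, DM_IO_MAX_REGIONS is 64, so
 * 'struct io' is 64-byte aligned and the low six bits of its address are
 * always zero.  Storing region 5 therefore yields
 * bi_private == (void *)((unsigned long)io | 5), losing no information.
 */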

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
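
/*
 * Note: for a power of two, -(unsigned long)DM_IO_MAX_REGIONS equals
 * ~(DM_IO_MAX_REGIONS - 1), so the first mask clears exactly the region
 * bits and the second mask keeps only them.
 */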

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
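
/*
 * Reference-counting protocol: dispatch_io() starts io->count at 1,
 * do_region() takes one additional reference per bio it submits, and each
 * bio completion drops one here via endio(); dispatch_io() drops the
 * initial reference once all regions are dispatched, so whoever brings
 * the count to zero completes the io exactly once.
 */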

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
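
/*
 * The *_dp_init() variants below just fill in this small vtable.  The
 * consumer (do_region) drives it with a loop of the form:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	(consume up to 'len' bytes of 'page' starting at 'offset')
 *	dp->next_page(dp);
 */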

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio *bio = dp->context_ptr;
	struct bio_vec bvec = bio_iovec(bio);
	*p = bvec.bv_page;
	*len = bvec.bv_len;
	*offset = bvec.bv_offset;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio *bio = dp->context_ptr;
	struct bio_vec bvec = bio_iovec(bio);

	bio_advance(bio, bvec.bv_len);
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = bio;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
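
/*
 * Note that the VMA and kernel-memory variants differ only in the page
 * lookup: vmalloc_to_page() resolves vmalloc addresses through the page
 * tables, while virt_to_page() is valid only for directly-mapped memory.
 */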

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try to add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
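
/*
 * A single region may thus be split across several bios: the loop above
 * opens a new bio whenever bio_add_page() refuses another page or a
 * discard/write-same is capped by the queue limits, and repeats until
 * 'remaining' reaches zero.
 */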

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing the
	 * "io_" variable from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
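
/*
 * Worked example of the alignment dance above: if __alignof__(struct io)
 * is 64, io_[] reserves sizeof(struct io) + 63 bytes and PTR_ALIGN()
 * rounds &io_ up to the next 64-byte boundary, so the resulting 'io'
 * always passes the IS_ALIGNED() check in store_io_and_region_in_bio().
 */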

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
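
/*
 * Illustrative sketch (an assumed caller, not part of this file): a
 * synchronous read of the first eight sectors of 'bdev' into a kernel
 * buffer 'buf' could look like:
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buf,
 *		.client       = client,		(from dm_io_client_create())
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&req, 1, &where, &error_bits);
 *
 * Leaving notify.fn NULL selects the sync_io() path; filling in notify.fn
 * and notify.context makes dm_io() return immediately and invoke the
 * callback on completion instead.
 */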

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}