/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS BITS_PER_LONG

struct dm_io_client {
        mempool_t *pool;
        struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
        unsigned long error_bits;
        atomic_t count;
        struct completion *wait;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
        struct dm_io_client *client;
        unsigned min_ios = dm_get_reserved_bio_based_ios();

        client = kmalloc(sizeof(*client), GFP_KERNEL);
        if (!client)
                return ERR_PTR(-ENOMEM);

        client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
        if (!client->pool)
                goto bad;

        client->bios = bioset_create(min_ios, 0);
        if (!client->bios)
                goto bad;

        return client;

bad:
        if (client->pool)
                mempool_destroy(client->pool);
        kfree(client);
        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
        mempool_destroy(client->pool);
        bioset_free(client->bios);
        kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

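/*
 * Typical client lifecycle (an illustrative sketch, not part of this
 * file; callers usually create one client per target in its ctr and
 * drop it in the dtr):
 *
 *      struct dm_io_client *client = dm_io_client_create();
 *
 *      if (IS_ERR(client))
 *              return PTR_ERR(client);
 *      ...
 *      dm_io_client_destroy(client);
 */
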
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
                                       unsigned region)
{
        if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
                DMCRIT("Unaligned struct io pointer %p", io);
                BUG();
        }

        bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
                                            unsigned *region)
{
        unsigned long val = (unsigned long)bio->bi_private;

        *io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
        *region = val & (DM_IO_MAX_REGIONS - 1);
}

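/*
 * A worked example of the encoding above: with BITS_PER_LONG == 64,
 * 'struct io' is 64-byte aligned, so its low 6 address bits are always
 * zero and a region number 0..63 fits in them:
 *
 *      io = 0x...ffc0, region = 5   =>   bi_private = 0x...ffc5
 *
 * val & -64 recovers the pointer, val & 63 recovers the region.
 */
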
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
        if (error)
                set_bit(region, &io->error_bits);

        if (atomic_dec_and_test(&io->count)) {
                if (io->vma_invalidate_size)
                        invalidate_kernel_vmap_range(io->vma_invalidate_address,
                                                     io->vma_invalidate_size);

                if (io->wait)
                        complete(io->wait);
                else {
                        unsigned long r = io->error_bits;
                        io_notify_fn fn = io->callback;
                        void *context = io->context;

                        mempool_free(io, io->client->pool);
                        fn(r, context);
                }
        }
}

static void endio(struct bio *bio, int error)
{
        struct io *io;
        unsigned region;

        if (error && bio_data_dir(bio) == READ)
                zero_fill_bio(bio);

        /*
         * The bio destructor in bio_put() may use the io object.
         */
        retrieve_io_and_region_from_bio(bio, &io, &region);

        bio_put(bio);

        dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
        void (*get_page)(struct dpages *dp,
                         struct page **p, unsigned long *len, unsigned *offset);
        void (*next_page)(struct dpages *dp);

        unsigned context_u;
        void *context_ptr;

        void *vma_invalidate_address;
        unsigned long vma_invalidate_size;
};

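/*
 * Whatever the memory source, consumers walk a dpages object with the
 * same two calls; do_region() further down boils down to this sketch:
 *
 *      while (remaining) {
 *              dp->get_page(dp, &page, &len, &offset);
 *              // use up to 'len' bytes of 'page' starting at 'offset'
 *              dp->next_page(dp);
 *      }
 */
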
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
                          struct page **p, unsigned long *len, unsigned *offset)
{
        unsigned o = dp->context_u;
        struct page_list *pl = (struct page_list *) dp->context_ptr;

        *p = pl->page;
        *len = PAGE_SIZE - o;
        *offset = o;
}

static void list_next_page(struct dpages *dp)
{
        struct page_list *pl = (struct page_list *) dp->context_ptr;
        dp->context_ptr = pl->next;
        dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
        dp->get_page = list_get_page;
        dp->next_page = list_next_page;
        dp->context_u = offset;
        dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
                         unsigned long *len, unsigned *offset)
{
        struct bio_vec *bvec = dp->context_ptr;
        *p = bvec->bv_page;
        *len = bvec->bv_len - dp->context_u;
        *offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
        struct bio_vec *bvec = dp->context_ptr;
        dp->context_ptr = bvec + 1;
        dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
        dp->get_page = bio_get_page;
        dp->next_page = bio_next_page;
        dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
        dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
                        struct page **p, unsigned long *len, unsigned *offset)
{
        *p = vmalloc_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = vm_get_page;
        dp->next_page = vm_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
                        unsigned *offset)
{
        *p = virt_to_page(dp->context_ptr);
        *offset = dp->context_u;
        *len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
        dp->context_ptr += PAGE_SIZE - dp->context_u;
        dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
        dp->get_page = km_get_page;
        dp->next_page = km_next_page;
        dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
        dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
                      struct dpages *dp, struct io *io)
{
        struct bio *bio;
        struct page *page;
        unsigned long len;
        unsigned offset;
        unsigned num_bvecs;
        sector_t remaining = where->count;
        struct request_queue *q = bdev_get_queue(where->bdev);
        unsigned short logical_block_size = queue_logical_block_size(q);
        sector_t num_sectors;

        /*
         * where->count may be zero if rw holds a flush and we need to
         * send a zero-sized flush.
         */
        do {
                /*
                 * Allocate a suitably sized bio.
                 */
                if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
                        num_bvecs = 1;
                else
                        num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
                                          dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

                bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
                bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
                bio->bi_bdev = where->bdev;
                bio->bi_end_io = endio;
                store_io_and_region_in_bio(bio, io, region);

                if (rw & REQ_DISCARD) {
                        num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
                        bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
                        remaining -= num_sectors;
                } else if (rw & REQ_WRITE_SAME) {
                        /*
                         * WRITE SAME only uses a single page.
                         */
                        dp->get_page(dp, &page, &len, &offset);
                        bio_add_page(bio, page, logical_block_size, offset);
                        num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
                        bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

                        offset = 0;
                        remaining -= num_sectors;
                        dp->next_page(dp);
                } else while (remaining) {
                        /*
                         * Try and add as many pages as possible.
                         */
                        dp->get_page(dp, &page, &len, &offset);
                        len = min(len, to_bytes(remaining));
                        if (!bio_add_page(bio, page, len, offset))
                                break;

                        offset = 0;
                        remaining -= to_sector(len);
                        dp->next_page(dp);
                }

                atomic_inc(&io->count);
                submit_bio(rw, bio);
        } while (remaining);
}

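/*
 * Note that a region too large for a single bio is split transparently:
 * each pass of the do-while above starts a fresh bio at the current
 * progress offset (where->count - remaining) until 'remaining' is zero.
 */
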
static void dispatch_io(int rw, unsigned int num_regions,
                        struct dm_io_region *where, struct dpages *dp,
                        struct io *io, int sync)
{
        int i;
        struct dpages old_pages = *dp;

        BUG_ON(num_regions > DM_IO_MAX_REGIONS);

        if (sync)
                rw |= REQ_SYNC;

        /*
         * For multiple regions we need to be careful to rewind
         * the dp object for each call to do_region.
         */
        for (i = 0; i < num_regions; i++) {
                *dp = old_pages;
                if (where[i].count || (rw & REQ_FLUSH))
                        do_region(rw, i, where + i, dp, io);
        }

        /*
         * Drop the extra reference that we were holding to avoid
         * the io being completed too early.
         */
        dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
                   struct dm_io_region *where, int rw, struct dpages *dp,
                   unsigned long *error_bits)
{
        /*
         * gcc <= 4.3 can't do the alignment for stack variables, so we must
         * align it on our own.
         * volatile prevents the optimizer from removing or reusing the
         * "io_" field from the stack frame (allowed in ANSI C).
         */
        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
        DECLARE_COMPLETION_ONSTACK(wait);

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                return -EIO;
        }

        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->wait = &wait;
        io->client = client;

        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(rw, num_regions, where, dp, io, 1);

        wait_for_completion_io(&wait);

        if (error_bits)
                *error_bits = io->error_bits;

        return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
                    struct dm_io_region *where, int rw, struct dpages *dp,
                    io_notify_fn fn, void *context)
{
        struct io *io;

        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
                fn(1, context);
                return -EIO;
        }

        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
        io->wait = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;

        io->vma_invalidate_address = dp->vma_invalidate_address;
        io->vma_invalidate_size = dp->vma_invalidate_size;

        dispatch_io(rw, num_regions, where, dp, io, 0);
        return 0;
}

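/*
 * The notify callback of an async request fires from dec_count() once
 * the last bio completes, typically in interrupt/softirq context, so
 * callbacks must not sleep.
 */
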
static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
                   unsigned long size)
{
        /* Set up dpages based on memory type */

        dp->vma_invalidate_address = NULL;
        dp->vma_invalidate_size = 0;

        switch (io_req->mem.type) {
        case DM_IO_PAGE_LIST:
                list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
                break;

        case DM_IO_BIO:
                bio_dp_init(dp, io_req->mem.ptr.bio);
                break;

        case DM_IO_VMA:
                flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
                if ((io_req->bi_rw & RW_MASK) == READ) {
                        dp->vma_invalidate_address = io_req->mem.ptr.vma;
                        dp->vma_invalidate_size = size;
                }
                vm_dp_init(dp, io_req->mem.ptr.vma);
                break;

        case DM_IO_KMEM:
                km_dp_init(dp, io_req->mem.ptr.addr);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
          struct dm_io_region *where, unsigned long *sync_error_bits)
{
        int r;
        struct dpages dp;

        r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
        if (r)
                return r;

        if (!io_req->notify.fn)
                return sync_io(io_req->client, num_regions, where,
                               io_req->bi_rw, &dp, sync_error_bits);

        return async_io(io_req->client, num_regions, where, io_req->bi_rw,
                        &dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);

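/*
 * Illustrative use of dm_io() (a sketch under assumed context: 'client'
 * from dm_io_client_create(), a valid 'bdev', and page-aligned kernel
 * memory 'data'):
 *
 *      struct dm_io_region where = {
 *              .bdev = bdev,
 *              .sector = 0,
 *              .count = 8,                     // 4KiB in 512-byte sectors
 *      };
 *      struct dm_io_request io_req = {
 *              .bi_rw = READ,
 *              .mem.type = DM_IO_KMEM,
 *              .mem.ptr.addr = data,
 *              .notify.fn = NULL,              // NULL => synchronous
 *              .client = client,
 *      };
 *      unsigned long error_bits;
 *      int r = dm_io(&io_req, 1, &where, &error_bits);
 */
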
int __init dm_io_init(void)
{
        _dm_io_cache = KMEM_CACHE(io, 0);
        if (!_dm_io_cache)
                return -ENOMEM;

        return 0;
}

void dm_io_exit(void)
{
        kmem_cache_destroy(_dm_io_cache);
        _dm_io_cache = NULL;
}