/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
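
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * caller typically creates one client up front, e.g. in a target's
 * constructor, and reuses it for all its io:
 *
 *	struct dm_io_client *client = dm_io_client_create();
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */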

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
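
/*
 * Worked example (illustrative): on a 64-bit machine DM_IO_MAX_REGIONS
 * is 64, so 'struct io' is 64-byte aligned and the low 6 bits of its
 * address are always zero.  Packing an io at ...0x40 with region 5
 * gives bi_private == ...0x45; masking with -64 (~63) recovers the
 * pointer and masking with 63 recovers the region.
 */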

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
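
/*
 * A minimal sketch (illustrative, mirroring do_region() below) of how a
 * consumer walks a dpages object without caring which memory type backs
 * it; 'remaining' is assumed to count the sectors still to be mapped:
 *
 *	while (remaining) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, to_bytes(remaining));
 *		... add the page fragment to a bio ...
 *		remaining -= to_sector(len);
 *		dp->next_page(dp);
 *	}
 */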

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
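
/*
 * Illustrative arithmetic (not from the source): with 4KiB pages, a
 * 20MiB region is 40960 sectors backed by 5120 pages.  If
 * bio_get_nr_vecs() allows, say, 256 vecs per bio, each iteration of
 * the do/while above dispatches up to 1MiB, so the region goes out as
 * 20 bios, each completing through endio() and dec_count().
 */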

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
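
/*
 * Example usage (an illustrative sketch, not from this file): a
 * synchronous read of one page from sector 0 of 'bdev' into kmalloc'd
 * memory 'data', with 'client' from dm_io_client_create().  Leaving
 * notify.fn NULL selects the sync_io() path above:
 *
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= PAGE_SIZE >> SECTOR_SHIFT,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= data,
 *		.notify.fn	= NULL,
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *	int r = dm_io(&io_req, 1, &where, &error_bits);
 */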

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}