/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io. Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_kmalloc_pool(ios, sizeof(struct io));
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
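
/*
 * Example usage (an illustrative sketch only; nothing in this file calls it):
 * a device-mapper target would typically create one client in its constructor
 * and destroy it in its destructor.  The page count of 16 below is a
 * hypothetical sizing, not a value mandated by this interface.
 *
 *	struct dm_io_client *io_client;
 *
 *	io_client = dm_io_client_create(16);
 *	if (IS_ERR(io_client))
 *		return PTR_ERR(io_client);
 *
 *	... issue io via dm_io() using io_client ...
 *
 *	dm_io_client_destroy(io_client);
 */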

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec, which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs].bv_len;
}
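
/*
 * How the spare bvec stays hidden (descriptive note): do_region() below
 * allocates one bvec more than it needs and then decrements bi_max_vecs,
 * so bio_add_page() never touches the last slot and bio_set_region() can
 * stash the region number in that slot's bv_len.  endio() reads the region
 * back out and restores bi_max_vecs before dropping the bio with bio_put().
 */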

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	io = bio->bi_private;
	region = bio_get_region(bio);

	bio->bi_max_vecs++;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

static void dm_bio_destructor(struct bio *bio)
{
	struct io *io = bio->bi_private;

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably-sized bio: we add an extra bvec for
		 * bio_get/set_region() and decrement bi_max_vecs to hide
		 * that spare bvec from bio_add_page().
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = 1 + min_t(int, bio_get_nr_vecs(where->bdev),
				      num_bvecs);
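		/*
		 * Worked example (assuming 4 KiB pages, i.e. 8 sectors per
		 * page): for remaining = 1024 sectors the first line asks
		 * for dm_sector_div_up(1024, 8) = 128 page-sized bvecs,
		 * which is then capped by what bio_get_nr_vecs() reports
		 * for the device, plus the one spare bvec used to stash
		 * the region number.
		 */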
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio->bi_max_vecs--;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error_bits = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;
	io.client = client;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	if (error_bits)
		*error_bits = io.error_bits;

	return io.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
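
/*
 * Example usage (an illustrative sketch only; the variables below are
 * hypothetical and not defined in this file): a synchronous read of a
 * single region into kernel memory.  Leaving notify.fn NULL selects the
 * synchronous path (sync_io() above); supplying a callback would make the
 * same call asynchronous.
 *
 *	struct dm_io_region region = {
 *		.bdev   = bdev,
 *		.sector = start,
 *		.count  = nr_sectors,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn    = NULL,
 *		.client       = io_client,
 *	};
 *	unsigned long error_bits;
 *	int r;
 *
 *	r = dm_io(&io_req, 1, &region, &error_bits);
 */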