/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

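/*
 * Worked example of the sizing heuristic above (illustrative numbers only):
 * a client created below with num_pages == 16 is backed by a mempool of
 * 4 * 16 == 64 'struct io' objects.  The factor of four is an empirical
 * guess, as the comment in pages_to_ios() suggests, not a hard limit.
 */
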
/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

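/*
 * Illustrative client lifecycle (a sketch, not code from this file): a
 * caller expecting roughly 32 pages of I/O in flight might do
 *
 *	struct dm_io_client *c = dm_io_client_create(32);
 *
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...issue I/O via dm_io() with io_req.client = c...
 *	dm_io_client_destroy(c);
 *
 * dm_io_client_resize() can later grow or shrink the mempool if the
 * expected level of parallelism changes.
 */
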
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

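/*
 * Worked example of the packing above (illustrative values, 64-bit build):
 * with DM_IO_MAX_REGIONS == 64 every 'struct io' address is a multiple of
 * 64, so its low 6 bits are zero.  Packing io == 0x...f40 with region == 5
 * stores 0x...f45 in bi_private; masking with -64 recovers the pointer and
 * masking with 63 recovers the region, with no extra allocation.
 */
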
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

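/*
 * Completion therefore takes one of two paths when the last reference is
 * dropped: a synchronous caller (io->sleeper set by sync_io()) is simply
 * woken and inspects io->error_bits itself, while an asynchronous io is
 * freed back to the client's mempool before its notify callback runs with
 * the accumulated per-region error bits.
 */
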
static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

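/*
 * The iteration protocol, as used by do_region() below: get_page() reports
 * the current page together with the usable length and offset within it,
 * and next_page() advances to the following page.  A rough sketch of a
 * consumer (hypothetical, for illustration only):
 *
 *	while (remaining) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		...use 'len' bytes of 'page' starting at 'offset'...
 *		dp->next_page(dp);
 *	}
 *
 * The context_u/context_ptr pair is interpreted privately by each of the
 * page-list, bvec, vmalloc and kernel-memory implementations that follow.
 */
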
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

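/*
 * Worked example (illustrative addresses, 4 KiB pages): for a vmalloc'ed
 * buffer starting at 0xffffc90000001200, vm_dp_init() records an offset of
 * 0x200.  The first get_page() then yields that page with offset 0x200 and
 * length 0xe00; next_page() advances context_ptr by 0xe00 to the next page
 * boundary and clears the offset, after which whole pages are returned.
 */
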
static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a write barrier and we
	 * need to send a zero-sized barrier.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

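/*
 * A region larger than one bio can carry is thus split transparently: each
 * pass of the outer loop allocates a bio capped at the device's bvec limit,
 * fills it from the dpages iterator, bumps io->count and submits it, until
 * no sectors remain.  For example (illustrative numbers), a 1 MiB region on
 * a device reporting 128 bvecs of 4 KiB pages goes out as two 512 KiB bios,
 * each completing independently through endio()/dec_count().
 */
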
static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC | REQ_UNPLUG;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_HARDBARRIER))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing the
	 * "io_" variable on the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
		rw &= ~REQ_HARDBARRIER;
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

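/*
 * Note on the retry above: if a barrier write comes back with -EOPNOTSUPP
 * on any region, sync_io() clears the error bits, strips REQ_HARDBARRIER
 * and reissues the whole request once, so callers see ordinary write
 * semantics on devices without barrier support rather than a hard failure.
 */
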
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
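
/*
 * Illustrative use of dm_io() (a sketch with made-up values, not code from
 * this file): a synchronous read of 8 sectors into a kernel buffer could
 * look roughly like
 *
 *	struct dm_io_region where = {
 *		.bdev	= bdev,
 *		.sector	= 0,
 *		.count	= 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw		= READ,
 *		.mem.type	= DM_IO_KMEM,
 *		.mem.ptr.addr	= buf,
 *		.notify.fn	= NULL,
 *		.client		= client,
 *	};
 *	unsigned long error_bits;
 *
 *	r = dm_io(&io_req, 1, &where, &error_bits);
 *
 * Leaving notify.fn NULL selects the synchronous path; supplying notify.fn
 * and notify.context instead makes the same call asynchronous, and the
 * callback then receives the accumulated per-region error bits.
 */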

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}