/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

/* FIXME: can we shrink this? */
struct io {
	unsigned long error;		/* bitmap of per-region error bits */
	atomic_t count;			/* in-flight bios + 1 (see dispatch_io) */
	struct task_struct *sleeper;	/* non-NULL if the io is synchronous */
	io_notify_fn callback;		/* completion callback for async io */
	void *context;			/* opaque argument passed to callback */
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io. Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as buffer heads! (FIXME:
 * must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct io), gfp_mask);
}

static void free_io(void *element, void *pool_data)
{
	kfree(element);
}

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
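
/*
 * The 4x multiplier in pages_to_ios() is a rough heuristic (hence the
 * "too many?" above): a page handed to us by a client may end up covered
 * by several concurrent ios, so the io mempool is over-provisioned
 * rather than risking starvation under memory pressure.
 */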

static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16, 4);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}

int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}
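
/*
 * A sketch of typical client usage (illustrative only, not taken from
 * the original source).  A target reserves ios in its constructor and
 * releases them in its destructor:
 *
 *	r = dm_io_get(NUM_PAGES);	(in the target ctr)
 *	if (r)
 *		return r;
 *	...
 *	dm_io_put(NUM_PAGES);		(in the target dtr)
 *
 * where NUM_PAGES is however many pages the target expects to have in
 * flight at once.
 */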

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len;
}
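
/*
 * Why this works (an explanatory note): do_region() always allocates one
 * more bvec than the data needs, so the bvec at bi_max_vecs - 1 is never
 * filled by bio_add_page().  Its bv_len field is therefore free to abuse
 * as a stash for the region number until the bio completes.
 */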

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, _io_pool);
			fn(r, context);
		}
	}
}
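
/*
 * Completion protocol (an explanatory note): io->count starts at 1 so
 * the io cannot complete while bios are still being submitted; each bio
 * that do_region() dispatches adds another reference, and dispatch_io()
 * drops the initial one once submission is finished.  Note the ordering
 * above: for async io the fields are copied and the io freed back to
 * the pool *before* the callback runs, so the callback may safely issue
 * new io.
 */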

static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io = (struct io *) bio->bi_private;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	dec_count(io, bio_get_region(bio), error);
	bio_put(bio);

	return 0;
}
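
/*
 * Note on the signature: in this era bi_end_io could be called for
 * partial completions; returning 1 while bio->bi_size is still non-zero
 * tells the block layer we are not finished with the bio yet.  Failed
 * reads are zero-filled so callers never see stale data in their
 * buffers.
 */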

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
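
/*
 * A dpages is a tiny iterator: get_page() peeks at the current page
 * (and how much of it, from what offset, is usable) without consuming
 * it, and next_page() advances.  Three flavours follow, for page lists,
 * bvec arrays and vmalloc'd buffers; do_region() drives all of them
 * through the same two function pointers.
 */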

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
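
/*
 * Note that the bvec iterator just walks a plain array; nothing here
 * bounds the walk, so the caller must supply enough bvecs to cover the
 * whole region being transferred.
 */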

/*
 * Functions for getting the pages from a vmalloc'd buffer.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
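
/*
 * vmalloc memory is virtually contiguous but physically scattered, so
 * vm_get_page() resolves each virtual page to its struct page with
 * vmalloc_to_page().  context_u carries the offset of the data within
 * the first page; after that, every step is page aligned.
 */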

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio: one bvec per page of
		 * data, plus an extra one for bio_get/set_region() and
		 * one to cover the rounding down of the division.
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> 9)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}
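
/*
 * The outer loop exists because bio_add_page() may refuse a page once
 * the bio hits the queue's limits; when that happens we submit what we
 * have and carry on with a fresh bio at the right sector.  Every bio
 * submitted takes its own reference on the io.
 */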

static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(unsigned int num_regions, struct io_region *where,
		   int rw, struct dpages *dp, unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	*error_bits = io.error;
	return io.error ? -EIO : 0;
}
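
/*
 * A caveat worth noting: the io lives on sync_io()'s stack.  If we bail
 * out with -EINTR while bios are still in flight, endio() will later
 * touch stack memory that has been reused.  (The sleep is
 * uninterruptible, so in practice the signal check only fires between
 * wake-ups, but the hazard is real.)
 */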

static int async_io(unsigned int num_regions, struct io_region *where, int rw,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(_io_pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}
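
/*
 * A sketch of how a caller might read from a device synchronously
 * (illustrative only; "bdev" and the buffer sizing are assumptions,
 * not taken from the original source):
 *
 *	struct io_region where = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = 8,		(8 sectors = 4KiB)
 *	};
 *	unsigned long error_bits;
 *	void *data = vmalloc(8 << 9);
 *
 *	r = dm_io_sync_vm(1, &where, READ, data, &error_bits);
 */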

int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);