/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
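
/*
 * Usage sketch (illustrative only, error handling omitted): a
 * device-mapper target typically creates a client once, submits copies
 * and learns of their completion through the notify callback.  The
 * devices and numbers below are hypothetical; the dm_kcopyd_* calls,
 * the callback signature and the dm_io_region fields are the interface
 * declared in <linux/dm-kcopyd.h> and <linux/dm-io.h>.  The client is
 * created with the number of pages to preallocate, and dm_io_region
 * counts are in sectors.
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		struct completion *done = context;
 *
 *		if (read_err || write_err)
 *			DMERR("copy failed: read %d write 0x%lx",
 *			      read_err, write_err);
 *		complete(done);
 *	}
 *
 *	struct dm_kcopyd_client *kc;
 *	struct dm_io_region from, to;
 *	DECLARE_COMPLETION_ONSTACK(done);
 *
 *	dm_kcopyd_client_create(64, &kc);
 *	from.bdev = source_bdev;
 *	from.sector = 0;
 *	from.count = 1024;
 *	to = from;
 *	to.bdev = dest_bdev;
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, &done);
 *	wait_for_completion(&done);
 *	dm_kcopyd_client_destroy(kc);
 */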

#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	/*
	 * We maintain three lists of jobs:
	 *
	 * i) jobs waiting for pages
	 * ii) jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that have completed.
	 *
	 * All three of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed. 'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
};
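
/*
 * Note on the error fields above, sketched after the way dm-raid1
 * consumes them: read_err is a simple flag, while write_err is filled
 * in by dm-io as a bitset with one bit per destination region.  A
 * notify callback that was passed num_dests destinations could recover
 * the failed ones roughly like this (names are hypothetical):
 *
 *	unsigned i;
 *
 *	for (i = 0; i < num_dests; i++)
 *		if (test_bit(i, &write_err))
 *			DMERR("write to destination %u failed", i);
 */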

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}

/*
 * Functions to push and pop a job on a given job list: pop takes jobs
 * from the head, push adds them at the tail.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Issue the io for a job: a read from the source, or writes to all
 * destinations, using the pages already acquired for it.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw | (1 << BIO_RW_SYNC),
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

/*
 * Run through a list for as long as possible. Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list. io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	push(&kc->pages_jobs, job);
	wake(kc);
}

#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(job->kc->job_pool,
							   GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, job->kc->job_pool);
	}
}

/*
 * Kick off SPLIT_COUNT sub-jobs; each completion grabs the next
 * SUB_JOB_SIZE chunk until the whole source region has been copied.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}

int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif /* 0 */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
			    struct dm_kcopyd_client **result)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return -ENOMEM;

	spin_lock_init(&kc->lock);
	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	*result = kc;
	return 0;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);

	return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);