/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
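
/*
 * Typical usage, as an illustrative sketch rather than code from this
 * file: a client (a mirror or snapshot target, say) creates a kcopyd
 * client, submits copies with a completion callback, and destroys the
 * client once every job has finished.  The copy_done() function, the
 * my_ctx pointer and the device/region values below are hypothetical.
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		... both error arguments are zero on success ...
 *	}
 *
 *	struct dm_kcopyd_client *kc;
 *	struct dm_io_region from = { .bdev = src, .sector = 0, .count = 128 };
 *	struct dm_io_region to = { .bdev = dst, .sector = 0, .count = 128 };
 *	int r;
 *
 *	r = dm_kcopyd_client_create(32, &kc);
 *	if (!r) {
 *		dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, my_ctx);
 *		...
 *		dm_kcopyd_client_destroy(kc);
 *	}
 */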

#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	/*
	 * We maintain three lists of jobs:
	 *
	 * i)   jobs waiting for pages
	 * ii)  jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that have completed.
	 *
	 * All three of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};

static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;

	/*
	 * Hand the caller the first nr entries of the free list: walk
	 * nr - 1 links to the last entry of the span, then detach it
	 * from the remainder.
	 */
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);

	/*
	 * Walk to the tail of the returned list, counting a page for
	 * each link traversed, then count the tail itself and splice
	 * the whole list onto the head of the free list.
	 */
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd;
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed. 'context' is for the callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;

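/*
 * dm_kcopyd_init() and dm_kcopyd_exit() are not exported, and
 * dm_kcopyd_init() is __init: they belong to the device-mapper core's
 * own module init/exit path, not to kcopyd clients.
 */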
int __init dm_kcopyd_init(void)
{
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}

/*
 * Functions to push a job onto, and pop a job off, a given job list:
 * pop() takes jobs from the head of the list, push() appends at the
 * tail and push_head() requeues at the head.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages)
		kcopyd_put_pages(kc, job->pages);
	mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);
	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}

/*
 * Request the io for a particular job: a read from the single
 * source, or a write to every destination.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ)
		r = dm_io(&io_req, 1, &job->source, NULL);
	else
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	/*
	 * PAGE_SIZE >> 9 is the number of 512-byte sectors per page,
	 * so with 4K pages a 128-sector job starting at offset 0
	 * needs 16 pages.
	 */
	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

/*
 * Run through a list for as long as possible. Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);

	/*
	 * The order that these are called is *very* important.
	 * Complete jobs can free some pages for pages jobs.
	 * Pages jobs, when successful, will jump onto the io jobs
	 * list. io jobs call wake when they complete, and it all
	 * starts again.
	 */
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}

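/*
 * Large jobs are split into sub-jobs of SUB_JOB_SIZE sectors each:
 * with 512-byte sectors, 128 sectors is 64 KiB per sub-job, and
 * split_job() below keeps SPLIT_COUNT (8) such sub-jobs in flight
 * at once.
 */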
#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
							   GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}

/*
 * Create some sub-jobs that between them will do the whole move.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_inc(&job->kc->nr_jobs);

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}

int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);
	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif /* 0 */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
			    struct dm_kcopyd_client **result)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return -ENOMEM;

	spin_lock_init(&kc->lock);
	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	*result = kc;
	return 0;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);

	return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);