/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>
#include <linux/hardirq.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of four states: clean, dirty,
 * nosync or recovering.  There is no need to put clean regions
 * in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of four lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 *   failed_recovered_regions: Regions that kcopyd failed to
 *   recover.  rh_update_states() still completes the delayed io,
 *   but marks the region not-in-sync when errors are being
 *   handled.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the four
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
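/*
 * Region life cycle, as implemented below:
 *
 *   write to a clean region: rh_inc() marks it RH_DIRTY, takes it
 *   off the clean list and calls log->type->mark_region().
 *
 *   last pending write completes: rh_dec() moves an RH_DIRTY
 *   region back to the clean_regions list.
 *
 *   resync work is chosen: __rh_recovery_prepare() sets
 *   RH_RECOVERING; once the pending count drains, the region moves
 *   to the quiesced_regions list.
 *
 *   kcopyd finishes: rh_recovery_end() files the region on
 *   recovered_regions or failed_recovered_regions, where
 *   rh_update_states() finally reaps it.
 */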
struct mirror_set;
struct region_hash {
	struct mirror_set *ms;
	uint32_t region_size;
	unsigned region_shift;

	/* holds persistent region state */
	struct dirty_log *log;

	/* hash table */
	rwlock_t hash_lock;
	mempool_t *region_pool;
	unsigned int mask;
	unsigned int nr_buckets;
	struct list_head *buckets;

	spinlock_t region_lock;
	atomic_t recovery_in_flight;
	struct semaphore recovery_count;
	struct list_head clean_regions;
	struct list_head quiesced_regions;
	struct list_head recovered_regions;
	struct list_head failed_recovered_regions;
};

enum {
	RH_CLEAN,
	RH_DIRTY,
	RH_NOSYNC,
	RH_RECOVERING
};

struct region {
	struct region_hash *rh;	/* FIXME: can we get rid of this ? */
	region_t key;
	int state;

	struct list_head hash_list;
	struct list_head list;

	atomic_t pending;
	struct bio_list delayed_bios;
};


/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;
	struct region_hash rh;
	struct kcopyd_client *kcopyd_client;
	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;

	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct work_struct trigger_event;

	unsigned int nr_mirrors;
	struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
	return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
	return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
		   struct dirty_log *log, uint32_t region_size,
		   region_t nr_regions)
{
	unsigned int nr_buckets, max_buckets;
	size_t i;

	/*
	 * Calculate a suitable number of buckets for our hash
	 * table.
	 */
	max_buckets = nr_regions >> 6;
	for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
		;
	nr_buckets >>= 1;
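	/*
	 * For example, with nr_regions = 10000: max_buckets = 156,
	 * the loop stops with nr_buckets = 256 and the final shift
	 * gives 128 buckets.  The result is always a power of two
	 * (never less than 64), which lets rh_hash() select a bucket
	 * with a simple mask.
	 */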

	rh->ms = ms;
	rh->log = log;
	rh->region_size = region_size;
	rh->region_shift = ffs(region_size) - 1;
	rwlock_init(&rh->hash_lock);
	rh->mask = nr_buckets - 1;
	rh->nr_buckets = nr_buckets;

	rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
	if (!rh->buckets) {
		DMERR("unable to allocate region hash memory");
		return -ENOMEM;
	}

	for (i = 0; i < nr_buckets; i++)
		INIT_LIST_HEAD(rh->buckets + i);

	spin_lock_init(&rh->region_lock);
	sema_init(&rh->recovery_count, 0);
	atomic_set(&rh->recovery_in_flight, 0);
	INIT_LIST_HEAD(&rh->clean_regions);
	INIT_LIST_HEAD(&rh->quiesced_regions);
	INIT_LIST_HEAD(&rh->recovered_regions);
	INIT_LIST_HEAD(&rh->failed_recovered_regions);

	rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
						      sizeof(struct region));
	if (!rh->region_pool) {
		vfree(rh->buckets);
		rh->buckets = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void rh_exit(struct region_hash *rh)
{
	unsigned int h;
	struct region *reg, *nreg;

	BUG_ON(!list_empty(&rh->quiesced_regions));
	for (h = 0; h < rh->nr_buckets; h++) {
		list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
			BUG_ON(atomic_read(&reg->pending));
			mempool_free(reg, rh->region_pool);
		}
	}

	if (rh->log)
		dm_destroy_dirty_log(rh->log);
	if (rh->region_pool)
		mempool_destroy(rh->region_pool);
	vfree(rh->buckets);
}

#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
	return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}
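/*
 * rh_hash() is a multiplicative hash: RH_HASH_MULT is a large odd
 * constant, so the multiplication spreads consecutive region numbers
 * across the table, and the shift discards the poorly mixed low-order
 * bits before the power-of-two mask picks the bucket.
 */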

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
	struct region *reg;

	list_for_each_entry(reg, rh->buckets + rh_hash(rh, region), hash_list)
		if (reg->key == region)
			return reg;

	return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
	unsigned int h = rh_hash(rh, reg->key);
	list_add(&reg->hash_list, rh->buckets + h);
}

static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
	struct region *reg, *nreg;

	read_unlock(&rh->hash_lock);
	nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
	if (unlikely(!nreg))
		nreg = kmalloc(sizeof(struct region), GFP_NOIO);
	nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
		RH_CLEAN : RH_NOSYNC;
	nreg->rh = rh;
	nreg->key = region;

	INIT_LIST_HEAD(&nreg->list);

	atomic_set(&nreg->pending, 0);
	bio_list_init(&nreg->delayed_bios);
	write_lock_irq(&rh->hash_lock);

	reg = __rh_lookup(rh, region);
	if (reg)
		/* we lost the race */
		mempool_free(nreg, rh->region_pool);
	else {
		__rh_insert(rh, nreg);
		if (nreg->state == RH_CLEAN) {
			spin_lock(&rh->region_lock);
			list_add(&nreg->list, &rh->clean_regions);
			spin_unlock(&rh->region_lock);
		}
		reg = nreg;
	}
	write_unlock_irq(&rh->hash_lock);
	read_lock(&rh->hash_lock);

	return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
	struct region *reg;

	reg = __rh_lookup(rh, region);
	if (!reg)
		reg = __rh_alloc(rh, region);

	return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
	int r;
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	if (reg)
		return reg->state;

	/*
	 * The region wasn't in the hash, so we fall back to the
	 * dirty log.
	 */
	r = rh->log->type->in_sync(rh->log, region, may_block);

	/*
	 * Any error from the dirty log (eg. -EWOULDBLOCK) gets
	 * taken as a RH_NOSYNC
	 */
	return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
			     region_t region, int may_block)
{
	int state = rh_state(rh, region, may_block);
	return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list))) {
		queue_bio(ms, bio, WRITE);
	}
}

static void complete_resync_work(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	rh->log->type->set_region_sync(rh->log, reg->key, success);
	dispatch_bios(rh->ms, &reg->delayed_bios);
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
	up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
	struct region *reg, *next;

	LIST_HEAD(clean);
	LIST_HEAD(recovered);
	LIST_HEAD(failed_recovered);

	/*
	 * Quickly grab the lists.
	 */
	write_lock_irq(&rh->hash_lock);
	spin_lock(&rh->region_lock);
	if (!list_empty(&rh->clean_regions)) {
		list_splice(&rh->clean_regions, &clean);
		INIT_LIST_HEAD(&rh->clean_regions);

		list_for_each_entry(reg, &clean, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->recovered_regions)) {
		list_splice(&rh->recovered_regions, &recovered);
		INIT_LIST_HEAD(&rh->recovered_regions);

		list_for_each_entry(reg, &recovered, list)
			list_del(&reg->hash_list);
	}

	if (!list_empty(&rh->failed_recovered_regions)) {
		list_splice(&rh->failed_recovered_regions, &failed_recovered);
		INIT_LIST_HEAD(&rh->failed_recovered_regions);

		list_for_each_entry(reg, &failed_recovered, list)
			list_del(&reg->hash_list);
	}

	spin_unlock(&rh->region_lock);
	write_unlock_irq(&rh->hash_lock);

	/*
	 * All the regions on the recovered and clean lists have
	 * now been pulled out of the system, so no need to do
	 * any more locking.
	 */
	list_for_each_entry_safe(reg, next, &recovered, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		complete_resync_work(reg, 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &failed_recovered, list) {
		complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
		mempool_free(reg, rh->region_pool);
	}

	list_for_each_entry_safe(reg, next, &clean, list) {
		rh->log->type->clear_region(rh->log, reg->key);
		mempool_free(reg, rh->region_pool);
	}

	rh->log->type->flush(rh->log);
}

static void rh_inc(struct region_hash *rh, region_t region)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);

	spin_lock_irq(&rh->region_lock);
	atomic_inc(&reg->pending);

	if (reg->state == RH_CLEAN) {
		reg->state = RH_DIRTY;
		list_del_init(&reg->list);	/* take off the clean list */
		spin_unlock_irq(&rh->region_lock);

		rh->log->type->mark_region(rh->log, reg->key);
	} else
		spin_unlock_irq(&rh->region_lock);

	read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
	struct bio *bio;

	for (bio = bios->head; bio; bio = bio->bi_next)
		rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
	unsigned long flags;
	struct region *reg;
	int should_wake = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_lookup(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irqsave(&rh->region_lock, flags);
	if (atomic_dec_and_test(&reg->pending)) {
		/*
		 * There is no pending I/O for this region.
		 * We can move the region to the corresponding list for
		 * the next action.  At this point, the region is not
		 * yet connected to any list.
		 *
		 * If the state is RH_NOSYNC, the region should be kept
		 * off the clean list.  The hash entry for RH_NOSYNC
		 * will remain in memory until the region is recovered
		 * or the map is reloaded.
		 */

		/* do nothing for RH_NOSYNC */
		if (reg->state == RH_RECOVERING) {
			list_add_tail(&reg->list, &rh->quiesced_regions);
		} else if (reg->state == RH_DIRTY) {
			reg->state = RH_CLEAN;
			list_add(&reg->list, &rh->clean_regions);
		}
		should_wake = 1;
	}
	spin_unlock_irqrestore(&rh->region_lock, flags);

	if (should_wake)
		wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
	int r;
	struct region *reg;
	region_t region;

	/*
	 * Ask the dirty log what's next.
	 */
	r = rh->log->type->get_resync_work(rh->log, &region);
	if (r <= 0)
		return r;

	/*
	 * Get this region, and start it quiescing by setting the
	 * recovering flag.
	 */
	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	spin_lock_irq(&rh->region_lock);
	reg->state = RH_RECOVERING;

	/* Already quiesced ? */
	if (atomic_read(&reg->pending))
		list_del_init(&reg->list);
	else
		list_move(&reg->list, &rh->quiesced_regions);

	spin_unlock_irq(&rh->region_lock);

	return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
	/* Extra reference to avoid race with rh_stop_recovery */
	atomic_inc(&rh->recovery_in_flight);

	while (!down_trylock(&rh->recovery_count)) {
		atomic_inc(&rh->recovery_in_flight);
		if (__rh_recovery_prepare(rh) <= 0) {
			atomic_dec(&rh->recovery_in_flight);
			up(&rh->recovery_count);
			break;
		}
	}

	/* Drop the extra reference */
	if (atomic_dec_and_test(&rh->recovery_in_flight))
		wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
	struct region *reg = NULL;

	spin_lock_irq(&rh->region_lock);
	if (!list_empty(&rh->quiesced_regions)) {
		reg = list_entry(rh->quiesced_regions.next,
				 struct region, list);
		list_del_init(&reg->list);	/* remove from the quiesced list */
	}
	spin_unlock_irq(&rh->region_lock);

	return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
	struct region_hash *rh = reg->rh;

	spin_lock_irq(&rh->region_lock);
	if (success)
		list_add(&reg->list, &reg->rh->recovered_regions);
	else {
		reg->state = RH_NOSYNC;
		list_add(&reg->list, &reg->rh->failed_recovered_regions);
	}
	spin_unlock_irq(&rh->region_lock);

	wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
	return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
	struct region *reg;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, bio_to_region(rh, bio));
	bio_list_add(&reg->delayed_bios, bio);
	read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
	int i;

	/* wait for any recovering regions */
	for (i = 0; i < MAX_RECOVERY; i++)
		down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
	int i;

	for (i = 0; i < MAX_RECOVERY; i++)
		up(&rh->recovery_count);

	wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer
 * while it is stashed here.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
	return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
	bio->bi_next = (struct bio *) ms;
}
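/*
 * The stash is strictly bounded: do_write() calls bio_set_ms() just
 * before handing the bio's pages to dm-io, and write_callback()
 * clears it with bio_set_ms(bio, NULL) before the bio is completed
 * or requeued, so the block layer never sees bi_next while it holds
 * this pointer.
 */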

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum dm_raid1_error values
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	if (!errors_handled(ms))
		return;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

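/*
 * fail_mirror() may be called from write_callback(), which can run in
 * interrupt context, so the dm_table_event() that signals userspace is
 * deferred to process context through the trigger_event work item.
 */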
/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
			      void *context)
{
	struct region *reg = (struct region *) context;

	if (read_err)
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");

	if (write_err)
		DMERR_LIMIT("Write error during recovery (error = 0x%x)",
			    write_err);

	rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
	int r;
	unsigned int i;
	struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + region_to_sector(reg->rh, reg->key);
	if (reg->key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (reg->rh->region_size - 1);
		if (!from.count)
			from.count = reg->rh->region_size;
	} else
		from.count = reg->rh->region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	set_bit(KCOPYD_IGNORE_ERROR, &flags);
	r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
			recovery_complete, reg);

	return r;
}

static void do_recovery(struct mirror_set *ms)
{
	int r;
	struct region *reg;
	struct dirty_log *log = ms->rh.log;

	/*
	 * Start quiescing some regions.
	 */
	rh_recovery_prepare(&ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = rh_recovery_start(&ms->rh))) {
		r = recover(ms, reg);
		if (r)
			rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	/* FIXME: add read balancing */
	return get_default_mirror(ms);
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = bio_to_region(&ms->rh, bio);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (rh_in_sync(&ms->rh, region, 1))
			m = choose_mirror(ms, bio->bi_sector);
		else
			m = get_default_mirror(ms);

		map_bio(ms, m, bio);
		generic_make_request(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use dm-io to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

/* __bio_mark_nosync
 * @ms
 * @bio
 * @done
 * @error
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
static void __bio_mark_nosync(struct mirror_set *ms,
			      struct bio *bio, unsigned done, int error)
{
	unsigned long flags;
	struct region_hash *rh = &ms->rh;
	struct dirty_log *log = ms->rh.log;
	struct region *reg;
	region_t region = bio_to_region(rh, bio);
	int recovering = 0;

	/* We must inform the log that the sync count has changed. */
	log->type->set_region_sync(log, region, 0);
	ms->in_sync = 0;

	read_lock(&rh->hash_lock);
	reg = __rh_find(rh, region);
	read_unlock(&rh->hash_lock);

	/* region hash entry should exist because write was in-flight */
	BUG_ON(!reg);
	BUG_ON(!list_empty(&reg->list));

	spin_lock_irqsave(&rh->region_lock, flags);
	/*
	 * Possible cases:
	 *   1) RH_DIRTY
	 *   2) RH_NOSYNC: was dirty, other preceding writes failed
	 *   3) RH_RECOVERING: flushing pending writes
	 * In any case, the region should not have been connected to a list.
	 */
	recovering = (reg->state == RH_RECOVERING);
	reg->state = RH_NOSYNC;
	BUG_ON(!list_empty(&reg->list));
	spin_unlock_irqrestore(&rh->region_lock, flags);

	bio_endio(bio, error);
	if (recovering)
		complete_resync_work(reg, 0);
}

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_ms(bio);
	bio_set_ms(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here;
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wake(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct io_region io[KCOPYD_MAX_REGIONS+1];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0; i < ms->nr_mirrors; i++) {
		m = ms->mirror + i;

		io[i].bdev = m->dev->bdev;
		io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
		io[i].count = bio->bi_size >> 9;
	}

	bio_set_ms(bio, ms);

	(void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);

	while ((bio = bio_list_pop(writes))) {
		state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
		switch (state) {
		case RH_CLEAN:
		case RH_DIRTY:
			this_list = &sync;
			break;

		case RH_NOSYNC:
			this_list = &nosync;
			break;

		case RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	rh_inc_pending(&ms->rh, &sync);
	rh_inc_pending(&ms->rh, &nosync);
	ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure))
		while ((bio = bio_list_pop(&sync)))
			bio_endio(bio, -EIO);
	else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		rh_delay(&ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(ms, get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	while ((bio = bio_list_pop(failures)))
		__bio_mark_nosync(ms, bio, bio->bi_size, 0);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static int _do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	rh_update_states(&ms->rh);
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	return (ms->failures.head) ? 1 : 0;
}

static void do_mirror(struct work_struct *work)
{
	/*
	 * If _do_mirror returns 1, we give it
	 * another shot.  This helps for cases like
	 * 'suspend' where we call flush_workqueue
	 * and expect all work to be finished.  If
	 * a failure happens during a suspend, we
	 * couldn't issue a 'wake' because it would
	 * not be honored.  Therefore, we return '1'
	 * from _do_mirror, and retry here.
	 */
	while (_do_mirror(work))
		schedule();
}


/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
		return NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	rh_exit(&ms->rh);
	kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
	return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
		 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
					  unsigned int argc, char **argv,
					  unsigned int *args_used)
{
	unsigned int param_count;
	struct dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
		ti->error = "Invalid region size";
		dm_destroy_dirty_log(dl);
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
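/*
 * For example (hypothetical devices and sizes), a two-way mirror
 * with a core log, 1024-sector regions and error handling enabled
 * would use a table line like:
 *
 *   0 2097152 mirror core 2 1024 nosync 2 /dev/sda 0 /dev/sdb 0 \
 *       1 handle_errors
 */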
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_destroy_dirty_log(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_destroy_dirty_log(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = ms->rh.region_size;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wake(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	flush_workqueue(ms->kmirrord_wq);
	kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;

	map_context->ll = bio_to_region(&ms->rh, bio);

	if (rw == WRITE) {
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = ms->rh.log->type->in_sync(ms->rh.log,
				      bio_to_region(&ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	if (r == -EWOULDBLOCK)	/* FIXME: ugly */
		r = DM_MAPIO_SUBMITTED;

	/*
	 * We don't want to fast track a recovery just for a read
	 * ahead.  So we just let it silently fail.
	 * FIXME: get rid of this.
	 */
	if (!r && rw == READA)
		return -EIO;

	if (!r) {
		/* Pass this io over to the daemon */
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	m = choose_mirror(ms, bio->bi_sector);
	if (!m)
		return -EIO;

	map_bio(ms, m, bio);
	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	region_t region = map_context->ll;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE)
		rh_dec(&ms->rh, region);

	return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;

	rh_stop_recovery(&ms->rh);

	/* Wait for all I/O we generated to complete */
	wait_event(_kmirrord_recovery_stopped,
		   !atomic_read(&ms->rh.recovery_in_flight));

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dirty_log *log = ms->rh.log;
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT("%s ", ms->mirror[m].dev->name);

		DMEMIT("%llu/%llu 0 ",
		       (unsigned long long)ms->rh.log->type->
				get_sync_count(ms->rh.log),
		       (unsigned long long)ms->nr_regions);

		sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
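/*
 * For a healthy two-way mirror, the STATUSTYPE_INFO branch above
 * emits something like (hypothetical device names and counts):
 *
 *   2 253:4 253:5 1024/1024 0 <dirty log status>
 *
 * i.e. mirror count, device names, regions-in-sync/total regions,
 * a literal 0, then whatever the log's status method appends.
 */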

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 0, 3},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
};

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_dirty_log_init();
	if (r)
		return r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		dm_dirty_log_exit();
	}

	return r;
}

static void __exit dm_mirror_exit(void)
{
	int r;

	r = dm_unregister_target(&mirror_target);
	if (r < 0)
		DMERR("unregister failed %d", r);

	dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");