/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>
#include <linux/hardirq.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p) ((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions. Each
 * region can be in one of three states: clean, dirty,
 * nosync. There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of four lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull. rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery. rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered. rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 *   failed_recovered_regions: Regions that kcopyd failed to
 *   recover. rh_update_states() will dispatch their delayed io,
 *   mark them not-in-sync when errors are being handled, and
 *   remove them from the hash.
 *
 * There are 2 locks:
 * A rw spin lock 'hash_lock' protects just the hash table,
 * this is never held in write mode from interrupt context,
 * which I believe means that we only have to disable irqs when
 * doing a write lock.
 *
 * An ordinary spin lock 'region_lock' that protects the four
 * lists in the region_hash, with the 'state', 'list' and
 * 'delayed_bios' fields of the regions. This is used from irq
 * context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
        struct mirror_set *ms;
        uint32_t region_size;
        unsigned region_shift;

        /* holds persistent region state */
        struct dirty_log *log;

        /* hash table */
        rwlock_t hash_lock;
        mempool_t *region_pool;
        unsigned int mask;
        unsigned int nr_buckets;
        struct list_head *buckets;

        spinlock_t region_lock;
        atomic_t recovery_in_flight;
        struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
        struct list_head failed_recovered_regions;
};

enum {
        RH_CLEAN,
        RH_DIRTY,
        RH_NOSYNC,
        RH_RECOVERING
};
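
/*
 * State transitions, as implemented below (illustrative summary):
 *
 *   RH_CLEAN      -> RH_DIRTY       first write arrives (rh_inc)
 *   RH_DIRTY      -> RH_CLEAN       last pending io completes (rh_dec)
 *   clean/nosync  -> RH_RECOVERING  __rh_recovery_prepare() picks the
 *                                   region for resync
 *   RH_RECOVERING -> removed        recovery succeeded (rh_recovery_end)
 *   RH_RECOVERING -> RH_NOSYNC      recovery failed
 */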

struct region {
        struct region_hash *rh; /* FIXME: can we get rid of this ? */
        region_t key;
        int state;

        struct list_head hash_list;
        struct list_head list;

        atomic_t pending;
        struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
        DM_RAID1_WRITE_ERROR,
        DM_RAID1_SYNC_ERROR,
        DM_RAID1_READ_ERROR
};

struct mirror {
        struct mirror_set *ms;
        atomic_t error_count;
        unsigned long error_type;
        struct dm_dev *dev;
        sector_t offset;
};

struct mirror_set {
        struct dm_target *ti;
        struct list_head list;
        struct region_hash rh;
        struct kcopyd_client *kcopyd_client;
        uint64_t features;

        spinlock_t lock;        /* protects the lists */
        struct bio_list reads;
        struct bio_list writes;
        struct bio_list failures;

        struct dm_io_client *io_client;

        /* recovery */
        region_t nr_regions;
        int in_sync;
        int log_failure;
        atomic_t suspend;

        atomic_t default_mirror;        /* Default mirror */

        struct workqueue_struct *kmirrord_wq;
        struct work_struct kmirrord_work;
        struct work_struct trigger_event;

        unsigned int nr_mirrors;
        struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
        return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
        return region << rh->region_shift;
}
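
/*
 * Example (illustrative): with region_size = 1024 sectors,
 * region_shift = 10; a bio 4500 sectors into the target maps to
 * region 4, and region_to_sector(rh, 4) returns sector 4096.
 */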

static void wake(struct mirror_set *ms)
{
        queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
                   struct dirty_log *log, uint32_t region_size,
                   region_t nr_regions)
{
        unsigned int nr_buckets, max_buckets;
        size_t i;

        /*
         * Calculate a suitable number of buckets for our hash
         * table.
         */
        max_buckets = nr_regions >> 6;
        for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
                ;
        nr_buckets >>= 1;
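        /*
         * nr_buckets is now the largest power of two strictly below
         * max_buckets, but never less than 64 (e.g. 2^20 regions give
         * max_buckets = 16384 and nr_buckets = 8192).
         */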

        rh->ms = ms;
        rh->log = log;
        rh->region_size = region_size;
        rh->region_shift = ffs(region_size) - 1;
        rwlock_init(&rh->hash_lock);
        rh->mask = nr_buckets - 1;
        rh->nr_buckets = nr_buckets;

        rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
        if (!rh->buckets) {
                DMERR("unable to allocate region hash memory");
                return -ENOMEM;
        }

        for (i = 0; i < nr_buckets; i++)
                INIT_LIST_HEAD(rh->buckets + i);

        spin_lock_init(&rh->region_lock);
        sema_init(&rh->recovery_count, 0);
        atomic_set(&rh->recovery_in_flight, 0);
        INIT_LIST_HEAD(&rh->clean_regions);
        INIT_LIST_HEAD(&rh->quiesced_regions);
        INIT_LIST_HEAD(&rh->recovered_regions);
        INIT_LIST_HEAD(&rh->failed_recovered_regions);

        rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
                                                      sizeof(struct region));
        if (!rh->region_pool) {
                vfree(rh->buckets);
                rh->buckets = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void rh_exit(struct region_hash *rh)
{
        unsigned int h;
        struct region *reg, *nreg;

        BUG_ON(!list_empty(&rh->quiesced_regions));
        for (h = 0; h < rh->nr_buckets; h++) {
                list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
                        BUG_ON(atomic_read(&reg->pending));
                        mempool_free(reg, rh->region_pool);
                }
        }

        if (rh->log)
                dm_destroy_dirty_log(rh->log);
        if (rh->region_pool)
                mempool_destroy(rh->region_pool);
        vfree(rh->buckets);
}
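
/*
 * Multiplicative hashing: RH_HASH_MULT is a large odd constant; the
 * low-order bits of the product mix poorly, so they are shifted away
 * before the bucket mask is applied.
 */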
256#define RH_HASH_MULT 2654435387U
257
258static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
259{
260 return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
261}
262
263static struct region *__rh_lookup(struct region_hash *rh, region_t region)
264{
265 struct region *reg;
266
267 list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
268 if (reg->key == region)
269 return reg;
270
271 return NULL;
272}
273
274static void __rh_insert(struct region_hash *rh, struct region *reg)
275{
276 unsigned int h = rh_hash(rh, reg->key);
277 list_add(&reg->hash_list, rh->buckets + h);
278}
279
280static struct region *__rh_alloc(struct region_hash *rh, region_t region)
281{
282 struct region *reg, *nreg;
283
284 read_unlock(&rh->hash_lock);
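        /*
         * Allocate outside the lock: the GFP_NOIO fallback below may
         * sleep.  The lookup is repeated under the write lock to catch
         * a racing insert of the same region.
         */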
        nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
        if (unlikely(!nreg))
                nreg = kmalloc(sizeof(struct region), GFP_NOIO);
        nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
                RH_CLEAN : RH_NOSYNC;
        nreg->rh = rh;
        nreg->key = region;

        INIT_LIST_HEAD(&nreg->list);

        atomic_set(&nreg->pending, 0);
        bio_list_init(&nreg->delayed_bios);
        write_lock_irq(&rh->hash_lock);

        reg = __rh_lookup(rh, region);
        if (reg)
                /* we lost the race */
                mempool_free(nreg, rh->region_pool);
        else {
                __rh_insert(rh, nreg);
                if (nreg->state == RH_CLEAN) {
                        spin_lock(&rh->region_lock);
                        list_add(&nreg->list, &rh->clean_regions);
                        spin_unlock(&rh->region_lock);
                }
                reg = nreg;
        }
        write_unlock_irq(&rh->hash_lock);
        read_lock(&rh->hash_lock);

        return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
        struct region *reg;

        reg = __rh_lookup(rh, region);
        if (!reg)
                reg = __rh_alloc(rh, region);

        return reg;
}

static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
        int r;
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        if (reg)
                return reg->state;

        /*
         * The region wasn't in the hash, so we fall back to the
         * dirty log.
         */
        r = rh->log->type->in_sync(rh->log, region, may_block);

        /*
         * Any error from the dirty log (e.g. -EWOULDBLOCK) gets
         * taken as RH_NOSYNC.
         */
        return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
                             region_t region, int may_block)
{
        int state = rh_state(rh, region, may_block);
        return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list)))
                queue_bio(ms, bio, WRITE);
}

static void complete_resync_work(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        rh->log->type->set_region_sync(rh->log, reg->key, success);

        /*
         * Dispatch the bios before we call 'wake_up_all'.
         * This is important because if we are suspending,
         * we want to know that recovery is complete and
         * the work queue is flushed. If we wake_up_all
         * before we dispatch_bios (queue bios and call wake()),
         * then we risk suspending before the work queue
         * has been properly flushed.
         */
        dispatch_bios(rh->ms, &reg->delayed_bios);
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                wake_up_all(&_kmirrord_recovery_stopped);
        up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
        struct region *reg, *next;

        LIST_HEAD(clean);
        LIST_HEAD(recovered);
        LIST_HEAD(failed_recovered);

        /*
         * Quickly grab the lists.
         */
        write_lock_irq(&rh->hash_lock);
        spin_lock(&rh->region_lock);
        if (!list_empty(&rh->clean_regions)) {
                list_splice(&rh->clean_regions, &clean);
                INIT_LIST_HEAD(&rh->clean_regions);

                list_for_each_entry(reg, &clean, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->recovered_regions)) {
                list_splice(&rh->recovered_regions, &recovered);
                INIT_LIST_HEAD(&rh->recovered_regions);

                list_for_each_entry(reg, &recovered, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->failed_recovered_regions)) {
                list_splice(&rh->failed_recovered_regions, &failed_recovered);
                INIT_LIST_HEAD(&rh->failed_recovered_regions);

                list_for_each_entry(reg, &failed_recovered, list)
                        list_del(&reg->hash_list);
        }

        spin_unlock(&rh->region_lock);
        write_unlock_irq(&rh->hash_lock);

        /*
         * All the regions on the recovered and clean lists have
         * now been pulled out of the system, so no need to do
         * any more locking.
         */
        list_for_each_entry_safe(reg, next, &recovered, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                complete_resync_work(reg, 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &failed_recovered, list) {
                complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &clean, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                mempool_free(reg, rh->region_pool);
        }

        rh->log->type->flush(rh->log);
}

static void rh_inc(struct region_hash *rh, region_t region)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);

        spin_lock_irq(&rh->region_lock);
        atomic_inc(&reg->pending);

        if (reg->state == RH_CLEAN) {
                reg->state = RH_DIRTY;
                list_del_init(&reg->list);      /* take off the clean list */
                spin_unlock_irq(&rh->region_lock);

                rh->log->type->mark_region(rh->log, reg->key);
        } else
                spin_unlock_irq(&rh->region_lock);

        read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
        struct bio *bio;

        for (bio = bios->head; bio; bio = bio->bi_next)
                rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
        unsigned long flags;
        struct region *reg;
        int should_wake = 0;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irqsave(&rh->region_lock, flags);
        if (atomic_dec_and_test(&reg->pending)) {
                /*
                 * There is no pending I/O for this region.
                 * We can move the region to the corresponding list
                 * for the next action. At this point, the region is
                 * not yet connected to any list.
                 *
                 * If the state is RH_NOSYNC, the region should be
                 * kept off the clean list.
                 * The hash entry for RH_NOSYNC will remain in memory
                 * until the region is recovered or the map is reloaded.
                 */

                /* do nothing for RH_NOSYNC */
                if (reg->state == RH_RECOVERING) {
                        list_add_tail(&reg->list, &rh->quiesced_regions);
                } else if (reg->state == RH_DIRTY) {
                        reg->state = RH_CLEAN;
                        list_add(&reg->list, &rh->clean_regions);
                }
                should_wake = 1;
        }
        spin_unlock_irqrestore(&rh->region_lock, flags);

        if (should_wake)
                wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
        int r;
        struct region *reg;
        region_t region;

        /*
         * Ask the dirty log what's next.
         */
        r = rh->log->type->get_resync_work(rh->log, &region);
        if (r <= 0)
                return r;

        /*
         * Get this region, and start it quiescing by setting the
         * recovering flag.
         */
        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irq(&rh->region_lock);
        reg->state = RH_RECOVERING;

        /* Already quiesced ? */
        if (atomic_read(&reg->pending))
                list_del_init(&reg->list);
        else
                list_move(&reg->list, &rh->quiesced_regions);

        spin_unlock_irq(&rh->region_lock);

        return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
        /* Extra reference to avoid race with rh_stop_recovery */
        atomic_inc(&rh->recovery_in_flight);

        while (!down_trylock(&rh->recovery_count)) {
                atomic_inc(&rh->recovery_in_flight);
                if (__rh_recovery_prepare(rh) <= 0) {
                        atomic_dec(&rh->recovery_in_flight);
                        up(&rh->recovery_count);
                        break;
                }
        }

        /* Drop the extra reference */
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
        struct region *reg = NULL;

        spin_lock_irq(&rh->region_lock);
        if (!list_empty(&rh->quiesced_regions)) {
                reg = list_entry(rh->quiesced_regions.next,
                                 struct region, list);
                list_del_init(&reg->list);      /* remove from the quiesced list */
        }
        spin_unlock_irq(&rh->region_lock);

        return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        spin_lock_irq(&rh->region_lock);
        if (success)
                list_add(&reg->list, &reg->rh->recovered_regions);
        else {
                reg->state = RH_NOSYNC;
                list_add(&reg->list, &reg->rh->failed_recovered_regions);
        }
        spin_unlock_irq(&rh->region_lock);

        wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
        return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, bio_to_region(rh, bio));
        bio_list_add(&reg->delayed_bios, bio);
        read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
        int i;

        /* wait for any recovering regions */
        for (i = 0; i < MAX_RECOVERY; i++)
                down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
        int i;

        for (i = 0; i < MAX_RECOVERY; i++)
                up(&rh->recovery_count);

        wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror_set struct away inside
 * bi_next for write buffers. This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
        return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
        bio->bi_next = (struct bio *) ms;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
        return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
        struct mirror_set *ms = m->ms;
        struct mirror *m0 = &(ms->mirror[0]);

        atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum dm_raid1_error values
 *
 * If errors are being handled, record the type of
 * error encountered for this device. If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event. Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
        struct mirror_set *ms = m->ms;
        struct mirror *new;

        if (!errors_handled(ms))
                return;

        /*
         * error_count is used for nothing more than a
         * simple way to tell if a device has encountered
         * errors.
         */
        atomic_inc(&m->error_count);

        if (test_and_set_bit(error_type, &m->error_type))
                return;

        if (m != get_default_mirror(ms))
                goto out;

        if (!ms->in_sync) {
                /*
                 * Better to issue requests to the same failing device
                 * than to risk returning corrupt data.
                 */
                DMERR("Primary mirror (%s) failed while out-of-sync: "
                      "Reads may fail.", m->dev->name);
                goto out;
        }

        for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
                if (!atomic_read(&new->error_count)) {
                        set_default_mirror(new);
                        break;
                }

        if (unlikely(new == ms->mirror + ms->nr_mirrors))
                DMWARN("All sides of mirror have failed.");

out:
        schedule_work(&ms->trigger_event);
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
                              void *context)
{
        struct region *reg = (struct region *)context;
        struct mirror_set *ms = reg->rh->ms;
        int m, bit = 0;

        if (read_err) {
                /* Read error means the failure of default mirror. */
                DMERR_LIMIT("Unable to read primary mirror during recovery");
                fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
        }

        if (write_err) {
                DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
                            write_err);
                /*
                 * Bits correspond to devices (excluding default mirror).
                 * The default mirror cannot change during recovery.
                 */
                for (m = 0; m < ms->nr_mirrors; m++) {
                        if (&ms->mirror[m] == get_default_mirror(ms))
                                continue;
                        if (test_bit(bit, &write_err))
                                fail_mirror(ms->mirror + m,
                                            DM_RAID1_SYNC_ERROR);
                        bit++;
                }
        }

        rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct region *reg)
{
        int r;
        unsigned int i;
        struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
        struct mirror *m;
        unsigned long flags = 0;

        /* fill in the source */
        m = get_default_mirror(ms);
        from.bdev = m->dev->bdev;
        from.sector = m->offset + region_to_sector(reg->rh, reg->key);
        if (reg->key == (ms->nr_regions - 1)) {
                /*
                 * The final region may be smaller than
                 * region_size.
                 */
                from.count = ms->ti->len & (reg->rh->region_size - 1);
                if (!from.count)
                        from.count = reg->rh->region_size;
        } else
                from.count = reg->rh->region_size;

        /* fill in the destinations */
        for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
                if (&ms->mirror[i] == get_default_mirror(ms))
                        continue;

                m = ms->mirror + i;
                dest->bdev = m->dev->bdev;
                dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
                dest->count = from.count;
                dest++;
        }

        /* hand to kcopyd */
        set_bit(KCOPYD_IGNORE_ERROR, &flags);
        r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
                        recovery_complete, reg);

        return r;
}
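
/*
 * KCOPYD_IGNORE_ERROR (set above) asks kcopyd not to abort the copy
 * when a destination fails, so per-destination failures come back
 * through the write_err bitmask that recovery_complete() decodes.
 */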

static void do_recovery(struct mirror_set *ms)
{
        int r;
        struct region *reg;
        struct dirty_log *log = ms->rh.log;

        /*
         * Start quiescing some regions.
         */
        rh_recovery_prepare(&ms->rh);

        /*
         * Copy any already quiesced regions.
         */
        while ((reg = rh_recovery_start(&ms->rh))) {
                r = recover(ms, reg);
                if (r)
                        rh_recovery_end(reg, 0);
        }

        /*
         * Update the in sync flag.
         */
        if (!ms->in_sync &&
            (log->type->get_sync_count(log) == ms->nr_regions)) {
                /* the sync is complete */
                dm_table_event(ms->ti->table);
                ms->in_sync = 1;
        }
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
        /* FIXME: add read balancing */
        return get_default_mirror(ms);
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
        bio->bi_bdev = m->dev->bdev;
        bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
        region_t region;
        struct bio *bio;
        struct mirror *m;

        while ((bio = bio_list_pop(reads))) {
                region = bio_to_region(&ms->rh, bio);

                /*
                 * We can only read balance if the region is in sync.
                 */
                if (rh_in_sync(&ms->rh, region, 1))
                        m = choose_mirror(ms, bio->bi_sector);
                else
                        m = get_default_mirror(ms);

                map_bio(ms, m, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:       increment pending, use dm-io to write to *all* mirrors
 * RECOVERING: delay the io until recovery completes
 * NOSYNC:     increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

/* __bio_mark_nosync
 * @ms
 * @bio
 * @done
 * @error
 *
 * The bio was written on some mirror(s) but failed on other mirror(s).
 * We can successfully endio the bio but should avoid the region being
 * marked clean by setting the state RH_NOSYNC.
 *
 * This function is _not_ safe in interrupt context!
 */
static void __bio_mark_nosync(struct mirror_set *ms,
                              struct bio *bio, unsigned done, int error)
{
        unsigned long flags;
        struct region_hash *rh = &ms->rh;
        struct dirty_log *log = ms->rh.log;
        struct region *reg;
        region_t region = bio_to_region(rh, bio);
        int recovering = 0;

        /* We must inform the log that the sync count has changed. */
        log->type->set_region_sync(log, region, 0);
        ms->in_sync = 0;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        /* region hash entry should exist because write was in-flight */
        BUG_ON(!reg);
        BUG_ON(!list_empty(&reg->list));

        spin_lock_irqsave(&rh->region_lock, flags);
        /*
         * Possible cases:
         *   1) RH_DIRTY
         *   2) RH_NOSYNC: was dirty, other preceding writes failed
         *   3) RH_RECOVERING: flushing pending writes
         * In any case, the region should not be connected to any list.
         */
        recovering = (reg->state == RH_RECOVERING);
        reg->state = RH_NOSYNC;
        BUG_ON(!list_empty(&reg->list));
        spin_unlock_irqrestore(&rh->region_lock, flags);

        bio_endio(bio, error);
        if (recovering)
                complete_resync_work(reg, 0);
}

static void write_callback(unsigned long error, void *context)
{
        unsigned i, ret = 0;
        struct bio *bio = (struct bio *) context;
        struct mirror_set *ms;
        int uptodate = 0;
        int should_wake = 0;
        unsigned long flags;

        ms = bio_get_ms(bio);
        bio_set_ms(bio, NULL);

        /*
         * NOTE: We don't decrement the pending count here,
         * instead it is done by the target's endio function.
         * This way we handle both writes to SYNC and NOSYNC
         * regions with the same code.
         */
        if (likely(!error))
                goto out;

        for (i = 0; i < ms->nr_mirrors; i++)
                if (test_bit(i, &error))
                        fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
                else
                        uptodate = 1;

        if (unlikely(!uptodate)) {
                DMERR("All replicated volumes dead, failing I/O");
                /* None of the writes succeeded, fail the I/O. */
                ret = -EIO;
        } else if (errors_handled(ms)) {
                /*
                 * Need to raise event. Since raising
                 * events can block, we need to do it in
                 * the main thread.
                 */
                spin_lock_irqsave(&ms->lock, flags);
                if (!ms->failures.head)
                        should_wake = 1;
                bio_list_add(&ms->failures, bio);
                spin_unlock_irqrestore(&ms->lock, flags);
                if (should_wake)
                        wake(ms);
                return;
        }
out:
        bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct io_region io[KCOPYD_MAX_REGIONS+1];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE,
                .mem.type = DM_IO_BVEC,
                .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
        };

        for (i = 0; i < ms->nr_mirrors; i++) {
                m = ms->mirror + i;

                io[i].bdev = m->dev->bdev;
                io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
                io[i].count = bio->bi_size >> 9;
        }

        bio_set_ms(bio, ms);

        (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}
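
/*
 * With io_req.notify.fn set, the dm_io() call above is asynchronous:
 * it returns once the io is submitted, and write_callback() runs with
 * a per-mirror error bitmask once all the mirrors have completed.
 */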

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
        int state;
        struct bio *bio;
        struct bio_list sync, nosync, recover, *this_list = NULL;

        if (!writes->head)
                return;

        /*
         * Classify each write.
         */
        bio_list_init(&sync);
        bio_list_init(&nosync);
        bio_list_init(&recover);

        while ((bio = bio_list_pop(writes))) {
                state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
                switch (state) {
                case RH_CLEAN:
                case RH_DIRTY:
                        this_list = &sync;
                        break;

                case RH_NOSYNC:
                        this_list = &nosync;
                        break;

                case RH_RECOVERING:
                        this_list = &recover;
                        break;
                }

                bio_list_add(this_list, bio);
        }

        /*
         * Increment the pending counts for any regions that will
         * be written to (writes to recover regions are going to
         * be delayed).
         */
        rh_inc_pending(&ms->rh, &sync);
        rh_inc_pending(&ms->rh, &nosync);
        ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

        /*
         * Dispatch io.
         */
        if (unlikely(ms->log_failure)) {
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->failures, &sync);
                spin_unlock_irq(&ms->lock);
        } else
                while ((bio = bio_list_pop(&sync)))
                        do_write(ms, bio);

        while ((bio = bio_list_pop(&recover)))
                rh_delay(&ms->rh, bio);

        while ((bio = bio_list_pop(&nosync))) {
                map_bio(ms, get_default_mirror(ms), bio);
                generic_make_request(bio);
        }
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
        struct bio *bio;

        if (!failures->head)
                return;

        if (!ms->log_failure) {
                while ((bio = bio_list_pop(failures)))
                        __bio_mark_nosync(ms, bio, bio->bi_size, 0);
                return;
        }

        /*
         * If the log has failed, unattempted writes are being
         * put on the failures list. We can't issue those writes
         * until a log has been marked, so we must store them.
         *
         * If a 'noflush' suspend is in progress, we can requeue
         * the I/O's to the core. This gives userspace a chance
         * to reconfigure the mirror, at which point the core
         * will reissue the writes. If the 'noflush' flag is
         * not set, we have no choice but to return errors.
         *
         * Some writes on the failures list may have been
         * submitted before the log failure and represent a
         * failure to write to one of the devices. It is ok
         * for us to treat them the same and requeue them
         * as well.
         */
        if (dm_noflush_suspending(ms->ti)) {
                while ((bio = bio_list_pop(failures)))
                        bio_endio(bio, DM_ENDIO_REQUEUE);
                return;
        }

        if (atomic_read(&ms->suspend)) {
                while ((bio = bio_list_pop(failures)))
                        bio_endio(bio, -EIO);
                return;
        }

        spin_lock_irq(&ms->lock);
        bio_list_merge(&ms->failures, failures);
        spin_unlock_irq(&ms->lock);

        wake(ms);
}

static void trigger_event(struct work_struct *work)
{
        struct mirror_set *ms =
                container_of(work, struct mirror_set, trigger_event);

        dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static int _do_mirror(struct work_struct *work)
{
        struct mirror_set *ms = container_of(work, struct mirror_set,
                                             kmirrord_work);
        struct bio_list reads, writes, failures;
        unsigned long flags;

        spin_lock_irqsave(&ms->lock, flags);
        reads = ms->reads;
        writes = ms->writes;
        failures = ms->failures;
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        bio_list_init(&ms->failures);
        spin_unlock_irqrestore(&ms->lock, flags);

        rh_update_states(&ms->rh);
        do_recovery(ms);
        do_reads(ms, &reads);
        do_writes(ms, &writes);
        do_failures(ms, &failures);

        return (ms->failures.head) ? 1 : 0;
}

static void do_mirror(struct work_struct *work)
{
        /*
         * If _do_mirror returns 1, we give it
         * another shot. This helps for cases like
         * 'suspend' where we call flush_workqueue
         * and expect all work to be finished. If
         * a failure happens during a suspend, we
         * couldn't issue a 'wake' because it would
         * not be honored. Therefore, we return '1'
         * from _do_mirror, and retry here.
         */
        while (_do_mirror(work))
                schedule();
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
                                        uint32_t region_size,
                                        struct dm_target *ti,
                                        struct dirty_log *dl)
{
        size_t len;
        struct mirror_set *ms = NULL;

        if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
                return NULL;

        len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

        ms = kzalloc(len, GFP_KERNEL);
        if (!ms) {
                ti->error = "Cannot allocate mirror context";
                return NULL;
        }

        spin_lock_init(&ms->lock);

        ms->ti = ti;
        ms->nr_mirrors = nr_mirrors;
        ms->nr_regions = dm_sector_div_up(ti->len, region_size);
        ms->in_sync = 0;
        ms->log_failure = 0;
        atomic_set(&ms->suspend, 0);
        atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

        ms->io_client = dm_io_client_create(DM_IO_PAGES);
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
                return NULL;
        }

        if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
                ti->error = "Error creating dirty region hash";
                dm_io_client_destroy(ms->io_client);
                kfree(ms);
                return NULL;
        }

        return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
                         unsigned int m)
{
        while (m--)
                dm_put_device(ti, ms->mirror[m].dev);

        dm_io_client_destroy(ms->io_client);
        rh_exit(&ms->rh);
        kfree(ms);
}

static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
        return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
                 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
                      unsigned int mirror, char **argv)
{
        unsigned long long offset;

        if (sscanf(argv[1], "%llu", &offset) != 1) {
                ti->error = "Invalid offset";
                return -EINVAL;
        }

        if (dm_get_device(ti, argv[0], offset, ti->len,
                          dm_table_get_mode(ti->table),
                          &ms->mirror[mirror].dev)) {
                ti->error = "Device lookup failure";
                return -ENXIO;
        }

        ms->mirror[mirror].ms = ms;
        atomic_set(&(ms->mirror[mirror].error_count), 0);
        ms->mirror[mirror].error_type = 0;
        ms->mirror[mirror].offset = offset;

        return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
                                          unsigned int argc, char **argv,
                                          unsigned int *args_used)
{
        unsigned int param_count;
        struct dirty_log *dl;

        if (argc < 2) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        if (sscanf(argv[1], "%u", &param_count) != 1) {
                ti->error = "Invalid mirror log argument count";
                return NULL;
        }

        *args_used = 2 + param_count;

        if (argc < *args_used) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
        if (!dl) {
                ti->error = "Error creating mirror dirty log";
                return NULL;
        }

        if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
                ti->error = "Invalid region size";
                dm_destroy_dirty_log(dl);
                return NULL;
        }

        return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
                          unsigned *args_used)
{
        unsigned num_features;
        struct dm_target *ti = ms->ti;

        *args_used = 0;

        if (!argc)
                return 0;

        if (sscanf(argv[0], "%u", &num_features) != 1) {
                ti->error = "Invalid number of features";
                return -EINVAL;
        }

        argc--;
        argv++;
        (*args_used)++;

        if (num_features > argc) {
                ti->error = "Not enough arguments to support feature count";
                return -EINVAL;
        }

        if (!strcmp("handle_errors", argv[0]))
                ms->features |= DM_RAID1_HANDLE_ERRORS;
        else {
                ti->error = "Unrecognised feature requested";
                return -EINVAL;
        }

        (*args_used)++;

        return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
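/*
 * Example (illustrative) table line for a two-way mirror with an
 * in-core log and a 1024-sector region size:
 *
 *   0 2097152 mirror core 2 1024 nosync 2 /dev/sda1 0 /dev/sdb1 0
 *
 * Appending "1 handle_errors" would set DM_RAID1_HANDLE_ERRORS.
 */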
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        unsigned int nr_mirrors, m, args_used;
        struct mirror_set *ms;
        struct dirty_log *dl;

        dl = create_dirty_log(ti, argc, argv, &args_used);
        if (!dl)
                return -EINVAL;

        argv += args_used;
        argc -= args_used;

        if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
            nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
                ti->error = "Invalid number of mirrors";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        argv++, argc--;

        if (argc < nr_mirrors * 2) {
                ti->error = "Too few mirror arguments";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
                dm_destroy_dirty_log(dl);
                return -ENOMEM;
        }

        /* Get the mirror parameter sets */
        for (m = 0; m < nr_mirrors; m++) {
                r = get_mirror(ms, ti, m, argv);
                if (r) {
                        free_context(ms, ti, m);
                        return r;
                }
                argv += 2;
                argc -= 2;
        }

        ti->private = ms;
        ti->split_io = ms->rh.region_size;

        ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
        if (!ms->kmirrord_wq) {
                DMERR("couldn't start kmirrord");
                r = -ENOMEM;
                goto err_free_context;
        }
        INIT_WORK(&ms->kmirrord_work, do_mirror);
        INIT_WORK(&ms->trigger_event, trigger_event);

        r = parse_features(ms, argc, argv, &args_used);
        if (r)
                goto err_destroy_wq;

        argv += args_used;
        argc -= args_used;

        /*
         * Any read-balancing addition depends on the
         * DM_RAID1_HANDLE_ERRORS flag being present.
         * This is because the decision to balance depends
         * on the sync state of a region. If the above
         * flag is not present, we ignore errors; and
         * the sync state may be inaccurate.
         */

        if (argc) {
                ti->error = "Too many mirror arguments";
                r = -EINVAL;
                goto err_destroy_wq;
        }

        r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
        if (r)
                goto err_destroy_wq;

        wake(ms);
        return 0;

err_destroy_wq:
        destroy_workqueue(ms->kmirrord_wq);
err_free_context:
        free_context(ms, ti, ms->nr_mirrors);
        return r;
}

static void mirror_dtr(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        flush_workqueue(ms->kmirrord_wq);
        kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
        unsigned long flags;
        int should_wake = 0;
        struct bio_list *bl;

        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        spin_lock_irqsave(&ms->lock, flags);
        should_wake = !(bl->head);
        bio_list_add(bl, bio);
        spin_unlock_irqrestore(&ms->lock, flags);

        if (should_wake)
                wake(ms);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        int r, rw = bio_rw(bio);
        struct mirror *m;
        struct mirror_set *ms = ti->private;

        map_context->ll = bio_to_region(&ms->rh, bio);

        if (rw == WRITE) {
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        r = ms->rh.log->type->in_sync(ms->rh.log,
                                      bio_to_region(&ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return r;

        if (r == -EWOULDBLOCK)  /* FIXME: ugly */
                r = DM_MAPIO_SUBMITTED;

        /*
         * We don't want to fast track a recovery just for a read
         * ahead. So we just let it silently fail.
         * FIXME: get rid of this.
         */
        if (!r && rw == READA)
                return -EIO;

        if (!r) {
                /* Pass this io over to the daemon */
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        m = choose_mirror(ms, bio->bi_sector);
        if (!m)
                return -EIO;

        map_bio(ms, m, bio);
        return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                         int error, union map_info *map_context)
{
        int rw = bio_rw(bio);
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        region_t region = map_context->ll;

        /*
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE)
                rh_dec(&ms->rh, region);

        return 0;
}

static void mirror_presuspend(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;

        atomic_set(&ms->suspend, 1);

        /*
         * We must finish up all the work that we've
         * generated (i.e. recovery work).
         */
        rh_stop_recovery(&ms->rh);

        wait_event(_kmirrord_recovery_stopped,
                   !atomic_read(&ms->rh.recovery_in_flight));

        if (log->type->presuspend && log->type->presuspend(log))
                /* FIXME: need better error handling */
                DMWARN("log presuspend failed");

        /*
         * Now that recovery is complete/stopped and the
         * delayed bios are queued, we need to wait for
         * the worker thread to complete. This way,
         * we know that all of our I/O has been pushed.
         */
        flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
        struct mirror_set *ms = ti->private;
        struct dirty_log *log = ms->rh.log;

        if (log->type->postsuspend && log->type->postsuspend(log))
                /* FIXME: need better error handling */
                DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
        struct mirror_set *ms = ti->private;
        struct dirty_log *log = ms->rh.log;

        atomic_set(&ms->suspend, 0);
        if (log->type->resume && log->type->resume(log))
                /* FIXME: need better error handling */
                DMWARN("log resume failed");
        rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
                         char *result, unsigned int maxlen)
{
        unsigned int m, sz = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%d ", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT("%s ", ms->mirror[m].dev->name);

                DMEMIT("%llu/%llu 0 ",
                       (unsigned long long)ms->rh.log->type->
                               get_sync_count(ms->rh.log),
                       (unsigned long long)ms->nr_regions);

                sz += ms->rh.log->type->status(ms->rh.log, type, result+sz,
                                               maxlen-sz);

                break;

        case STATUSTYPE_TABLE:
                sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

                DMEMIT("%d", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT(" %s %llu", ms->mirror[m].dev->name,
                               (unsigned long long)ms->mirror[m].offset);

                if (ms->features & DM_RAID1_HANDLE_ERRORS)
                        DMEMIT(" 1 handle_errors");
        }

        return 0;
}

static struct target_type mirror_target = {
        .name        = "mirror",
        .version     = {1, 0, 3},
        .module      = THIS_MODULE,
        .ctr         = mirror_ctr,
        .dtr         = mirror_dtr,
        .map         = mirror_map,
        .end_io      = mirror_end_io,
        .presuspend  = mirror_presuspend,
        .postsuspend = mirror_postsuspend,
        .resume      = mirror_resume,
        .status      = mirror_status,
};

static int __init dm_mirror_init(void)
{
        int r;

        r = dm_dirty_log_init();
        if (r)
                return r;

        r = dm_register_target(&mirror_target);
        if (r < 0) {
                DMERR("Failed to register mirror target");
                dm_dirty_log_exit();
        }

        return r;
}

static void __exit dm_mirror_exit(void)
{
        int r;

        r = dm_unregister_target(&mirror_target);
        if (r < 0)
                DMERR("unregister failed %d", r);

        dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");