/*
 * Copyright (C) 2003 Sistina Software Limited.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-io.h"
#include "dm-log.h"
#include "kcopyd.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/log2.h>

#define DM_MSG_PREFIX "raid1"
#define DM_IO_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Region hash
 *
 * The mirror splits itself up into discrete regions.  Each
 * region can be in one of three states: clean, dirty,
 * nosync.  There is no need to put clean regions in the hash.
 *
 * In addition to being present in the hash table a region _may_
 * be present on one of four lists.
 *
 *   clean_regions: Regions on this list have no io pending to
 *   them, they are in sync, we are no longer interested in them,
 *   they are dull.  rh_update_states() will remove them from the
 *   hash table.
 *
 *   quiesced_regions: These regions have been spun down, ready
 *   for recovery.  rh_recovery_start() will remove regions from
 *   this list and hand them to kmirrord, which will schedule the
 *   recovery io with kcopyd.
 *
 *   recovered_regions: Regions that kcopyd has successfully
 *   recovered.  rh_update_states() will now schedule any delayed
 *   io, up the recovery_count, and remove the region from the
 *   hash.
 *
 *   failed_recovered_regions: Regions that kcopyd failed to
 *   recover.  rh_update_states() marks these out of sync in the
 *   log (when the handle_errors feature is set), dispatches any
 *   delayed io and removes them from the hash.
 *
 * There are 2 locks:
 *   A rw spin lock 'hash_lock' protects just the hash table,
 *   this is never held in write mode from interrupt context,
 *   which I believe means that we only have to disable irqs when
 *   doing a write lock.
 *
 *   An ordinary spin lock 'region_lock' that protects the four
 *   lists in the region_hash, with the 'state', 'list' and
 *   'delayed_bios' fields of the regions.  This is used from irq
 *   context, so all other uses will have to suspend local irqs.
 *---------------------------------------------------------------*/
struct mirror_set;
struct region_hash {
        struct mirror_set *ms;
        uint32_t region_size;
        unsigned region_shift;

        /* holds persistent region state */
        struct dirty_log *log;

        /* hash table */
        rwlock_t hash_lock;
        mempool_t *region_pool;
        unsigned int mask;
        unsigned int nr_buckets;
        struct list_head *buckets;

        spinlock_t region_lock;
        atomic_t recovery_in_flight;
        struct semaphore recovery_count;
        struct list_head clean_regions;
        struct list_head quiesced_regions;
        struct list_head recovered_regions;
        struct list_head failed_recovered_regions;
};

enum {
        RH_CLEAN,
        RH_DIRTY,
        RH_NOSYNC,
        RH_RECOVERING
};

struct region {
        struct region_hash *rh; /* FIXME: can we get rid of this ? */
        region_t key;
        int state;

        struct list_head hash_list;
        struct list_head list;

        atomic_t pending;
        struct bio_list delayed_bios;
};

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
struct mirror {
        atomic_t error_count;
        struct dm_dev *dev;
        sector_t offset;
};

struct mirror_set {
        struct dm_target *ti;
        struct list_head list;
        struct region_hash rh;
        struct kcopyd_client *kcopyd_client;
        uint64_t features;

        spinlock_t lock;        /* protects the next two lists */
        struct bio_list reads;
        struct bio_list writes;

        struct dm_io_client *io_client;

        /* recovery */
        region_t nr_regions;
        int in_sync;
        int log_failure;

        struct mirror *default_mirror;  /* Default mirror */

        struct workqueue_struct *kmirrord_wq;
        struct work_struct kmirrord_work;

        unsigned int nr_mirrors;
        struct mirror mirror[0];
};

/*
 * Conversion fns
 */
static inline region_t bio_to_region(struct region_hash *rh, struct bio *bio)
{
        return (bio->bi_sector - rh->ms->ti->begin) >> rh->region_shift;
}

static inline sector_t region_to_sector(struct region_hash *rh, region_t region)
{
        return region << rh->region_shift;
}

static void wake(struct mirror_set *ms)
{
        queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

/* FIXME move this */
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw);

#define MIN_REGIONS 64
#define MAX_RECOVERY 1
static int rh_init(struct region_hash *rh, struct mirror_set *ms,
                   struct dirty_log *log, uint32_t region_size,
                   region_t nr_regions)
{
        unsigned int nr_buckets, max_buckets;
        size_t i;

        /*
         * Calculate a suitable number of buckets for our hash
         * table.
         */
        max_buckets = nr_regions >> 6;
        for (nr_buckets = 128u; nr_buckets < max_buckets; nr_buckets <<= 1)
                ;
        nr_buckets >>= 1;

        rh->ms = ms;
        rh->log = log;
        rh->region_size = region_size;
        rh->region_shift = ffs(region_size) - 1;
        rwlock_init(&rh->hash_lock);
        rh->mask = nr_buckets - 1;
        rh->nr_buckets = nr_buckets;

        rh->buckets = vmalloc(nr_buckets * sizeof(*rh->buckets));
        if (!rh->buckets) {
                DMERR("unable to allocate region hash memory");
                return -ENOMEM;
        }

        for (i = 0; i < nr_buckets; i++)
                INIT_LIST_HEAD(rh->buckets + i);

        spin_lock_init(&rh->region_lock);
        sema_init(&rh->recovery_count, 0);
        atomic_set(&rh->recovery_in_flight, 0);
        INIT_LIST_HEAD(&rh->clean_regions);
        INIT_LIST_HEAD(&rh->quiesced_regions);
        INIT_LIST_HEAD(&rh->recovered_regions);
        INIT_LIST_HEAD(&rh->failed_recovered_regions);

        rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS,
                                                      sizeof(struct region));
        if (!rh->region_pool) {
                vfree(rh->buckets);
                rh->buckets = NULL;
                return -ENOMEM;
        }

        return 0;
}

static void rh_exit(struct region_hash *rh)
{
        unsigned int h;
        struct region *reg, *nreg;

        BUG_ON(!list_empty(&rh->quiesced_regions));
        for (h = 0; h < rh->nr_buckets; h++) {
                list_for_each_entry_safe(reg, nreg, rh->buckets + h, hash_list) {
                        BUG_ON(atomic_read(&reg->pending));
                        mempool_free(reg, rh->region_pool);
                }
        }

        if (rh->log)
                dm_destroy_dirty_log(rh->log);
        if (rh->region_pool)
                mempool_destroy(rh->region_pool);
        vfree(rh->buckets);
}

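/*
 * Multiplicative hash: scatter the region number across the power of
 * two sized bucket array.
 */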
#define RH_HASH_MULT 2654435387U

static inline unsigned int rh_hash(struct region_hash *rh, region_t region)
{
        return (unsigned int) ((region * RH_HASH_MULT) >> 12) & rh->mask;
}

static struct region *__rh_lookup(struct region_hash *rh, region_t region)
{
        struct region *reg;

        list_for_each_entry (reg, rh->buckets + rh_hash(rh, region), hash_list)
                if (reg->key == region)
                        return reg;

        return NULL;
}

static void __rh_insert(struct region_hash *rh, struct region *reg)
{
        unsigned int h = rh_hash(rh, reg->key);
        list_add(&reg->hash_list, rh->buckets + h);
}

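/*
 * Called with the hash lock held for read.  Drop it while allocating
 * (the allocation may sleep), then retake it in write mode to insert
 * the new region.  If another thread inserted the same region in the
 * meantime, free our copy and return theirs.  Returns with the hash
 * lock held for read again.
 */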
static struct region *__rh_alloc(struct region_hash *rh, region_t region)
{
        struct region *reg, *nreg;

        read_unlock(&rh->hash_lock);
        nreg = mempool_alloc(rh->region_pool, GFP_ATOMIC);
        if (unlikely(!nreg))
                nreg = kmalloc(sizeof(struct region), GFP_NOIO);
        nreg->state = rh->log->type->in_sync(rh->log, region, 1) ?
                RH_CLEAN : RH_NOSYNC;
        nreg->rh = rh;
        nreg->key = region;

        INIT_LIST_HEAD(&nreg->list);

        atomic_set(&nreg->pending, 0);
        bio_list_init(&nreg->delayed_bios);
        write_lock_irq(&rh->hash_lock);

        reg = __rh_lookup(rh, region);
        if (reg)
                /* we lost the race */
                mempool_free(nreg, rh->region_pool);
        else {
                __rh_insert(rh, nreg);
                if (nreg->state == RH_CLEAN) {
                        spin_lock(&rh->region_lock);
                        list_add(&nreg->list, &rh->clean_regions);
                        spin_unlock(&rh->region_lock);
                }
                reg = nreg;
        }
        write_unlock_irq(&rh->hash_lock);
        read_lock(&rh->hash_lock);

        return reg;
}

static inline struct region *__rh_find(struct region_hash *rh, region_t region)
{
        struct region *reg;

        reg = __rh_lookup(rh, region);
        if (!reg)
                reg = __rh_alloc(rh, region);

        return reg;
}

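/*
 * Return the state of a region.  Regions not present in the hash are
 * resolved through the dirty log instead.
 */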
static int rh_state(struct region_hash *rh, region_t region, int may_block)
{
        int r;
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        if (reg)
                return reg->state;

        /*
         * The region wasn't in the hash, so we fall back to the
         * dirty log.
         */
        r = rh->log->type->in_sync(rh->log, region, may_block);

        /*
         * Any error from the dirty log (eg. -EWOULDBLOCK) gets
         * taken as a RH_NOSYNC
         */
        return r == 1 ? RH_CLEAN : RH_NOSYNC;
}

static inline int rh_in_sync(struct region_hash *rh,
                             region_t region, int may_block)
{
        int state = rh_state(rh, region, may_block);
        return state == RH_CLEAN || state == RH_DIRTY;
}

static void dispatch_bios(struct mirror_set *ms, struct bio_list *bio_list)
{
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list))) {
                queue_bio(ms, bio, WRITE);
        }
}

static void complete_resync_work(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        rh->log->type->set_region_sync(rh->log, reg->key, success);
        dispatch_bios(rh->ms, &reg->delayed_bios);
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                wake_up_all(&_kmirrord_recovery_stopped);
        up(&rh->recovery_count);
}

static void rh_update_states(struct region_hash *rh)
{
        struct region *reg, *next;

        LIST_HEAD(clean);
        LIST_HEAD(recovered);
        LIST_HEAD(failed_recovered);

        /*
         * Quickly grab the lists.
         */
        write_lock_irq(&rh->hash_lock);
        spin_lock(&rh->region_lock);
        if (!list_empty(&rh->clean_regions)) {
                list_splice(&rh->clean_regions, &clean);
                INIT_LIST_HEAD(&rh->clean_regions);

                list_for_each_entry(reg, &clean, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->recovered_regions)) {
                list_splice(&rh->recovered_regions, &recovered);
                INIT_LIST_HEAD(&rh->recovered_regions);

                list_for_each_entry (reg, &recovered, list)
                        list_del(&reg->hash_list);
        }

        if (!list_empty(&rh->failed_recovered_regions)) {
                list_splice(&rh->failed_recovered_regions, &failed_recovered);
                INIT_LIST_HEAD(&rh->failed_recovered_regions);

                list_for_each_entry(reg, &failed_recovered, list)
                        list_del(&reg->hash_list);
        }

        spin_unlock(&rh->region_lock);
        write_unlock_irq(&rh->hash_lock);

        /*
         * All the regions on the recovered and clean lists have
         * now been pulled out of the system, so no need to do
         * any more locking.
         */
        list_for_each_entry_safe (reg, next, &recovered, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                complete_resync_work(reg, 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &failed_recovered, list) {
                complete_resync_work(reg, errors_handled(rh->ms) ? 0 : 1);
                mempool_free(reg, rh->region_pool);
        }

        list_for_each_entry_safe(reg, next, &clean, list) {
                rh->log->type->clear_region(rh->log, reg->key);
                mempool_free(reg, rh->region_pool);
        }

        rh->log->type->flush(rh->log);
}

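/*
 * rh_inc/rh_dec track the writes outstanding against a region.  The
 * first write to a clean region marks it dirty in the log; when the
 * last write drains, rh_dec moves the region to the clean or quiesced
 * list and wakes the daemon.
 */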
static void rh_inc(struct region_hash *rh, region_t region)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);

        spin_lock_irq(&rh->region_lock);
        atomic_inc(&reg->pending);

        if (reg->state == RH_CLEAN) {
                reg->state = RH_DIRTY;
                list_del_init(&reg->list);      /* take off the clean list */
                spin_unlock_irq(&rh->region_lock);

                rh->log->type->mark_region(rh->log, reg->key);
        } else
                spin_unlock_irq(&rh->region_lock);

        read_unlock(&rh->hash_lock);
}

static void rh_inc_pending(struct region_hash *rh, struct bio_list *bios)
{
        struct bio *bio;

        for (bio = bios->head; bio; bio = bio->bi_next)
                rh_inc(rh, bio_to_region(rh, bio));
}

static void rh_dec(struct region_hash *rh, region_t region)
{
        unsigned long flags;
        struct region *reg;
        int should_wake = 0;

        read_lock(&rh->hash_lock);
        reg = __rh_lookup(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irqsave(&rh->region_lock, flags);
        if (atomic_dec_and_test(&reg->pending)) {
                /*
                 * There is no pending I/O for this region.
                 * We can move the region to corresponding list for next action.
                 * At this point, the region is not yet connected to any list.
                 *
                 * If the state is RH_NOSYNC, the region should be kept off
                 * from clean list.
                 * The hash entry for RH_NOSYNC will remain in memory
                 * until the region is recovered or the map is reloaded.
                 */

                /* do nothing for RH_NOSYNC */
                if (reg->state == RH_RECOVERING) {
                        list_add_tail(&reg->list, &rh->quiesced_regions);
                } else if (reg->state == RH_DIRTY) {
                        reg->state = RH_CLEAN;
                        list_add(&reg->list, &rh->clean_regions);
                }
                should_wake = 1;
        }
        spin_unlock_irqrestore(&rh->region_lock, flags);

        if (should_wake)
                wake(rh->ms);
}

/*
 * Starts quiescing a region in preparation for recovery.
 */
static int __rh_recovery_prepare(struct region_hash *rh)
{
        int r;
        struct region *reg;
        region_t region;

        /*
         * Ask the dirty log what's next.
         */
        r = rh->log->type->get_resync_work(rh->log, &region);
        if (r <= 0)
                return r;

        /*
         * Get this region, and start it quiescing by setting the
         * recovering flag.
         */
        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, region);
        read_unlock(&rh->hash_lock);

        spin_lock_irq(&rh->region_lock);
        reg->state = RH_RECOVERING;

        /* Already quiesced ? */
        if (atomic_read(&reg->pending))
                list_del_init(&reg->list);
        else
                list_move(&reg->list, &rh->quiesced_regions);

        spin_unlock_irq(&rh->region_lock);

        return 1;
}

static void rh_recovery_prepare(struct region_hash *rh)
{
        /* Extra reference to avoid race with rh_stop_recovery */
        atomic_inc(&rh->recovery_in_flight);

        while (!down_trylock(&rh->recovery_count)) {
                atomic_inc(&rh->recovery_in_flight);
                if (__rh_recovery_prepare(rh) <= 0) {
                        atomic_dec(&rh->recovery_in_flight);
                        up(&rh->recovery_count);
                        break;
                }
        }

        /* Drop the extra reference */
        if (atomic_dec_and_test(&rh->recovery_in_flight))
                wake_up_all(&_kmirrord_recovery_stopped);
}

/*
 * Returns any quiesced regions.
 */
static struct region *rh_recovery_start(struct region_hash *rh)
{
        struct region *reg = NULL;

        spin_lock_irq(&rh->region_lock);
        if (!list_empty(&rh->quiesced_regions)) {
                reg = list_entry(rh->quiesced_regions.next,
                                 struct region, list);
                list_del_init(&reg->list);      /* remove from the quiesced list */
        }
        spin_unlock_irq(&rh->region_lock);

        return reg;
}

static void rh_recovery_end(struct region *reg, int success)
{
        struct region_hash *rh = reg->rh;

        spin_lock_irq(&rh->region_lock);
        if (success)
                list_add(&reg->list, &reg->rh->recovered_regions);
        else {
                reg->state = RH_NOSYNC;
                list_add(&reg->list, &reg->rh->failed_recovered_regions);
        }
        spin_unlock_irq(&rh->region_lock);

        wake(rh->ms);
}

static int rh_flush(struct region_hash *rh)
{
        return rh->log->type->flush(rh->log);
}

static void rh_delay(struct region_hash *rh, struct bio *bio)
{
        struct region *reg;

        read_lock(&rh->hash_lock);
        reg = __rh_find(rh, bio_to_region(rh, bio));
        bio_list_add(&reg->delayed_bios, bio);
        read_unlock(&rh->hash_lock);
}

static void rh_stop_recovery(struct region_hash *rh)
{
        int i;

        /* wait for any recovering regions */
        for (i = 0; i < MAX_RECOVERY; i++)
                down(&rh->recovery_count);
}

static void rh_start_recovery(struct region_hash *rh)
{
        int i;

        for (i = 0; i < MAX_RECOVERY; i++)
                up(&rh->recovery_count);

        wake(rh->ms);
}

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror_set struct away inside
 * bi_next for write buffers.  This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror_set *bio_get_ms(struct bio *bio)
{
        return (struct mirror_set *) bio->bi_next;
}

static void bio_set_ms(struct bio *bio, struct mirror_set *ms)
{
        bio->bi_next = (struct bio *) ms;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned int write_err,
                              void *context)
{
        struct region *reg = (struct region *) context;

        if (read_err)
                /* Read error means the failure of default mirror. */
                DMERR_LIMIT("Unable to read primary mirror during recovery");

        if (write_err)
                DMERR_LIMIT("Write error during recovery (error = 0x%x)",
                            write_err);

        rh_recovery_end(reg, !(read_err || write_err));
}

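/*
 * Hand one region to kcopyd: copy it from the default mirror to every
 * other mirror.  recovery_complete() runs when the copy finishes.
 */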
static int recover(struct mirror_set *ms, struct region *reg)
{
        int r;
        unsigned int i;
        struct io_region from, to[KCOPYD_MAX_REGIONS], *dest;
        struct mirror *m;
        unsigned long flags = 0;

        /* fill in the source */
        m = ms->default_mirror;
        from.bdev = m->dev->bdev;
        from.sector = m->offset + region_to_sector(reg->rh, reg->key);
        if (reg->key == (ms->nr_regions - 1)) {
                /*
                 * The final region may be smaller than
                 * region_size.
                 */
                from.count = ms->ti->len & (reg->rh->region_size - 1);
                if (!from.count)
                        from.count = reg->rh->region_size;
        } else
                from.count = reg->rh->region_size;

        /* fill in the destinations */
        for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
                if (&ms->mirror[i] == ms->default_mirror)
                        continue;

                m = ms->mirror + i;
                dest->bdev = m->dev->bdev;
                dest->sector = m->offset + region_to_sector(reg->rh, reg->key);
                dest->count = from.count;
                dest++;
        }

        /* hand to kcopyd */
        set_bit(KCOPYD_IGNORE_ERROR, &flags);
        r = kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to, flags,
                        recovery_complete, reg);

        return r;
}

static void do_recovery(struct mirror_set *ms)
{
        int r;
        struct region *reg;
        struct dirty_log *log = ms->rh.log;

        /*
         * Start quiescing some regions.
         */
        rh_recovery_prepare(&ms->rh);

        /*
         * Copy any already quiesced regions.
         */
        while ((reg = rh_recovery_start(&ms->rh))) {
                r = recover(ms, reg);
                if (r)
                        rh_recovery_end(reg, 0);
        }

        /*
         * Update the in sync flag.
         */
        if (!ms->in_sync &&
            (log->type->get_sync_count(log) == ms->nr_regions)) {
                /* the sync is complete */
                dm_table_event(ms->ti->table);
                ms->in_sync = 1;
        }
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
        /* FIXME: add read balancing */
        return ms->default_mirror;
}

/*
 * remap a buffer to a particular mirror.
 */
static void map_bio(struct mirror_set *ms, struct mirror *m, struct bio *bio)
{
        bio->bi_bdev = m->dev->bdev;
        bio->bi_sector = m->offset + (bio->bi_sector - ms->ti->begin);
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
        region_t region;
        struct bio *bio;
        struct mirror *m;

        while ((bio = bio_list_pop(reads))) {
                region = bio_to_region(&ms->rh, bio);

                /*
                 * We can only read balance if the region is in sync.
                 */
                if (rh_in_sync(&ms->rh, region, 1))
                        m = choose_mirror(ms, bio->bi_sector);
                else
                        m = ms->default_mirror;

                map_bio(ms, m, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:        increment pending, use dm_io to write to *all* mirrors
 * RECOVERING:  delay the io until recovery completes
 * NOSYNC:      increment pending, just write to the default mirror
 *---------------------------------------------------------------*/
static void write_callback(unsigned long error, void *context)
{
        unsigned int i;
        int uptodate = 1;
        struct bio *bio = (struct bio *) context;
        struct mirror_set *ms;

        ms = bio_get_ms(bio);
        bio_set_ms(bio, NULL);

        /*
         * NOTE: We don't decrement the pending count here,
         * instead it is done by the targets endio function.
         * This way we handle both writes to SYNC and NOSYNC
         * regions with the same code.
         */

        if (error) {
                /*
                 * only error the io if all mirrors failed.
                 * FIXME: bogus
                 */
                uptodate = 0;
                for (i = 0; i < ms->nr_mirrors; i++)
                        if (!test_bit(i, &error)) {
                                uptodate = 1;
                                break;
                        }
        }
        bio_endio(bio, 0);
}

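/*
 * Issue the bio's data to every mirror with a single asynchronous dm_io
 * request; write_callback() runs once all legs have completed.
 */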
static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct io_region io[KCOPYD_MAX_REGIONS+1];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_rw = WRITE,
                .mem.type = DM_IO_BVEC,
                .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
        };

        for (i = 0; i < ms->nr_mirrors; i++) {
                m = ms->mirror + i;

                io[i].bdev = m->dev->bdev;
                io[i].sector = m->offset + (bio->bi_sector - ms->ti->begin);
                io[i].count = bio->bi_size >> 9;
        }

        bio_set_ms(bio, ms);

        (void) dm_io(&io_req, ms->nr_mirrors, io, NULL);
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
        int state;
        struct bio *bio;
        struct bio_list sync, nosync, recover, *this_list = NULL;

        if (!writes->head)
                return;

        /*
         * Classify each write.
         */
        bio_list_init(&sync);
        bio_list_init(&nosync);
        bio_list_init(&recover);

        while ((bio = bio_list_pop(writes))) {
                state = rh_state(&ms->rh, bio_to_region(&ms->rh, bio), 1);
                switch (state) {
                case RH_CLEAN:
                case RH_DIRTY:
                        this_list = &sync;
                        break;

                case RH_NOSYNC:
                        this_list = &nosync;
                        break;

                case RH_RECOVERING:
                        this_list = &recover;
                        break;
                }

                bio_list_add(this_list, bio);
        }

        /*
         * Increment the pending counts for any regions that will
         * be written to (writes to recover regions are going to
         * be delayed).
         */
        rh_inc_pending(&ms->rh, &sync);
        rh_inc_pending(&ms->rh, &nosync);
        ms->log_failure = rh_flush(&ms->rh) ? 1 : 0;

        /*
         * Dispatch io.
         */
        if (unlikely(ms->log_failure))
                while ((bio = bio_list_pop(&sync)))
                        bio_endio(bio, -EIO);
        else while ((bio = bio_list_pop(&sync)))
                do_write(ms, bio);

        while ((bio = bio_list_pop(&recover)))
                rh_delay(&ms->rh, bio);

        while ((bio = bio_list_pop(&nosync))) {
                map_bio(ms, ms->default_mirror, bio);
                generic_make_request(bio);
        }
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
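/*
 * The workqueue function: reap region state changes, kick off any
 * pending recovery, then issue the reads and writes that were queued
 * while we slept.
 */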
static void do_mirror(struct work_struct *work)
{
        struct mirror_set *ms = container_of(work, struct mirror_set,
                                             kmirrord_work);
        struct bio_list reads, writes;

        spin_lock(&ms->lock);
        reads = ms->reads;
        writes = ms->writes;
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        spin_unlock(&ms->lock);

        rh_update_states(&ms->rh);
        do_recovery(ms);
        do_reads(ms, &reads);
        do_writes(ms, &writes);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
                                        uint32_t region_size,
                                        struct dm_target *ti,
                                        struct dirty_log *dl)
{
        size_t len;
        struct mirror_set *ms = NULL;

        if (array_too_big(sizeof(*ms), sizeof(ms->mirror[0]), nr_mirrors))
                return NULL;

        len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

        ms = kzalloc(len, GFP_KERNEL);
        if (!ms) {
                ti->error = "Cannot allocate mirror context";
                return NULL;
        }

        spin_lock_init(&ms->lock);

        ms->ti = ti;
        ms->nr_mirrors = nr_mirrors;
        ms->nr_regions = dm_sector_div_up(ti->len, region_size);
        ms->in_sync = 0;
        ms->default_mirror = &ms->mirror[DEFAULT_MIRROR];

        ms->io_client = dm_io_client_create(DM_IO_PAGES);
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
                return NULL;
        }

        if (rh_init(&ms->rh, ms, dl, region_size, ms->nr_regions)) {
                ti->error = "Error creating dirty region hash";
                dm_io_client_destroy(ms->io_client);
                kfree(ms);
                return NULL;
        }

        return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
                         unsigned int m)
{
        while (m--)
                dm_put_device(ti, ms->mirror[m].dev);

        dm_io_client_destroy(ms->io_client);
        rh_exit(&ms->rh);
        kfree(ms);
}

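/*
 * A usable region size is a power of two, a whole number of pages and
 * no larger than the target itself.
 */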
static inline int _check_region_size(struct dm_target *ti, uint32_t size)
{
        return !(size % (PAGE_SIZE >> 9) || !is_power_of_2(size) ||
                 size > ti->len);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
                      unsigned int mirror, char **argv)
{
        unsigned long long offset;

        if (sscanf(argv[1], "%llu", &offset) != 1) {
                ti->error = "Invalid offset";
                return -EINVAL;
        }

        if (dm_get_device(ti, argv[0], offset, ti->len,
                          dm_table_get_mode(ti->table),
                          &ms->mirror[mirror].dev)) {
                ti->error = "Device lookup failure";
                return -ENXIO;
        }

        ms->mirror[mirror].offset = offset;

        return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dirty_log *create_dirty_log(struct dm_target *ti,
                                          unsigned int argc, char **argv,
                                          unsigned int *args_used)
{
        unsigned int param_count;
        struct dirty_log *dl;

        if (argc < 2) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        if (sscanf(argv[1], "%u", &param_count) != 1) {
                ti->error = "Invalid mirror log argument count";
                return NULL;
        }

        *args_used = 2 + param_count;

        if (argc < *args_used) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        dl = dm_create_dirty_log(argv[0], ti, param_count, argv + 2);
        if (!dl) {
                ti->error = "Error creating mirror dirty log";
                return NULL;
        }

        if (!_check_region_size(ti, dl->type->get_region_size(dl))) {
                ti->error = "Invalid region size";
                dm_destroy_dirty_log(dl);
                return NULL;
        }

        return dl;
}

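/*
 * Optional trailing arguments take the form "#features <features>".
 * The only feature currently recognised is "handle_errors".
 */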
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
                          unsigned *args_used)
{
        unsigned num_features;
        struct dm_target *ti = ms->ti;

        *args_used = 0;

        if (!argc)
                return 0;

        if (sscanf(argv[0], "%u", &num_features) != 1) {
                ti->error = "Invalid number of features";
                return -EINVAL;
        }

        argc--;
        argv++;
        (*args_used)++;

        if (num_features > argc) {
                ti->error = "Not enough arguments to support feature count";
                return -EINVAL;
        }

        if (!strcmp("handle_errors", argv[0]))
                ms->features |= DM_RAID1_HANDLE_ERRORS;
        else {
                ti->error = "Unrecognised feature requested";
                return -EINVAL;
        }

        (*args_used)++;

        return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        unsigned int nr_mirrors, m, args_used;
        struct mirror_set *ms;
        struct dirty_log *dl;

        dl = create_dirty_log(ti, argc, argv, &args_used);
        if (!dl)
                return -EINVAL;

        argv += args_used;
        argc -= args_used;

        if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
            nr_mirrors < 2 || nr_mirrors > KCOPYD_MAX_REGIONS + 1) {
                ti->error = "Invalid number of mirrors";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        argv++, argc--;

        if (argc < nr_mirrors * 2) {
                ti->error = "Too few mirror arguments";
                dm_destroy_dirty_log(dl);
                return -EINVAL;
        }

        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
                dm_destroy_dirty_log(dl);
                return -ENOMEM;
        }

        /* Get the mirror parameter sets */
        for (m = 0; m < nr_mirrors; m++) {
                r = get_mirror(ms, ti, m, argv);
                if (r) {
                        free_context(ms, ti, m);
                        return r;
                }
                argv += 2;
                argc -= 2;
        }

        ti->private = ms;
        ti->split_io = ms->rh.region_size;

        ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
        if (!ms->kmirrord_wq) {
                DMERR("couldn't start kmirrord");
                r = -ENOMEM;
                goto err_free_context;
        }
        INIT_WORK(&ms->kmirrord_work, do_mirror);

        r = parse_features(ms, argc, argv, &args_used);
        if (r)
                goto err_destroy_wq;

        argv += args_used;
        argc -= args_used;

        /*
         * Any read-balancing addition depends on the
         * DM_RAID1_HANDLE_ERRORS flag being present.
         * This is because the decision to balance depends
         * on the sync state of a region.  If the above
         * flag is not present, we ignore errors; and
         * the sync state may be inaccurate.
         */

        if (argc) {
                ti->error = "Too many mirror arguments";
                r = -EINVAL;
                goto err_destroy_wq;
        }

        r = kcopyd_client_create(DM_IO_PAGES, &ms->kcopyd_client);
        if (r)
                goto err_destroy_wq;

        wake(ms);
        return 0;

err_destroy_wq:
        destroy_workqueue(ms->kmirrord_wq);
err_free_context:
        free_context(ms, ti, ms->nr_mirrors);
        return r;
}

static void mirror_dtr(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        flush_workqueue(ms->kmirrord_wq);
        kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
}

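/*
 * Add a bio to the daemon's read or write list, waking the daemon when
 * the list was previously empty.
 */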
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
        int should_wake = 0;
        struct bio_list *bl;

        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        spin_lock(&ms->lock);
        should_wake = !(bl->head);
        bio_list_add(bl, bio);
        spin_unlock(&ms->lock);

        if (should_wake)
                wake(ms);
}

/*
 * Mirror mapping function
 */
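/*
 * Writes are always handed to the daemon so the region can be marked
 * dirty first.  Reads are remapped immediately when the region is known
 * to be in sync; otherwise they too are queued for the daemon.
 */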
static int mirror_map(struct dm_target *ti, struct bio *bio,
                      union map_info *map_context)
{
        int r, rw = bio_rw(bio);
        struct mirror *m;
        struct mirror_set *ms = ti->private;

        map_context->ll = bio_to_region(&ms->rh, bio);

        if (rw == WRITE) {
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        r = ms->rh.log->type->in_sync(ms->rh.log,
                                      bio_to_region(&ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return r;

        if (r == -EWOULDBLOCK)  /* FIXME: ugly */
                r = DM_MAPIO_SUBMITTED;

        /*
         * We don't want to fast track a recovery just for a read
         * ahead.  So we just let it silently fail.
         * FIXME: get rid of this.
         */
        if (!r && rw == READA)
                return -EIO;

        if (!r) {
                /* Pass this io over to the daemon */
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        m = choose_mirror(ms, bio->bi_sector);
        if (!m)
                return -EIO;

        map_bio(ms, m, bio);
        return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
                         int error, union map_info *map_context)
{
        int rw = bio_rw(bio);
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        region_t region = map_context->ll;

        /*
         * We need to dec pending if this was a write.
         */
        if (rw == WRITE)
                rh_dec(&ms->rh, region);

        return 0;
}

static void mirror_postsuspend(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;

        rh_stop_recovery(&ms->rh);

        /* Wait for all I/O we generated to complete */
        wait_event(_kmirrord_recovery_stopped,
                   !atomic_read(&ms->rh.recovery_in_flight));

        if (log->type->suspend && log->type->suspend(log))
                /* FIXME: need better error handling */
                DMWARN("log suspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;
        struct dirty_log *log = ms->rh.log;

        if (log->type->resume && log->type->resume(log))
                /* FIXME: need better error handling */
                DMWARN("log resume failed");
        rh_start_recovery(&ms->rh);
}

static int mirror_status(struct dm_target *ti, status_type_t type,
                         char *result, unsigned int maxlen)
{
        unsigned int m, sz = 0;
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%d ", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT("%s ", ms->mirror[m].dev->name);

                DMEMIT("%llu/%llu 0 ",
                       (unsigned long long)ms->rh.log->type->
                                get_sync_count(ms->rh.log),
                       (unsigned long long)ms->nr_regions);

                sz += ms->rh.log->type->status(ms->rh.log, type, result+sz, maxlen-sz);

                break;

        case STATUSTYPE_TABLE:
                sz = ms->rh.log->type->status(ms->rh.log, type, result, maxlen);

                DMEMIT("%d", ms->nr_mirrors);
                for (m = 0; m < ms->nr_mirrors; m++)
                        DMEMIT(" %s %llu", ms->mirror[m].dev->name,
                               (unsigned long long)ms->mirror[m].offset);

                if (ms->features & DM_RAID1_HANDLE_ERRORS)
                        DMEMIT(" 1 handle_errors");
        }

        return 0;
}

static struct target_type mirror_target = {
        .name        = "mirror",
        .version     = {1, 0, 3},
        .module      = THIS_MODULE,
        .ctr         = mirror_ctr,
        .dtr         = mirror_dtr,
        .map         = mirror_map,
        .end_io      = mirror_end_io,
        .postsuspend = mirror_postsuspend,
        .resume      = mirror_resume,
        .status      = mirror_status,
};

static int __init dm_mirror_init(void)
{
        int r;

        r = dm_dirty_log_init();
        if (r)
                return r;

        r = dm_register_target(&mirror_target);
        if (r < 0) {
                DMERR("Failed to register mirror target");
                dm_dirty_log_exit();
        }

        return r;
}

static void __exit dm_mirror_exit(void)
{
        int r;

        r = dm_unregister_target(&mirror_target);
        if (r < 0)
                DMERR("unregister failed %d", r);

        dm_dirty_log_exit();
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");