/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */
#define DM_IO_PAGES 64
#define DM_KCOPYD_PAGES 64

#define DM_RAID1_HANDLE_ERRORS 0x01
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;
	mempool_t *read_record_pool;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

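/*
 * Note: delayed_wake() below coalesces wakeups.  The timer_pending bit
 * ensures at most one timer is armed at a time, so a burst of calls
 * produces a single deferred wakeup roughly a fifth of a second later
 * (jiffies + HZ / 5).
 */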
static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

#define MIN_READ_RECORDS 20
struct dm_raid1_read_record {
	struct mirror *m;
	struct dm_bio_details details;
};

static struct kmem_cache *_dm_raid1_read_record_cache;

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync) {
		/*
		 * Better to issue requests to the same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
		if (!atomic_read(&new->error_count)) {
			set_default_mirror(new);
			break;
		}

	if (unlikely(new == ms->mirror + ms->nr_mirrors))
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE_BARRIER,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.bvec = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}
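
/*
 * Note: mirror_flush() issues an empty barrier (io[i].count == 0) to
 * every leg; it is handed to dm_dirty_log_create() later in this file
 * as the log's flush callback.  A leg that fails the flush is marked
 * with DM_RAID1_FLUSH_ERROR.
 */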

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;
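	/*
	 * Worked example (illustrative numbers): with ti->len = 10000
	 * sectors and region_size = 1024 (region sizes are powers of
	 * two), the final region copies 10000 & 1023 = 784 sectors
	 * rather than a full 1024.
	 */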
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340
341 /* fill in the destinations */
342 for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
Jonathan Brassow72f4b312008-02-08 02:11:29 +0000343 if (&ms->mirror[i] == get_default_mirror(ms))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344 continue;
345
346 m = ms->mirror + i;
347 dest->bdev = m->dev->bdev;
Heinz Mauelshagen1f965b12008-10-21 17:45:06 +0100348 dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700349 dest->count = from.count;
350 dest++;
351 }
352
353 /* hand to kcopyd */
Jonathan Brassowf7c83e22008-10-10 13:36:59 +0100354 if (!errors_handled(ms))
355 set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);
356
Heinz Mauelshageneb69aca2008-04-24 21:43:19 +0100357 r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
358 flags, recovery_complete, reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
360 return r;
361}
362
363static void do_recovery(struct mirror_set *ms)
364{
Heinz Mauelshagen1f965b12008-10-21 17:45:06 +0100365 struct dm_region *reg;
366 struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367 int r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368
369 /*
370 * Start quiescing some regions.
371 */
Heinz Mauelshagen1f965b12008-10-21 17:45:06 +0100372 dm_rh_recovery_prepare(ms->rh);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373
374 /*
375 * Copy any already quiesced regions.
376 */
Heinz Mauelshagen1f965b12008-10-21 17:45:06 +0100377 while ((reg = dm_rh_recovery_start(ms->rh))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378 r = recover(ms, reg);
379 if (r)
Heinz Mauelshagen1f965b12008-10-21 17:45:06 +0100380 dm_rh_recovery_end(reg, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381 }
382
383 /*
384 * Update the in sync flag.
385 */
386 if (!ms->in_sync &&
387 (log->type->get_sync_count(log) == ms->nr_regions)) {
388 /* the sync is complete */
389 dm_table_event(ms->ti->table);
390 ms->in_sync = 1;
391 }
392}
393
394/*-----------------------------------------------------------------
395 * Reads
396 *---------------------------------------------------------------*/
397static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
398{
Jonathan Brassow06386bb2008-02-08 02:11:37 +0000399 struct mirror *m = get_default_mirror(ms);
400
401 do {
402 if (likely(!atomic_read(&m->error_count)))
403 return m;
404
405 if (m-- == ms->mirror)
406 m += ms->nr_mirrors;
407 } while (m != get_default_mirror(ms));
408
409 return NULL;
410}
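
/*
 * Note on the loop above: the scan walks backwards from the default
 * mirror and wraps, so with three legs and default_mirror == 1 the
 * candidates are tried in the order 1, 0, 2.  The first leg with a
 * zero error_count is chosen; NULL means every leg has failed.
 */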

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_size))
		return 0;
	return m->offset + (bio->bi_sector - m->ms->ti->begin);
}
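
/*
 * Example of the remap arithmetic (illustrative numbers): with
 * ti->begin = 0, m->offset = 384 and bio->bi_sector = 1000, the bio
 * is reissued at sector 384 + (1000 - 0) = 1384 on that leg's device.
 */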

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio->bi_size >> 9;
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * If the device is suspended, complete the bio immediately.
	 */
	if (atomic_read(&ms->suspend)) {
		if (dm_noflush_suspending(ms->ti))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_endio(bio, -EIO);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	spin_lock_irq(&ms->lock);
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio, 0);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s.  "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_rw(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
		    m->dev->name);
	bio_endio(bio, -EIO);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_rw = READ,
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_endio(bio, -EIO);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
	unsigned i, ret = 0;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int uptodate = 0;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error))
		goto out;

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
		else
			uptodate = 1;

	if (unlikely(!uptodate)) {
		DMERR("All replicated volumes dead, failing I/O");
		/* None of the writes succeeded, fail the I/O. */
		ret = -EIO;
	} else if (errors_handled(ms)) {
		/*
		 * Need to raise an event.  Since raising
		 * events can block, we need to do it in
		 * the main thread.
		 */
		spin_lock_irqsave(&ms->lock, flags);
		if (!ms->failures.head)
			should_wake = 1;
		bio_list_add(&ms->failures, bio);
		spin_unlock_irqrestore(&ms->lock, flags);
		if (should_wake)
			wakeup_mirrord(ms);
		return;
	}
out:
	bio_endio(bio, ret);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_rw = WRITE | (bio->bi_rw & WRITE_BARRIER),
		.mem.type = DM_IO_BVEC,
		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use the default mirror because we only need it to retrieve the
	 * reference to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if (unlikely(bio_empty_barrier(bio))) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue.
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush failed on a previous call and succeeds here,
	 * we must not reset the log_failure variable.  We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		map_bio(get_default_mirror(ms), bio);
		generic_make_request(bio);
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (!failures->head)
		return;

	if (!ms->log_failure) {
		while ((bio = bio_list_pop(failures))) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio, bio->bi_size, 0);
		}
		return;
	}

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the failures list.  We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core.  This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes.  If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices.  It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	if (dm_noflush_suspending(ms->ti)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, DM_ENDIO_REQUEUE);
		return;
	}

	if (atomic_read(&ms->suspend)) {
		while ((bio = bio_list_pop(failures)))
			bio_endio(bio, -EIO);
		return;
	}

	spin_lock_irq(&ms->lock);
	bio_list_merge(&ms->failures, failures);
	spin_unlock_irq(&ms->lock);

	delayed_wake(ms);
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);

	dm_table_unplug_all(ms->ti->table);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->read_record_pool = mempool_create_slab_pool(MIN_READ_RECORDS,
						_dm_raid1_read_record_cache);

	if (!ms->read_record_pool) {
		ti->error = "Error creating mirror read_record_pool";
		kfree(ms);
		return NULL;
	}

	ms->io_client = dm_io_client_create(DM_IO_PAGES);
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		mempool_destroy(ms->read_record_pool);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	mempool_destroy(ms->read_record_pool);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;

	if (sscanf(argv[1], "%llu", &offset) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], offset, ti->len,
			  dm_table_get_mode(ti->table),
			  &ms->mirror[mirror].dev)) {
		ti->error = "Device lookup failure";
		return -ENXIO;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u", &param_count) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u", &num_features) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	if (!strcmp("handle_errors", argv[0]))
		ms->features |= DM_RAID1_HANDLE_ERRORS;
	else {
		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	(*args_used)++;

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, features must be "handle_errors".
 */
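
/*
 * Illustrative example (device paths, length and region size are
 * hypothetical): a two-leg mirror over the first 2097152 sectors,
 * using an in-core log with a 1024-sector region size and error
 * handling enabled, could be loaded with:
 *
 *   echo "0 2097152 mirror core 2 1024 nosync \
 *         2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors" | \
 *     dmsetup create mymirror
 */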
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u", &nr_mirrors) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;
	ti->split_io = dm_rh_get_region_size(ms->rh);
	ti->num_flush_requests = 1;

	ms->kmirrord_wq = create_singlethread_workqueue("kmirrord");
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region.  If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	r = dm_kcopyd_client_create(DM_KCOPYD_PAGES, &ms->kcopyd_client);
	if (r)
		goto err_destroy_wq;

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_scheduled_work();
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	int r, rw = bio_rw(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_raid1_read_record *read_record = NULL;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		map_context->ll = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If the region is not in-sync, queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (rw == READA)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_sector);
	if (unlikely(!m))
		return -EIO;

	read_record = mempool_alloc(ms->read_record_pool, GFP_NOIO);
	if (likely(read_record)) {
		dm_bio_record(&read_record->details, bio);
		map_context->ptr = read_record;
		read_record->m = m;
	}

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

static int mirror_end_io(struct dm_target *ti, struct bio *bio,
			 int error, union map_info *map_context)
{
	int rw = bio_rw(bio);
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct mirror *m = NULL;
	struct dm_bio_details *bd = NULL;
	struct dm_raid1_read_record *read_record = map_context->ptr;

	/*
	 * We need to dec pending if this was a write.
	 */
	if (rw == WRITE) {
		if (likely(!bio_empty_barrier(bio)))
			dm_rh_dec(ms->rh, map_context->ll);
		return error;
	}

	if (error == -EOPNOTSUPP)
		goto out;

	if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD))
		goto out;

	if (unlikely(error)) {
		if (!read_record) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return -EIO;
		}

		m = read_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an
		 * intact mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &read_record->details;

			dm_bio_restore(bd, bio);
			mempool_free(read_record, ms->read_record_pool);
			map_context->ptr = NULL;
			queue_bio(ms, bio, rw);
			return 1;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	if (read_record) {
		mempool_free(read_record, ms->read_record_pool);
		map_context->ptr = NULL;
	}

	return error;
}

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete.  This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);

	/*
	 * Now that ms->suspend is set and the workqueue flushed, no more
	 * entries can be added to the ms->holds list, so process it.
	 *
	 * Bios can still arrive concurrently with or after this
	 * presuspend function, but they cannot join the hold list
	 * because ms->suspend is set.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * F => Flush - A flush failure occurred
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 * U => Unclassified - No recorded error type matched
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}

static int mirror_status(struct dm_target *ti, status_type_t type,
			 char *result, unsigned int maxlen)
{
	unsigned int m, sz = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		if (ms->features & DM_RAID1_HANDLE_ERRORS)
			DMEMIT(" 1 handle_errors");
	}

	return 0;
}
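
/*
 * Illustrative STATUSTYPE_INFO output for a healthy, fully synced
 * two-leg mirror backed by a disk log (all device numbers are
 * hypothetical):
 *
 *   2 253:4 253:5 1024/1024 1 AA 3 disk 253:3 A
 *
 * i.e. #mirrors, the legs, sync_count/nr_regions, one status string
 * ("AA": both legs alive), then the dirty log's own status.
 */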

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 12, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

static int __init dm_mirror_init(void)
{
	int r;

	_dm_raid1_read_record_cache = KMEM_CACHE(dm_raid1_read_record, 0);
	if (!_dm_raid1_read_record_cache) {
		DMERR("Can't allocate dm_raid1_read_record cache");
		r = -ENOMEM;
		goto bad_cache;
	}

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	kmem_cache_destroy(_dm_raid1_read_record_cache);
bad_cache:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
	kmem_cache_destroy(_dm_raid1_read_record_cache);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");