/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1	/* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS	0x01
#define DM_RAID1_KEEP_LOG	0x02
#define errors_handled(p)	((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)		((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
	DM_RAID1_WRITE_ERROR,
	DM_RAID1_FLUSH_ERROR,
	DM_RAID1_SYNC_ERROR,
	DM_RAID1_READ_ERROR
};

struct mirror {
	struct mirror_set *ms;
	atomic_t error_count;
	unsigned long error_type;
	struct dm_dev *dev;
	sector_t offset;
};

struct mirror_set {
	struct dm_target *ti;
	struct list_head list;

	uint64_t features;

	spinlock_t lock;	/* protects the lists */
	struct bio_list reads;
	struct bio_list writes;
	struct bio_list failures;
	struct bio_list holds;	/* bios are waiting until suspend */

	struct dm_region_hash *rh;
	struct dm_kcopyd_client *kcopyd_client;
	struct dm_io_client *io_client;

	/* recovery */
	region_t nr_regions;
	int in_sync;
	int log_failure;
	int leg_failure;
	atomic_t suspend;

	atomic_t default_mirror;	/* Default mirror */

	struct workqueue_struct *kmirrord_wq;
	struct work_struct kmirrord_work;
	struct timer_list timer;
	unsigned long timer_pending;

	struct work_struct trigger_event;

	unsigned nr_mirrors;
	struct mirror mirror[0];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
		"A percentage of time allocated for raid resynchronization");

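/*
 * Wake the kmirrord worker thread so that queued bios and region
 * state changes are processed.
 */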
static void wakeup_mirrord(void *context)
{
	struct mirror_set *ms = context;

	queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

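/*
 * Delayed wakeup: delayed_wake() arms a one-shot timer (HZ / 5, i.e.
 * roughly 200ms) whose handler, delayed_wake_fn(), wakes kmirrord.
 * The timer_pending bit ensures only one timer is armed at a time.
 */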
static void delayed_wake_fn(unsigned long data)
{
	struct mirror_set *ms = (struct mirror_set *) data;

	clear_bit(0, &ms->timer_pending);
	wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
	if (test_and_set_bit(0, &ms->timer_pending))
		return;

	ms->timer.expires = jiffies + HZ / 5;
	ms->timer.data = (unsigned long) ms;
	ms->timer.function = delayed_wake_fn;
	add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
	wake_up_all(&_kmirrord_recovery_stopped);
}

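/*
 * Add a bio to the relevant work list (reads or writes) and wake
 * kmirrord if the list was previously empty.
 */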
static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
	unsigned long flags;
	int should_wake = 0;
	struct bio_list *bl;

	bl = (rw == WRITE) ? &ms->writes : &ms->reads;
	spin_lock_irqsave(&ms->lock, flags);
	should_wake = !(bl->head);
	bio_list_add(bl, bio);
	spin_unlock_irqrestore(&ms->lock, flags);

	if (should_wake)
		wakeup_mirrord(ms);
}

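/*
 * Passed to dm_region_hash_create(): the region hash calls this to
 * resubmit writes that were delayed while their region was being
 * recovered.
 */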
static void dispatch_bios(void *context, struct bio_list *bio_list)
{
	struct mirror_set *ms = context;
	struct bio *bio;

	while ((bio = bio_list_pop(bio_list)))
		queue_bio(ms, bio, WRITE);
}

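/*
 * Per-bio state kept in the target's per-bio data area: the mirror a
 * read was issued to (so it can be retried elsewhere) and the region
 * of a write (so mirror_end_io() can decrement its pending count).
 */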
struct dm_raid1_bio_record {
	struct mirror *m;
	struct dm_bio_details details;
	region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky. We squirrel the mirror struct away inside
 * bi_next for read/write buffers. This is safe since the bh
 * doesn't get submitted to the lower levels of block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
	return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
	bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
	return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
	struct mirror_set *ms = m->ms;
	struct mirror *m0 = &(ms->mirror[0]);

	atomic_set(&ms->default_mirror, m - m0);
}

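/* Return the first mirror leg that has not recorded any errors. */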
static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
	struct mirror *m;

	for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
		if (!atomic_read(&m->error_count))
			return m;

	return NULL;
}

/* fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the enum's, DM_RAID1_*_ERROR
 *
 * If errors are being handled, record the type of
 * error encountered for this device. If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event. Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
	struct mirror_set *ms = m->ms;
	struct mirror *new;

	ms->leg_failure = 1;

	/*
	 * error_count is used for nothing more than a
	 * simple way to tell if a device has encountered
	 * errors.
	 */
	atomic_inc(&m->error_count);

	if (test_and_set_bit(error_type, &m->error_type))
		return;

	if (!errors_handled(ms))
		return;

	if (m != get_default_mirror(ms))
		goto out;

	if (!ms->in_sync && !keep_log(ms)) {
		/*
		 * Better to issue requests to same failing device
		 * than to risk returning corrupt data.
		 */
		DMERR("Primary mirror (%s) failed while out-of-sync: "
		      "Reads may fail.", m->dev->name);
		goto out;
	}

	new = get_valid_mirror(ms);
	if (new)
		set_default_mirror(new);
	else
		DMWARN("All sides of mirror have failed.");

out:
	schedule_work(&ms->trigger_event);
}

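/*
 * Issue an empty flush to every mirror leg, marking legs that fail
 * with DM_RAID1_FLUSH_ERROR. This is also the flush callback handed
 * to dm_dirty_log_create() in create_dirty_log().
 */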
static int mirror_flush(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	unsigned long error_bits;

	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors];
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = ms->io_client,
	};

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
		io[i].bdev = m->dev->bdev;
		io[i].sector = 0;
		io[i].count = 0;
	}

	error_bits = -1;
	dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
	if (unlikely(error_bits != 0)) {
		for (i = 0; i < ms->nr_mirrors; i++)
			if (test_bit(i, &error_bits))
				fail_mirror(ms->mirror + i,
					    DM_RAID1_FLUSH_ERROR);
		return -EIO;
	}

	return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state. We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
			      void *context)
{
	struct dm_region *reg = context;
	struct mirror_set *ms = dm_rh_region_context(reg);
	int m, bit = 0;

	if (read_err) {
		/* Read error means the failure of default mirror. */
		DMERR_LIMIT("Unable to read primary mirror during recovery");
		fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
	}

	if (write_err) {
		DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
			    write_err);
		/*
		 * Bits correspond to devices (excluding default mirror).
		 * The default mirror cannot change during recovery.
		 */
		for (m = 0; m < ms->nr_mirrors; m++) {
			if (&ms->mirror[m] == get_default_mirror(ms))
				continue;
			if (test_bit(bit, &write_err))
				fail_mirror(ms->mirror + m,
					    DM_RAID1_SYNC_ERROR);
			bit++;
		}
	}

	dm_rh_recovery_end(reg, !(read_err || write_err));
}

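/*
 * Kick off a kcopyd copy of one region from the default mirror to
 * all other legs; recovery_complete() runs when the copy finishes.
 */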
static int recover(struct mirror_set *ms, struct dm_region *reg)
{
	int r;
	unsigned i;
	struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
	struct mirror *m;
	unsigned long flags = 0;
	region_t key = dm_rh_get_region_key(reg);
	sector_t region_size = dm_rh_get_region_size(ms->rh);

	/* fill in the source */
	m = get_default_mirror(ms);
	from.bdev = m->dev->bdev;
	from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
	if (key == (ms->nr_regions - 1)) {
		/*
		 * The final region may be smaller than
		 * region_size.
		 */
		from.count = ms->ti->len & (region_size - 1);
		if (!from.count)
			from.count = region_size;
	} else
		from.count = region_size;

	/* fill in the destinations */
	for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
		if (&ms->mirror[i] == get_default_mirror(ms))
			continue;

		m = ms->mirror + i;
		dest->bdev = m->dev->bdev;
		dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
		dest->count = from.count;
		dest++;
	}

	/* hand to kcopyd */
	if (!errors_handled(ms))
		set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

	r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
			   flags, recovery_complete, reg);

	return r;
}

static void reset_ms_flags(struct mirror_set *ms)
{
	unsigned int m;

	ms->leg_failure = 0;
	for (m = 0; m < ms->nr_mirrors; m++) {
		atomic_set(&(ms->mirror[m].error_count), 0);
		ms->mirror[m].error_type = 0;
	}
}

static void do_recovery(struct mirror_set *ms)
{
	struct dm_region *reg;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	int r;

	/*
	 * Start quiescing some regions.
	 */
	dm_rh_recovery_prepare(ms->rh);

	/*
	 * Copy any already quiesced regions.
	 */
	while ((reg = dm_rh_recovery_start(ms->rh))) {
		r = recover(ms, reg);
		if (r)
			dm_rh_recovery_end(reg, 0);
	}

	/*
	 * Update the in sync flag.
	 */
	if (!ms->in_sync &&
	    (log->type->get_sync_count(log) == ms->nr_regions)) {
		/* the sync is complete */
		dm_table_event(ms->ti->table);
		ms->in_sync = 1;
		reset_ms_flags(ms);
	}
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
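/*
 * Choose a mirror to read from: start at the default mirror and walk
 * backwards (with wrap-around) until a leg without recorded errors
 * is found; returns NULL if every leg has failed.
 */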
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
	struct mirror *m = get_default_mirror(ms);

	do {
		if (likely(!atomic_read(&m->error_count)))
			return m;

		if (m-- == ms->mirror)
			m += ms->nr_mirrors;
	} while (m != get_default_mirror(ms));

	return NULL;
}

static int default_ok(struct mirror *m)
{
	struct mirror *default_mirror = get_default_mirror(m->ms);

	return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region = dm_rh_bio_to_region(ms->rh, bio);

	if (log->type->in_sync(log, region, 0))
		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

	return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
	if (unlikely(!bio->bi_iter.bi_size))
		return 0;
	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
	bio->bi_bdev = m->dev->bdev;
	bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
		       struct bio *bio)
{
	io->bdev = m->dev->bdev;
	io->sector = map_sector(m, bio);
	io->count = bio_sectors(bio);
}

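/*
 * Park a bio on ms->holds until suspend completes. If the set is
 * already suspended, complete the bio instead: requeue it to the
 * core for a 'noflush' suspend, otherwise fail it with -EIO.
 */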
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
	/*
	 * Lock is required to avoid race condition during suspend
	 * process.
	 */
	spin_lock_irq(&ms->lock);

	if (atomic_read(&ms->suspend)) {
		spin_unlock_irq(&ms->lock);

		/*
		 * If device is suspended, complete the bio.
		 */
		if (dm_noflush_suspending(ms->ti))
			bio->bi_error = DM_ENDIO_REQUEUE;
		else
			bio->bi_error = -EIO;

		bio_endio(bio);
		return;
	}

	/*
	 * Hold bio until the suspend is complete.
	 */
	bio_list_add(&ms->holds, bio);
	spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
	struct bio *bio = context;
	struct mirror *m;

	m = bio_get_m(bio);
	bio_set_m(bio, NULL);

	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	fail_mirror(m, DM_RAID1_READ_ERROR);

	if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
		DMWARN_LIMIT("Read failure on mirror device %s. "
			     "Trying alternative device.",
			     m->dev->name);
		queue_bio(m->ms, bio, bio_data_dir(bio));
		return;
	}

	DMERR_LIMIT("Read failure on mirror device %s. Failing I/O.",
		    m->dev->name);
	bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
	struct dm_io_region io;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_READ,
		.bi_op_flags = 0,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = read_callback,
		.notify.context = bio,
		.client = m->ms->io_client,
	};

	map_region(&io, m, bio);
	bio_set_m(bio, m);
	BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
				 int may_block)
{
	int state = dm_rh_get_state(ms->rh, region, may_block);
	return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
	region_t region;
	struct bio *bio;
	struct mirror *m;

	while ((bio = bio_list_pop(reads))) {
		region = dm_rh_bio_to_region(ms->rh, bio);
		m = get_default_mirror(ms);

		/*
		 * We can only read balance if the region is in sync.
		 */
		if (likely(region_in_sync(ms, region, 1)))
			m = choose_mirror(ms, bio->bi_iter.bi_sector);
		else if (m && atomic_read(&m->error_count))
			m = NULL;

		if (likely(m))
			read_async_bio(m, bio);
		else
			bio_io_error(bio);
	}
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:	increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:	delay the io until recovery completes
 * NOSYNC:	increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

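/*
 * dm-io completion for a mirrored write. Successful bios complete
 * immediately; on failure the failing legs are marked and the bio is
 * queued on the failures list for kmirrord, because raising the dm
 * event may block.
 */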
static void write_callback(unsigned long error, void *context)
{
	unsigned i;
	struct bio *bio = (struct bio *) context;
	struct mirror_set *ms;
	int should_wake = 0;
	unsigned long flags;

	ms = bio_get_m(bio)->ms;
	bio_set_m(bio, NULL);

	/*
	 * NOTE: We don't decrement the pending count here,
	 * instead it is done by the target's endio function.
	 * This way we handle both writes to SYNC and NOSYNC
	 * regions with the same code.
	 */
	if (likely(!error)) {
		bio_endio(bio);
		return;
	}

	/*
	 * If the bio is discard, return an error, but do not
	 * degrade the array.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD) {
		bio->bi_error = -EOPNOTSUPP;
		bio_endio(bio);
		return;
	}

	for (i = 0; i < ms->nr_mirrors; i++)
		if (test_bit(i, &error))
			fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

	/*
	 * Need to raise event. Since raising
	 * events can block, we need to do it in
	 * the main thread.
	 */
	spin_lock_irqsave(&ms->lock, flags);
	if (!ms->failures.head)
		should_wake = 1;
	bio_list_add(&ms->failures, bio);
	spin_unlock_irqrestore(&ms->lock, flags);
	if (should_wake)
		wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
	unsigned int i;
	struct dm_io_region io[ms->nr_mirrors], *dest = io;
	struct mirror *m;
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = bio->bi_opf & WRITE_FLUSH_FUA,
		.mem.type = DM_IO_BIO,
		.mem.ptr.bio = bio,
		.notify.fn = write_callback,
		.notify.context = bio,
		.client = ms->io_client,
	};

	if (bio_op(bio) == REQ_OP_DISCARD) {
		io_req.bi_op = REQ_OP_DISCARD;
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = NULL;
	}

	for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
		map_region(dest++, m, bio);

	/*
	 * Use default mirror because we only need it to retrieve the reference
	 * to the mirror set in write_callback().
	 */
	bio_set_m(bio, get_default_mirror(ms));

	BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

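/*
 * Classify each queued write by the state of its region, then
 * dispatch: in-sync writes go to all legs via do_write(), writes to
 * recovering regions are delayed, and out-of-sync writes go to the
 * default leg only.
 */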
static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
	int state;
	struct bio *bio;
	struct bio_list sync, nosync, recover, *this_list = NULL;
	struct bio_list requeue;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	region_t region;

	if (!writes->head)
		return;

	/*
	 * Classify each write.
	 */
	bio_list_init(&sync);
	bio_list_init(&nosync);
	bio_list_init(&recover);
	bio_list_init(&requeue);

	while ((bio = bio_list_pop(writes))) {
		if ((bio->bi_opf & REQ_PREFLUSH) ||
		    (bio_op(bio) == REQ_OP_DISCARD)) {
			bio_list_add(&sync, bio);
			continue;
		}

		region = dm_rh_bio_to_region(ms->rh, bio);

		if (log->type->is_remote_recovering &&
		    log->type->is_remote_recovering(log, region)) {
			bio_list_add(&requeue, bio);
			continue;
		}

		state = dm_rh_get_state(ms->rh, region, 1);
		switch (state) {
		case DM_RH_CLEAN:
		case DM_RH_DIRTY:
			this_list = &sync;
			break;

		case DM_RH_NOSYNC:
			this_list = &nosync;
			break;

		case DM_RH_RECOVERING:
			this_list = &recover;
			break;
		}

		bio_list_add(this_list, bio);
	}

	/*
	 * Add bios that are delayed due to remote recovery
	 * back on to the write queue
	 */
	if (unlikely(requeue.head)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->writes, &requeue);
		spin_unlock_irq(&ms->lock);
		delayed_wake(ms);
	}

	/*
	 * Increment the pending counts for any regions that will
	 * be written to (writes to recover regions are going to
	 * be delayed).
	 */
	dm_rh_inc_pending(ms->rh, &sync);
	dm_rh_inc_pending(ms->rh, &nosync);

	/*
	 * If the flush fails on a previous call and succeeds here,
	 * we must not reset the log_failure variable. We need
	 * userspace interaction to do that.
	 */
	ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

	/*
	 * Dispatch io.
	 */
	if (unlikely(ms->log_failure) && errors_handled(ms)) {
		spin_lock_irq(&ms->lock);
		bio_list_merge(&ms->failures, &sync);
		spin_unlock_irq(&ms->lock);
		wakeup_mirrord(ms);
	} else
		while ((bio = bio_list_pop(&sync)))
			do_write(ms, bio);

	while ((bio = bio_list_pop(&recover)))
		dm_rh_delay(ms->rh, bio);

	while ((bio = bio_list_pop(&nosync))) {
		if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
			spin_lock_irq(&ms->lock);
			bio_list_add(&ms->failures, bio);
			spin_unlock_irq(&ms->lock);
			wakeup_mirrord(ms);
		} else {
			map_bio(get_default_mirror(ms), bio);
			generic_make_request(bio);
		}
	}
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
	struct bio *bio;

	if (likely(!failures->head))
		return;

	/*
	 * If the log has failed, unattempted writes are being
	 * put on the holds list. We can't issue those writes
	 * until a log has been marked, so we must store them.
	 *
	 * If a 'noflush' suspend is in progress, we can requeue
	 * the I/O's to the core. This gives userspace a chance
	 * to reconfigure the mirror, at which point the core
	 * will reissue the writes. If the 'noflush' flag is
	 * not set, we have no choice but to return errors.
	 *
	 * Some writes on the failures list may have been
	 * submitted before the log failure and represent a
	 * failure to write to one of the devices. It is ok
	 * for us to treat them the same and requeue them
	 * as well.
	 */
	while ((bio = bio_list_pop(failures))) {
		if (!ms->log_failure) {
			ms->in_sync = 0;
			dm_rh_mark_nosync(ms->rh, bio);
		}

		/*
		 * If all the legs are dead, fail the I/O.
		 * If the device has failed and keep_log is enabled,
		 * fail the I/O.
		 *
		 * If we have been told to handle errors, and keep_log
		 * isn't enabled, hold the bio and wait for userspace to
		 * deal with the problem.
		 *
		 * Otherwise pretend that the I/O succeeded. (This would
		 * be wrong if the failed leg returned after reboot and
		 * got replicated back to the good legs.)
		 */
		if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
			bio_io_error(bio);
		else if (errors_handled(ms) && !keep_log(ms))
			hold_bio(ms, bio);
		else
			bio_endio(bio);
	}
}

static void trigger_event(struct work_struct *work)
{
	struct mirror_set *ms =
		container_of(work, struct mirror_set, trigger_event);

	dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
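/*
 * The kmirrord work function: snapshot the queued bio lists under
 * the lock, update region states, then run recovery and process the
 * reads, writes and failures in turn.
 */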
static void do_mirror(struct work_struct *work)
{
	struct mirror_set *ms = container_of(work, struct mirror_set,
					     kmirrord_work);
	struct bio_list reads, writes, failures;
	unsigned long flags;

	spin_lock_irqsave(&ms->lock, flags);
	reads = ms->reads;
	writes = ms->writes;
	failures = ms->failures;
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	spin_unlock_irqrestore(&ms->lock, flags);

	dm_rh_update_states(ms->rh, errors_handled(ms));
	do_recovery(ms);
	do_reads(ms, &reads);
	do_writes(ms, &writes);
	do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
					uint32_t region_size,
					struct dm_target *ti,
					struct dm_dirty_log *dl)
{
	size_t len;
	struct mirror_set *ms = NULL;

	len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

	ms = kzalloc(len, GFP_KERNEL);
	if (!ms) {
		ti->error = "Cannot allocate mirror context";
		return NULL;
	}

	spin_lock_init(&ms->lock);
	bio_list_init(&ms->reads);
	bio_list_init(&ms->writes);
	bio_list_init(&ms->failures);
	bio_list_init(&ms->holds);

	ms->ti = ti;
	ms->nr_mirrors = nr_mirrors;
	ms->nr_regions = dm_sector_div_up(ti->len, region_size);
	ms->in_sync = 0;
	ms->log_failure = 0;
	ms->leg_failure = 0;
	atomic_set(&ms->suspend, 0);
	atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

	ms->io_client = dm_io_client_create();
	if (IS_ERR(ms->io_client)) {
		ti->error = "Error creating dm_io client";
		kfree(ms);
		return NULL;
	}

	ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
				       wakeup_all_recovery_waiters,
				       ms->ti->begin, MAX_RECOVERY,
				       dl, region_size, ms->nr_regions);
	if (IS_ERR(ms->rh)) {
		ti->error = "Error creating dirty region hash";
		dm_io_client_destroy(ms->io_client);
		kfree(ms);
		return NULL;
	}

	return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
			 unsigned int m)
{
	while (m--)
		dm_put_device(ti, ms->mirror[m].dev);

	dm_io_client_destroy(ms->io_client);
	dm_region_hash_destroy(ms->rh);
	kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
		      unsigned int mirror, char **argv)
{
	unsigned long long offset;
	char dummy;
	int ret;

	if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
		ti->error = "Invalid offset";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
			    &ms->mirror[mirror].dev);
	if (ret) {
		ti->error = "Device lookup failure";
		return ret;
	}

	ms->mirror[mirror].ms = ms;
	atomic_set(&(ms->mirror[mirror].error_count), 0);
	ms->mirror[mirror].error_type = 0;
	ms->mirror[mirror].offset = offset;

	return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
					     unsigned argc, char **argv,
					     unsigned *args_used)
{
	unsigned param_count;
	struct dm_dirty_log *dl;
	char dummy;

	if (argc < 2) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
		ti->error = "Invalid mirror log argument count";
		return NULL;
	}

	*args_used = 2 + param_count;

	if (argc < *args_used) {
		ti->error = "Insufficient mirror log arguments";
		return NULL;
	}

	dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
				 argv + 2);
	if (!dl) {
		ti->error = "Error creating mirror dirty log";
		return NULL;
	}

	return dl;
}

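/*
 * Parse the optional feature arguments: "handle_errors" and
 * "keep_log" (the latter requires the former).
 */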
static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
			  unsigned *args_used)
{
	unsigned num_features;
	struct dm_target *ti = ms->ti;
	char dummy;
	int i;

	*args_used = 0;

	if (!argc)
		return 0;

	if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
		ti->error = "Invalid number of features";
		return -EINVAL;
	}

	argc--;
	argv++;
	(*args_used)++;

	if (num_features > argc) {
		ti->error = "Not enough arguments to support feature count";
		return -EINVAL;
	}

	for (i = 0; i < num_features; i++) {
		if (!strcmp("handle_errors", argv[0]))
			ms->features |= DM_RAID1_HANDLE_ERRORS;
		else if (!strcmp("keep_log", argv[0]))
			ms->features |= DM_RAID1_KEEP_LOG;
		else {
			ti->error = "Unrecognised feature requested";
			return -EINVAL;
		}

		argc--;
		argv++;
		(*args_used)++;
	}
	if (!errors_handled(ms) && keep_log(ms)) {
		ti->error = "keep_log feature requires the handle_errors feature";
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	unsigned int nr_mirrors, m, args_used;
	struct mirror_set *ms;
	struct dm_dirty_log *dl;
	char dummy;

	dl = create_dirty_log(ti, argc, argv, &args_used);
	if (!dl)
		return -EINVAL;

	argv += args_used;
	argc -= args_used;

	if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
	    nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
		ti->error = "Invalid number of mirrors";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	argv++, argc--;

	if (argc < nr_mirrors * 2) {
		ti->error = "Too few mirror arguments";
		dm_dirty_log_destroy(dl);
		return -EINVAL;
	}

	ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
	if (!ms) {
		dm_dirty_log_destroy(dl);
		return -ENOMEM;
	}

	/* Get the mirror parameter sets */
	for (m = 0; m < nr_mirrors; m++) {
		r = get_mirror(ms, ti, m, argv);
		if (r) {
			free_context(ms, ti, m);
			return r;
		}
		argv += 2;
		argc -= 2;
	}

	ti->private = ms;

	r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
	if (r)
		goto err_free_context;

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);
	ti->discard_zeroes_data_unsupported = true;

	ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
	if (!ms->kmirrord_wq) {
		DMERR("couldn't start kmirrord");
		r = -ENOMEM;
		goto err_free_context;
	}
	INIT_WORK(&ms->kmirrord_work, do_mirror);
	init_timer(&ms->timer);
	ms->timer_pending = 0;
	INIT_WORK(&ms->trigger_event, trigger_event);

	r = parse_features(ms, argc, argv, &args_used);
	if (r)
		goto err_destroy_wq;

	argv += args_used;
	argc -= args_used;

	/*
	 * Any read-balancing addition depends on the
	 * DM_RAID1_HANDLE_ERRORS flag being present.
	 * This is because the decision to balance depends
	 * on the sync state of a region. If the above
	 * flag is not present, we ignore errors; and
	 * the sync state may be inaccurate.
	 */

	if (argc) {
		ti->error = "Too many mirror arguments";
		r = -EINVAL;
		goto err_destroy_wq;
	}

	ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
	if (IS_ERR(ms->kcopyd_client)) {
		r = PTR_ERR(ms->kcopyd_client);
		goto err_destroy_wq;
	}

	wakeup_mirrord(ms);
	return 0;

err_destroy_wq:
	destroy_workqueue(ms->kmirrord_wq);
err_free_context:
	free_context(ms, ti, ms->nr_mirrors);
	return r;
}

static void mirror_dtr(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;

	del_timer_sync(&ms->timer);
	flush_workqueue(ms->kmirrord_wq);
	flush_work(&ms->trigger_event);
	dm_kcopyd_client_destroy(ms->kcopyd_client);
	destroy_workqueue(ms->kmirrord_wq);
	free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
	int r, rw = bio_data_dir(bio);
	struct mirror *m;
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	struct dm_raid1_bio_record *bio_record =
		dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

	if (rw == WRITE) {
		/* Save region for mirror_end_io() handler */
		bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
	if (r < 0 && r != -EWOULDBLOCK)
		return r;

	/*
	 * If region is not in-sync queue the bio.
	 */
	if (!r || (r == -EWOULDBLOCK)) {
		if (bio->bi_opf & REQ_RAHEAD)
			return -EWOULDBLOCK;

		queue_bio(ms, bio, rw);
		return DM_MAPIO_SUBMITTED;
	}

	/*
	 * The region is in-sync and we can perform reads directly.
	 * Store enough information so we can retry if it fails.
	 */
	m = choose_mirror(ms, bio->bi_iter.bi_sector);
	if (unlikely(!m))
		return -EIO;

	dm_bio_record(&bio_record->details, bio);
	bio_record->m = m;

	map_bio(m, bio);

	return DM_MAPIO_REMAPPED;
}

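/*
 * Completion handler: writes drop their region's pending count;
 * failed reads are restored from the saved bio details and retried
 * on another leg when one is available.
 */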
Mikulas Patocka7de3ee52012-12-21 20:23:41 +00001240static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241{
Christoph Hellwig70246282016-07-19 11:28:41 +02001242 int rw = bio_data_dir(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 struct mirror_set *ms = (struct mirror_set *) ti->private;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001244 struct mirror *m = NULL;
1245 struct dm_bio_details *bd = NULL;
Mikulas Patocka0045d612012-12-21 20:23:40 +00001246 struct dm_raid1_bio_record *bio_record =
1247 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
1249 /*
1250 * We need to dec pending if this was a write.
1251 */
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001252 if (rw == WRITE) {
Jens Axboe1eff9d32016-08-05 15:35:16 -06001253 if (!(bio->bi_opf & REQ_PREFLUSH) &&
Mike Christie28a8f0d2016-06-05 14:32:25 -05001254 bio_op(bio) != REQ_OP_DISCARD)
Mikulas Patocka0045d612012-12-21 20:23:40 +00001255 dm_rh_dec(ms->rh, bio_record->write_region);
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001256 return error;
1257 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001259 if (error == -EOPNOTSUPP)
Heinz Mauelshagen12a7cf52016-10-10 18:48:06 +02001260 return error;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001261
Jens Axboe1eff9d32016-08-05 15:35:16 -06001262 if ((error == -EWOULDBLOCK) && (bio->bi_opf & REQ_RAHEAD))
Heinz Mauelshagen12a7cf52016-10-10 18:48:06 +02001263 return error;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001264
1265 if (unlikely(error)) {
Mikulas Patocka89c7cd82012-12-21 20:23:39 +00001266 m = bio_record->m;
Adrian Bunke03f1a82008-02-19 19:44:19 +00001267
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001268 DMERR("Mirror read failed from %s. Trying alternative device.",
1269 m->dev->name);
1270
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001271 fail_mirror(m, DM_RAID1_READ_ERROR);
1272
1273 /*
1274 * A failed read is requeued for another attempt using an intact
1275 * mirror.
1276 */
1277 if (default_ok(m) || mirror_available(ms, bio)) {
Mikulas Patocka89c7cd82012-12-21 20:23:39 +00001278 bd = &bio_record->details;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001279
1280 dm_bio_restore(bd, bio);
Heinz Mauelshagendcb2ff52016-10-10 17:58:32 +02001281 bio->bi_error = 0;
Mikulas Patockaf3a44fe2014-02-18 09:57:22 -05001282
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001283 queue_bio(ms, bio, rw);
Mikulas Patocka19cbbc62012-12-21 20:23:32 +00001284 return DM_ENDIO_INCOMPLETE;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001285 }
1286 DMERR("All replicated volumes dead, failing I/O");
1287 }
1288
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001289 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290}
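
/*
 * The retry above works because mirror_map() snapshotted the bio with
 * dm_bio_record() before remapping it: dm_bio_restore() rewinds the
 * bio's device, sector and size so it can be resubmitted from scratch
 * against a different leg, and DM_ENDIO_INCOMPLETE tells the dm core
 * not to complete the bio to its originator yet. In outline:
 *
 *	dm_bio_record(&bio_record->details, bio);   (done in mirror_map)
 *	... the read fails on the chosen leg ...
 *	dm_bio_restore(&bio_record->details, bio);  (rewind the bio)
 *	bio->bi_error = 0;                          (clear the stale error)
 *	queue_bio(ms, bio, READ);                   (retry via the worker)
 *	return DM_ENDIO_INCOMPLETE;
 */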

static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Process the bios in the hold list so that any recovery
	 * waiting on them can make progress. Because ms->suspend is
	 * already set, no new bio can be added to the hold list
	 * afterwards.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}
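
/*
 * Note on the drain above: struct bio_list is just a head/tail pointer
 * pair, so "holds = ms->holds" under the spinlock takes a consistent
 * snapshot by structure copy, and bio_list_init() leaves an empty list
 * behind for any stragglers. Setting ms->suspend before the drain is
 * what guarantees there are no stragglers: hold_bio() sees the flag and
 * fails or requeues the bio instead of parking it.
 */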

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}
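
/*
 * Taken together, the three hooks above bracket a table suspend/resume
 * cycle: presuspend stops recovery and flushes the worker while I/O may
 * still be arriving, postsuspend quiesces the dirty log once the dm core
 * has drained all outstanding I/O, and resume clears ms->suspend,
 * restarts the log and kicks region recovery off again.
 */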

/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 * A => Alive - No failures
 * F => Flush - A flush failure occurred, mirror out-of-sync
 * D => Dead - A write failure occurred leaving mirror out-of-sync
 * S => Sync - A synchronization failure occurred, mirror out-of-sync
 * R => Read - A read failure occurred, mirror data unaffected
 * U => Unclassified - An error was recorded, but its type is unknown
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}
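
/*
 * For example, a two-leg mirror whose second device has seen a write
 * failure would contribute the characters "AD" to the STATUSTYPE_INFO
 * line below: leg 0 alive, leg 1 dead and out-of-sync.
 */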

static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		       (unsigned long long)log->type->get_sync_count(log),
		       (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;
	}
}
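
/*
 * Sample output, with made-up device numbers, for a healthy two-leg
 * mirror built with the handle_errors feature:
 *
 *	STATUSTYPE_INFO:  2 253:4 253:5 1024/1024 1 AA <log status...>
 *	STATUSTYPE_TABLE: <log status...> 2 253:4 0 253:5 0 1 handle_errors
 *
 * The INFO line reads: number of legs, the legs themselves, regions
 * in-sync/total, one status character per leg, then whatever the dirty
 * log emits for itself; the exact log fields depend on the log type.
 */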

static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}
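
/*
 * .iterate_devices lets the dm core visit every device underlying the
 * target, e.g. when stacking queue limits. A callback receives the
 * device, the start sector this target maps onto it, and the mapped
 * length; a non-zero return stops the walk, which is why the loop above
 * bails out as soon as fn() fails. A minimal sketch of such a callback
 * (reject_read_only() is a hypothetical name, not part of this driver):
 *
 *	static int reject_read_only(struct dm_target *ti, struct dm_dev *dev,
 *				    sector_t start, sector_t len, void *data)
 *	{
 *		return bdev_read_only(dev->bdev) ? -EROFS : 0;
 *	}
 */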

static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};
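
/*
 * Userspace activates this target with a device-mapper table line of the
 * form: start, length, "mirror", the dirty log specification, the number
 * of legs, each leg's device and offset, then optional feature arguments.
 * One hypothetical example (the exact syntax is defined by mirror_ctr()'s
 * argument parsing):
 *
 *	dmsetup create mirrored --table \
 *	  "0 2097152 mirror core 2 64 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors"
 */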

static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");