/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2005-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-dirty-log.h>
#include <linux/dm-kcopyd.h>
#include <linux/dm-region-hash.h>

#define DM_MSG_PREFIX "raid1"

#define MAX_RECOVERY 1        /* Maximum number of regions recovered in parallel. */

#define DM_RAID1_HANDLE_ERRORS        0x01
#define DM_RAID1_KEEP_LOG        0x02
#define errors_handled(p)        ((p)->features & DM_RAID1_HANDLE_ERRORS)
#define keep_log(p)                ((p)->features & DM_RAID1_KEEP_LOG)

static DECLARE_WAIT_QUEUE_HEAD(_kmirrord_recovery_stopped);

/*-----------------------------------------------------------------
 * Mirror set structures.
 *---------------------------------------------------------------*/
enum dm_raid1_error {
        DM_RAID1_WRITE_ERROR,
        DM_RAID1_FLUSH_ERROR,
        DM_RAID1_SYNC_ERROR,
        DM_RAID1_READ_ERROR
};

struct mirror {
        struct mirror_set *ms;
        atomic_t error_count;
        unsigned long error_type;
        struct dm_dev *dev;
        sector_t offset;
};

struct mirror_set {
        struct dm_target *ti;
        struct list_head list;

        uint64_t features;

        spinlock_t lock;        /* protects the lists */
        struct bio_list reads;
        struct bio_list writes;
        struct bio_list failures;
        struct bio_list holds;        /* bios are waiting until suspend */

        struct dm_region_hash *rh;
        struct dm_kcopyd_client *kcopyd_client;
        struct dm_io_client *io_client;

        /* recovery */
        region_t nr_regions;
        int in_sync;
        int log_failure;
        int leg_failure;
        atomic_t suspend;

        atomic_t default_mirror;        /* Default mirror */

        struct workqueue_struct *kmirrord_wq;
        struct work_struct kmirrord_work;
        struct timer_list timer;
        unsigned long timer_pending;

        struct work_struct trigger_event;

        unsigned nr_mirrors;
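        /* nr_mirrors legs follow; the array is sized at allocation time in alloc_context() */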
        struct mirror mirror[0];
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(raid1_resync_throttle,
                "A percentage of time allocated for raid resynchronization");

static void wakeup_mirrord(void *context)
{
        struct mirror_set *ms = context;

        queue_work(ms->kmirrord_wq, &ms->kmirrord_work);
}

static void delayed_wake_fn(unsigned long data)
{
        struct mirror_set *ms = (struct mirror_set *) data;

        clear_bit(0, &ms->timer_pending);
        wakeup_mirrord(ms);
}

static void delayed_wake(struct mirror_set *ms)
{
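        /*
         * Coalesce wakeups: if a wake is already pending do nothing,
         * otherwise fire kmirrord once, HZ / 5 (~200 ms) from now.
         */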
        if (test_and_set_bit(0, &ms->timer_pending))
                return;

        ms->timer.expires = jiffies + HZ / 5;
        ms->timer.data = (unsigned long) ms;
        ms->timer.function = delayed_wake_fn;
        add_timer(&ms->timer);
}

static void wakeup_all_recovery_waiters(void *context)
{
        wake_up_all(&_kmirrord_recovery_stopped);
}

static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw)
{
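        /*
         * Add the bio to the read or write list; only wake kmirrord
         * when the list was previously empty.
         */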
        unsigned long flags;
        int should_wake = 0;
        struct bio_list *bl;

        bl = (rw == WRITE) ? &ms->writes : &ms->reads;
        spin_lock_irqsave(&ms->lock, flags);
        should_wake = !(bl->head);
        bio_list_add(bl, bio);
        spin_unlock_irqrestore(&ms->lock, flags);

        if (should_wake)
                wakeup_mirrord(ms);
}

static void dispatch_bios(void *context, struct bio_list *bio_list)
{
        struct mirror_set *ms = context;
        struct bio *bio;

        while ((bio = bio_list_pop(bio_list)))
                queue_bio(ms, bio, WRITE);
}

struct dm_raid1_bio_record {
        struct mirror *m;
        /* if details->bi_bdev == NULL, details were not saved */
        struct dm_bio_details details;
        region_t write_region;
};

/*
 * Every mirror should look like this one.
 */
#define DEFAULT_MIRROR 0

/*
 * This is yucky.  We squirrel the mirror struct away inside
 * bi_next for read/write buffers.  This is safe since the bio
 * doesn't get submitted to the lower levels of the block layer.
 */
static struct mirror *bio_get_m(struct bio *bio)
{
        return (struct mirror *) bio->bi_next;
}

static void bio_set_m(struct bio *bio, struct mirror *m)
{
        bio->bi_next = (struct bio *) m;
}

static struct mirror *get_default_mirror(struct mirror_set *ms)
{
        return &ms->mirror[atomic_read(&ms->default_mirror)];
}

static void set_default_mirror(struct mirror *m)
{
        struct mirror_set *ms = m->ms;
        struct mirror *m0 = &(ms->mirror[0]);

        atomic_set(&ms->default_mirror, m - m0);
}

static struct mirror *get_valid_mirror(struct mirror_set *ms)
{
        struct mirror *m;

        for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
                if (!atomic_read(&m->error_count))
                        return m;

        return NULL;
}

/*
 * fail_mirror
 * @m: mirror device to fail
 * @error_type: one of the DM_RAID1_*_ERROR enum values
 *
 * If errors are being handled, record the type of
 * error encountered for this device.  If this type
 * of error has already been recorded, we can return;
 * otherwise, we must signal userspace by triggering
 * an event.  Additionally, if the device is the
 * primary device, we must choose a new primary, but
 * only if the mirror is in-sync.
 *
 * This function must not block.
 */
static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
{
        struct mirror_set *ms = m->ms;
        struct mirror *new;

        ms->leg_failure = 1;

        /*
         * error_count is used for nothing more than a
         * simple way to tell if a device has encountered
         * errors.
         */
        atomic_inc(&m->error_count);

        if (test_and_set_bit(error_type, &m->error_type))
                return;

        if (!errors_handled(ms))
                return;

        if (m != get_default_mirror(ms))
                goto out;

        if (!ms->in_sync && !keep_log(ms)) {
                /*
                 * Better to issue requests to same failing device
                 * than to risk returning corrupt data.
                 */
                DMERR("Primary mirror (%s) failed while out-of-sync: "
                      "Reads may fail.", m->dev->name);
                goto out;
        }

        new = get_valid_mirror(ms);
        if (new)
                set_default_mirror(new);
        else
                DMWARN("All sides of mirror have failed.");

out:
        schedule_work(&ms->trigger_event);
}

static int mirror_flush(struct dm_target *ti)
{
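        /*
         * Issue an empty REQ_PREFLUSH through dm_io to every leg; each
         * set bit in error_bits afterwards marks a failed leg.
         */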
        struct mirror_set *ms = ti->private;
        unsigned long error_bits;

        unsigned int i;
        struct dm_io_region io[ms->nr_mirrors];
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
                .mem.type = DM_IO_KMEM,
                .mem.ptr.addr = NULL,
                .client = ms->io_client,
        };

        for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++) {
                io[i].bdev = m->dev->bdev;
                io[i].sector = 0;
                io[i].count = 0;
        }

        error_bits = -1;
        dm_io(&io_req, ms->nr_mirrors, io, &error_bits);
        if (unlikely(error_bits != 0)) {
                for (i = 0; i < ms->nr_mirrors; i++)
                        if (test_bit(i, &error_bits))
                                fail_mirror(ms->mirror + i,
                                            DM_RAID1_FLUSH_ERROR);
                return -EIO;
        }

        return 0;
}

/*-----------------------------------------------------------------
 * Recovery.
 *
 * When a mirror is first activated we may find that some regions
 * are in the no-sync state.  We have to recover these by
 * recopying from the default mirror to all the others.
 *---------------------------------------------------------------*/
static void recovery_complete(int read_err, unsigned long write_err,
                              void *context)
{
        struct dm_region *reg = context;
        struct mirror_set *ms = dm_rh_region_context(reg);
        int m, bit = 0;

        if (read_err) {
                /* Read error means the failure of default mirror. */
                DMERR_LIMIT("Unable to read primary mirror during recovery");
                fail_mirror(get_default_mirror(ms), DM_RAID1_SYNC_ERROR);
        }

        if (write_err) {
                DMERR_LIMIT("Write error during recovery (error = 0x%lx)",
                            write_err);
                /*
                 * Bits correspond to devices (excluding default mirror).
                 * The default mirror cannot change during recovery.
                 */
                for (m = 0; m < ms->nr_mirrors; m++) {
                        if (&ms->mirror[m] == get_default_mirror(ms))
                                continue;
                        if (test_bit(bit, &write_err))
                                fail_mirror(ms->mirror + m,
                                            DM_RAID1_SYNC_ERROR);
                        bit++;
                }
        }

        dm_rh_recovery_end(reg, !(read_err || write_err));
}

static int recover(struct mirror_set *ms, struct dm_region *reg)
{
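        /*
         * Copy this region from the default leg to all other legs via
         * kcopyd; recovery_complete() runs when the copy finishes.
         */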
        int r;
        unsigned i;
        struct dm_io_region from, to[DM_KCOPYD_MAX_REGIONS], *dest;
        struct mirror *m;
        unsigned long flags = 0;
        region_t key = dm_rh_get_region_key(reg);
        sector_t region_size = dm_rh_get_region_size(ms->rh);

        /* fill in the source */
        m = get_default_mirror(ms);
        from.bdev = m->dev->bdev;
        from.sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
        if (key == (ms->nr_regions - 1)) {
                /*
                 * The final region may be smaller than
                 * region_size.
                 */
                from.count = ms->ti->len & (region_size - 1);
                if (!from.count)
                        from.count = region_size;
        } else
                from.count = region_size;

        /* fill in the destinations */
        for (i = 0, dest = to; i < ms->nr_mirrors; i++) {
                if (&ms->mirror[i] == get_default_mirror(ms))
                        continue;

                m = ms->mirror + i;
                dest->bdev = m->dev->bdev;
                dest->sector = m->offset + dm_rh_region_to_sector(ms->rh, key);
                dest->count = from.count;
                dest++;
        }

        /* hand to kcopyd */
        if (!errors_handled(ms))
                set_bit(DM_KCOPYD_IGNORE_ERROR, &flags);

        r = dm_kcopyd_copy(ms->kcopyd_client, &from, ms->nr_mirrors - 1, to,
                           flags, recovery_complete, reg);

        return r;
}

static void reset_ms_flags(struct mirror_set *ms)
{
        unsigned int m;

        ms->leg_failure = 0;
        for (m = 0; m < ms->nr_mirrors; m++) {
                atomic_set(&(ms->mirror[m].error_count), 0);
                ms->mirror[m].error_type = 0;
        }
}

static void do_recovery(struct mirror_set *ms)
{
        struct dm_region *reg;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        int r;

        /*
         * Start quiescing some regions.
         */
        dm_rh_recovery_prepare(ms->rh);

        /*
         * Copy any already quiesced regions.
         */
        while ((reg = dm_rh_recovery_start(ms->rh))) {
                r = recover(ms, reg);
                if (r)
                        dm_rh_recovery_end(reg, 0);
        }

        /*
         * Update the in sync flag.
         */
        if (!ms->in_sync &&
            (log->type->get_sync_count(log) == ms->nr_regions)) {
                /* the sync is complete */
                dm_table_event(ms->ti->table);
                ms->in_sync = 1;
                reset_ms_flags(ms);
        }
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
{
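        /*
         * Scan backwards from the default leg, wrapping around, until a
         * leg without recorded errors is found.
         */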
        struct mirror *m = get_default_mirror(ms);

        do {
                if (likely(!atomic_read(&m->error_count)))
                        return m;

                if (m-- == ms->mirror)
                        m += ms->nr_mirrors;
        } while (m != get_default_mirror(ms));

        return NULL;
}

static int default_ok(struct mirror *m)
{
        struct mirror *default_mirror = get_default_mirror(m->ms);

        return !atomic_read(&default_mirror->error_count);
}

static int mirror_available(struct mirror_set *ms, struct bio *bio)
{
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        region_t region = dm_rh_bio_to_region(ms->rh, bio);

        if (log->type->in_sync(log, region, 0))
                return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;

        return 0;
}

/*
 * remap a buffer to a particular mirror.
 */
static sector_t map_sector(struct mirror *m, struct bio *bio)
{
        if (unlikely(!bio->bi_iter.bi_size))
                return 0;
        return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
}

static void map_bio(struct mirror *m, struct bio *bio)
{
        bio->bi_bdev = m->dev->bdev;
        bio->bi_iter.bi_sector = map_sector(m, bio);
}

static void map_region(struct dm_io_region *io, struct mirror *m,
                       struct bio *bio)
{
        io->bdev = m->dev->bdev;
        io->sector = map_sector(m, bio);
        io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
        /*
         * The lock is required to avoid a race with the suspend
         * process.
         */
        spin_lock_irq(&ms->lock);

        if (atomic_read(&ms->suspend)) {
                spin_unlock_irq(&ms->lock);

                /*
                 * If device is suspended, complete the bio.
                 */
                if (dm_noflush_suspending(ms->ti))
                        bio->bi_status = BLK_STS_DM_REQUEUE;
                else
                        bio->bi_status = BLK_STS_IOERR;

                bio_endio(bio);
                return;
        }

        /*
         * Hold bio until the suspend is complete.
         */
        bio_list_add(&ms->holds, bio);
        spin_unlock_irq(&ms->lock);
}

/*-----------------------------------------------------------------
 * Reads
 *---------------------------------------------------------------*/
static void read_callback(unsigned long error, void *context)
{
        struct bio *bio = context;
        struct mirror *m;

        m = bio_get_m(bio);
        bio_set_m(bio, NULL);

        if (likely(!error)) {
                bio_endio(bio);
                return;
        }

        fail_mirror(m, DM_RAID1_READ_ERROR);

        if (likely(default_ok(m)) || mirror_available(m->ms, bio)) {
                DMWARN_LIMIT("Read failure on mirror device %s.  "
                             "Trying alternative device.",
                             m->dev->name);
                queue_bio(m->ms, bio, bio_data_dir(bio));
                return;
        }

        DMERR_LIMIT("Read failure on mirror device %s.  Failing I/O.",
                    m->dev->name);
        bio_io_error(bio);
}

/* Asynchronous read. */
static void read_async_bio(struct mirror *m, struct bio *bio)
{
        struct dm_io_region io;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_READ,
                .bi_op_flags = 0,
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = read_callback,
                .notify.context = bio,
                .client = m->ms->io_client,
        };

        map_region(&io, m, bio);
        bio_set_m(bio, m);
        BUG_ON(dm_io(&io_req, 1, &io, NULL));
}

static inline int region_in_sync(struct mirror_set *ms, region_t region,
                                 int may_block)
{
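        /*
         * DM_RH_CLEAN and DM_RH_DIRTY regions hold identical data on
         * every leg, so reads there may be balanced.
         */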
        int state = dm_rh_get_state(ms->rh, region, may_block);
        return state == DM_RH_CLEAN || state == DM_RH_DIRTY;
}

static void do_reads(struct mirror_set *ms, struct bio_list *reads)
{
        region_t region;
        struct bio *bio;
        struct mirror *m;

        while ((bio = bio_list_pop(reads))) {
                region = dm_rh_bio_to_region(ms->rh, bio);
                m = get_default_mirror(ms);

                /*
                 * We can only read balance if the region is in sync.
                 */
                if (likely(region_in_sync(ms, region, 1)))
                        m = choose_mirror(ms, bio->bi_iter.bi_sector);
                else if (m && atomic_read(&m->error_count))
                        m = NULL;

                if (likely(m))
                        read_async_bio(m, bio);
                else
                        bio_io_error(bio);
        }
}

/*-----------------------------------------------------------------
 * Writes.
 *
 * We do different things with the write io depending on the
 * state of the region that it's in:
 *
 * SYNC:        increment pending, use kcopyd to write to *all* mirrors
 * RECOVERING:  delay the io until recovery completes
 * NOSYNC:      increment pending, just write to the default mirror
 *---------------------------------------------------------------*/

static void write_callback(unsigned long error, void *context)
{
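        /* "error" is a dm_io bitmask with one bit per mirror leg. */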
        unsigned i;
        struct bio *bio = (struct bio *) context;
        struct mirror_set *ms;
        int should_wake = 0;
        unsigned long flags;

        ms = bio_get_m(bio)->ms;
        bio_set_m(bio, NULL);

        /*
         * NOTE: We don't decrement the pending count here;
         * instead it is done by the target's endio function.
         * This way we handle both writes to SYNC and NOSYNC
         * regions with the same code.
         */
        if (likely(!error)) {
                bio_endio(bio);
                return;
        }

        /*
         * If the bio is discard, return an error, but do not
         * degrade the array.
         */
        if (bio_op(bio) == REQ_OP_DISCARD) {
                bio->bi_status = BLK_STS_NOTSUPP;
                bio_endio(bio);
                return;
        }

        for (i = 0; i < ms->nr_mirrors; i++)
                if (test_bit(i, &error))
                        fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);

        /*
         * Need to raise event.  Since raising
         * events can block, we need to do it in
         * the main thread.
         */
        spin_lock_irqsave(&ms->lock, flags);
        if (!ms->failures.head)
                should_wake = 1;
        bio_list_add(&ms->failures, bio);
        spin_unlock_irqrestore(&ms->lock, flags);
        if (should_wake)
                wakeup_mirrord(ms);
}

static void do_write(struct mirror_set *ms, struct bio *bio)
{
        unsigned int i;
        struct dm_io_region io[ms->nr_mirrors], *dest = io;
        struct mirror *m;
        struct dm_io_request io_req = {
                .bi_op = REQ_OP_WRITE,
                .bi_op_flags = bio->bi_opf & (REQ_FUA | REQ_PREFLUSH),
                .mem.type = DM_IO_BIO,
                .mem.ptr.bio = bio,
                .notify.fn = write_callback,
                .notify.context = bio,
                .client = ms->io_client,
        };

        if (bio_op(bio) == REQ_OP_DISCARD) {
                io_req.bi_op = REQ_OP_DISCARD;
                io_req.mem.type = DM_IO_KMEM;
                io_req.mem.ptr.addr = NULL;
        }

        for (i = 0, m = ms->mirror; i < ms->nr_mirrors; i++, m++)
                map_region(dest++, m, bio);

        /*
         * Use default mirror because we only need it to retrieve the reference
         * to the mirror set in write_callback().
         */
        bio_set_m(bio, get_default_mirror(ms));

        BUG_ON(dm_io(&io_req, ms->nr_mirrors, io, NULL));
}

static void do_writes(struct mirror_set *ms, struct bio_list *writes)
{
        int state;
        struct bio *bio;
        struct bio_list sync, nosync, recover, *this_list = NULL;
        struct bio_list requeue;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        region_t region;

        if (!writes->head)
                return;

        /*
         * Classify each write.
         */
        bio_list_init(&sync);
        bio_list_init(&nosync);
        bio_list_init(&recover);
        bio_list_init(&requeue);

        while ((bio = bio_list_pop(writes))) {
                if ((bio->bi_opf & REQ_PREFLUSH) ||
                    (bio_op(bio) == REQ_OP_DISCARD)) {
                        bio_list_add(&sync, bio);
                        continue;
                }

                region = dm_rh_bio_to_region(ms->rh, bio);

                if (log->type->is_remote_recovering &&
                    log->type->is_remote_recovering(log, region)) {
                        bio_list_add(&requeue, bio);
                        continue;
                }

                state = dm_rh_get_state(ms->rh, region, 1);
                switch (state) {
                case DM_RH_CLEAN:
                case DM_RH_DIRTY:
                        this_list = &sync;
                        break;

                case DM_RH_NOSYNC:
                        this_list = &nosync;
                        break;

                case DM_RH_RECOVERING:
                        this_list = &recover;
                        break;
                }

                bio_list_add(this_list, bio);
        }

        /*
         * Add bios that are delayed due to remote recovery
         * back on to the write queue
         */
        if (unlikely(requeue.head)) {
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->writes, &requeue);
                spin_unlock_irq(&ms->lock);
                delayed_wake(ms);
        }

        /*
         * Increment the pending counts for any regions that will
         * be written to (writes to recover regions are going to
         * be delayed).
         */
        dm_rh_inc_pending(ms->rh, &sync);
        dm_rh_inc_pending(ms->rh, &nosync);

        /*
         * If the flush fails on a previous call and succeeds here,
         * we must not reset the log_failure variable. We need
         * userspace interaction to do that.
         */
        ms->log_failure = dm_rh_flush(ms->rh) ? 1 : ms->log_failure;

        /*
         * Dispatch io.
         */
        if (unlikely(ms->log_failure) && errors_handled(ms)) {
                spin_lock_irq(&ms->lock);
                bio_list_merge(&ms->failures, &sync);
                spin_unlock_irq(&ms->lock);
                wakeup_mirrord(ms);
        } else
                while ((bio = bio_list_pop(&sync)))
                        do_write(ms, bio);

        while ((bio = bio_list_pop(&recover)))
                dm_rh_delay(ms->rh, bio);

        while ((bio = bio_list_pop(&nosync))) {
                if (unlikely(ms->leg_failure) && errors_handled(ms) && !keep_log(ms)) {
                        spin_lock_irq(&ms->lock);
                        bio_list_add(&ms->failures, bio);
                        spin_unlock_irq(&ms->lock);
                        wakeup_mirrord(ms);
                } else {
                        map_bio(get_default_mirror(ms), bio);
                        generic_make_request(bio);
                }
        }
}

static void do_failures(struct mirror_set *ms, struct bio_list *failures)
{
        struct bio *bio;

        if (likely(!failures->head))
                return;

        /*
         * If the log has failed, unattempted writes are being
         * put on the holds list.  We can't issue those writes
         * until a log has been marked, so we must store them.
         *
         * If a 'noflush' suspend is in progress, we can requeue
         * the I/Os to the core.  This gives userspace a chance
         * to reconfigure the mirror, at which point the core
         * will reissue the writes.  If the 'noflush' flag is
         * not set, we have no choice but to return errors.
         *
         * Some writes on the failures list may have been
         * submitted before the log failure and represent a
         * failure to write to one of the devices.  It is ok
         * for us to treat them the same and requeue them
         * as well.
         */
        while ((bio = bio_list_pop(failures))) {
                if (!ms->log_failure) {
                        ms->in_sync = 0;
                        dm_rh_mark_nosync(ms->rh, bio);
                }

                /*
                 * If all the legs are dead, fail the I/O.
                 * If the device has failed and keep_log is enabled,
                 * fail the I/O.
                 *
                 * If we have been told to handle errors, and keep_log
                 * isn't enabled, hold the bio and wait for userspace to
                 * deal with the problem.
                 *
                 * Otherwise pretend that the I/O succeeded. (This would
                 * be wrong if the failed leg returned after reboot and
                 * got replicated back to the good legs.)
                 */
                if (unlikely(!get_valid_mirror(ms) || (keep_log(ms) && ms->log_failure)))
                        bio_io_error(bio);
                else if (errors_handled(ms) && !keep_log(ms))
                        hold_bio(ms, bio);
                else
                        bio_endio(bio);
        }
}

static void trigger_event(struct work_struct *work)
{
        struct mirror_set *ms =
                container_of(work, struct mirror_set, trigger_event);

        dm_table_event(ms->ti->table);
}

/*-----------------------------------------------------------------
 * kmirrord
 *---------------------------------------------------------------*/
static void do_mirror(struct work_struct *work)
{
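        /*
         * Snapshot all queued bios under the lock, then process recovery,
         * reads, writes and failures outside it.
         */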
        struct mirror_set *ms = container_of(work, struct mirror_set,
                                             kmirrord_work);
        struct bio_list reads, writes, failures;
        unsigned long flags;

        spin_lock_irqsave(&ms->lock, flags);
        reads = ms->reads;
        writes = ms->writes;
        failures = ms->failures;
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        bio_list_init(&ms->failures);
        spin_unlock_irqrestore(&ms->lock, flags);

        dm_rh_update_states(ms->rh, errors_handled(ms));
        do_recovery(ms);
        do_reads(ms, &reads);
        do_writes(ms, &writes);
        do_failures(ms, &failures);
}

/*-----------------------------------------------------------------
 * Target functions
 *---------------------------------------------------------------*/
static struct mirror_set *alloc_context(unsigned int nr_mirrors,
                                        uint32_t region_size,
                                        struct dm_target *ti,
                                        struct dm_dirty_log *dl)
{
        size_t len;
        struct mirror_set *ms = NULL;

        len = sizeof(*ms) + (sizeof(ms->mirror[0]) * nr_mirrors);

        ms = kzalloc(len, GFP_KERNEL);
        if (!ms) {
                ti->error = "Cannot allocate mirror context";
                return NULL;
        }

        spin_lock_init(&ms->lock);
        bio_list_init(&ms->reads);
        bio_list_init(&ms->writes);
        bio_list_init(&ms->failures);
        bio_list_init(&ms->holds);

        ms->ti = ti;
        ms->nr_mirrors = nr_mirrors;
        ms->nr_regions = dm_sector_div_up(ti->len, region_size);
        ms->in_sync = 0;
        ms->log_failure = 0;
        ms->leg_failure = 0;
        atomic_set(&ms->suspend, 0);
        atomic_set(&ms->default_mirror, DEFAULT_MIRROR);

        ms->io_client = dm_io_client_create();
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
                return NULL;
        }

        ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
                                       wakeup_all_recovery_waiters,
                                       ms->ti->begin, MAX_RECOVERY,
                                       dl, region_size, ms->nr_regions);
        if (IS_ERR(ms->rh)) {
                ti->error = "Error creating dirty region hash";
                dm_io_client_destroy(ms->io_client);
                kfree(ms);
                return NULL;
        }

        return ms;
}

static void free_context(struct mirror_set *ms, struct dm_target *ti,
                         unsigned int m)
{
        while (m--)
                dm_put_device(ti, ms->mirror[m].dev);

        dm_io_client_destroy(ms->io_client);
        dm_region_hash_destroy(ms->rh);
        kfree(ms);
}

static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
                      unsigned int mirror, char **argv)
{
        unsigned long long offset;
        char dummy;
        int ret;

        if (sscanf(argv[1], "%llu%c", &offset, &dummy) != 1) {
                ti->error = "Invalid offset";
                return -EINVAL;
        }

        ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
                            &ms->mirror[mirror].dev);
        if (ret) {
                ti->error = "Device lookup failure";
                return ret;
        }

        ms->mirror[mirror].ms = ms;
        atomic_set(&(ms->mirror[mirror].error_count), 0);
        ms->mirror[mirror].error_type = 0;
        ms->mirror[mirror].offset = offset;

        return 0;
}

/*
 * Create dirty log: log_type #log_params <log_params>
 */
static struct dm_dirty_log *create_dirty_log(struct dm_target *ti,
                                             unsigned argc, char **argv,
                                             unsigned *args_used)
{
        unsigned param_count;
        struct dm_dirty_log *dl;
        char dummy;

        if (argc < 2) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        if (sscanf(argv[1], "%u%c", &param_count, &dummy) != 1) {
                ti->error = "Invalid mirror log argument count";
                return NULL;
        }

        *args_used = 2 + param_count;

        if (argc < *args_used) {
                ti->error = "Insufficient mirror log arguments";
                return NULL;
        }

        dl = dm_dirty_log_create(argv[0], ti, mirror_flush, param_count,
                                 argv + 2);
        if (!dl) {
                ti->error = "Error creating mirror dirty log";
                return NULL;
        }

        return dl;
}

static int parse_features(struct mirror_set *ms, unsigned argc, char **argv,
                          unsigned *args_used)
{
        unsigned num_features;
        struct dm_target *ti = ms->ti;
        char dummy;
        int i;

        *args_used = 0;

        if (!argc)
                return 0;

        if (sscanf(argv[0], "%u%c", &num_features, &dummy) != 1) {
                ti->error = "Invalid number of features";
                return -EINVAL;
        }

        argc--;
        argv++;
        (*args_used)++;

        if (num_features > argc) {
                ti->error = "Not enough arguments to support feature count";
                return -EINVAL;
        }

        for (i = 0; i < num_features; i++) {
                if (!strcmp("handle_errors", argv[0]))
                        ms->features |= DM_RAID1_HANDLE_ERRORS;
                else if (!strcmp("keep_log", argv[0]))
                        ms->features |= DM_RAID1_KEEP_LOG;
                else {
                        ti->error = "Unrecognised feature requested";
                        return -EINVAL;
                }

                argc--;
                argv++;
                (*args_used)++;
        }
        if (!errors_handled(ms) && keep_log(ms)) {
                ti->error = "keep_log feature requires the handle_errors feature";
                return -EINVAL;
        }

        return 0;
}

/*
 * Construct a mirror mapping:
 *
 * log_type #log_params <log_params>
 * #mirrors [mirror_path offset]{2,}
 * [#features <features>]
 *
 * log_type is "core" or "disk"
 * #log_params is between 1 and 3
 *
 * If present, supported features are "handle_errors" and "keep_log".
 */
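/*
 * For example (illustrative values only, not taken from this file), a
 * two-leg mirror with a core log and a 1024-sector region size might be
 * created with a table line such as:
 *
 *   0 2097152 mirror core 1 1024 2 /dev/sda 0 /dev/sdb 0 1 handle_errors
 */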
static int mirror_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        int r;
        unsigned int nr_mirrors, m, args_used;
        struct mirror_set *ms;
        struct dm_dirty_log *dl;
        char dummy;

        dl = create_dirty_log(ti, argc, argv, &args_used);
        if (!dl)
                return -EINVAL;

        argv += args_used;
        argc -= args_used;

        if (!argc || sscanf(argv[0], "%u%c", &nr_mirrors, &dummy) != 1 ||
            nr_mirrors < 2 || nr_mirrors > DM_KCOPYD_MAX_REGIONS + 1) {
                ti->error = "Invalid number of mirrors";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
        }

        argv++, argc--;

        if (argc < nr_mirrors * 2) {
                ti->error = "Too few mirror arguments";
                dm_dirty_log_destroy(dl);
                return -EINVAL;
        }

        ms = alloc_context(nr_mirrors, dl->type->get_region_size(dl), ti, dl);
        if (!ms) {
                dm_dirty_log_destroy(dl);
                return -ENOMEM;
        }

        /* Get the mirror parameter sets */
        for (m = 0; m < nr_mirrors; m++) {
                r = get_mirror(ms, ti, m, argv);
                if (r) {
                        free_context(ms, ti, m);
                        return r;
                }
                argv += 2;
                argc -= 2;
        }

        ti->private = ms;

        r = dm_set_target_max_io_len(ti, dm_rh_get_region_size(ms->rh));
        if (r)
                goto err_free_context;

        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->per_io_data_size = sizeof(struct dm_raid1_bio_record);

        ms->kmirrord_wq = alloc_workqueue("kmirrord", WQ_MEM_RECLAIM, 0);
        if (!ms->kmirrord_wq) {
                DMERR("couldn't start kmirrord");
                r = -ENOMEM;
                goto err_free_context;
        }
        INIT_WORK(&ms->kmirrord_work, do_mirror);
        init_timer(&ms->timer);
        ms->timer_pending = 0;
        INIT_WORK(&ms->trigger_event, trigger_event);

        r = parse_features(ms, argc, argv, &args_used);
        if (r)
                goto err_destroy_wq;

        argv += args_used;
        argc -= args_used;

        /*
         * Any read-balancing addition depends on the
         * DM_RAID1_HANDLE_ERRORS flag being present.
         * This is because the decision to balance depends
         * on the sync state of a region.  If the above
         * flag is not present, we ignore errors; and
         * the sync state may be inaccurate.
         */

        if (argc) {
                ti->error = "Too many mirror arguments";
                r = -EINVAL;
                goto err_destroy_wq;
        }

        ms->kcopyd_client = dm_kcopyd_client_create(&dm_kcopyd_throttle);
        if (IS_ERR(ms->kcopyd_client)) {
                r = PTR_ERR(ms->kcopyd_client);
                goto err_destroy_wq;
        }

        wakeup_mirrord(ms);
        return 0;

err_destroy_wq:
        destroy_workqueue(ms->kmirrord_wq);
err_free_context:
        free_context(ms, ti, ms->nr_mirrors);
        return r;
}

static void mirror_dtr(struct dm_target *ti)
{
        struct mirror_set *ms = (struct mirror_set *) ti->private;

        del_timer_sync(&ms->timer);
        flush_workqueue(ms->kmirrord_wq);
        flush_work(&ms->trigger_event);
        dm_kcopyd_client_destroy(ms->kcopyd_client);
        destroy_workqueue(ms->kmirrord_wq);
        free_context(ms, ti, ms->nr_mirrors);
}

/*
 * Mirror mapping function
 */
static int mirror_map(struct dm_target *ti, struct bio *bio)
{
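        /*
         * Writes are always queued for kmirrord; reads of in-sync regions
         * are remapped inline and bypass the worker.
         */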
        int r, rw = bio_data_dir(bio);
        struct mirror *m;
        struct mirror_set *ms = ti->private;
        struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
        struct dm_raid1_bio_record *bio_record =
                dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));

        bio_record->details.bi_bdev = NULL;

        if (rw == WRITE) {
                /* Save region for mirror_end_io() handler */
                bio_record->write_region = dm_rh_bio_to_region(ms->rh, bio);
                queue_bio(ms, bio, rw);
                return DM_MAPIO_SUBMITTED;
        }

        r = log->type->in_sync(log, dm_rh_bio_to_region(ms->rh, bio), 0);
        if (r < 0 && r != -EWOULDBLOCK)
                return DM_MAPIO_KILL;

        /*
         * If the region is not in-sync, queue the bio.
         */
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001218 if (!r || (r == -EWOULDBLOCK)) {
Jens Axboe1eff9d32016-08-05 15:35:16 -06001219 if (bio->bi_opf & REQ_RAHEAD)
Christoph Hellwig846785e2017-06-03 09:38:02 +02001220 return DM_MAPIO_KILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001221
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 queue_bio(ms, bio, rw);
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001223 return DM_MAPIO_SUBMITTED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001224 }
1225
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001226 /*
1227 * The region is in-sync and we can perform reads directly.
1228 * Store enough information so we can retry if it fails.
1229 */
Kent Overstreet4f024f32013-10-11 15:44:27 -07001230 m = choose_mirror(ms, bio->bi_iter.bi_sector);
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001231 if (unlikely(!m))
Christoph Hellwig846785e2017-06-03 09:38:02 +02001232 return DM_MAPIO_KILL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233
Mikulas Patocka89c7cd82012-12-21 20:23:39 +00001234 dm_bio_record(&bio_record->details, bio);
Mikulas Patocka89c7cd82012-12-21 20:23:39 +00001235 bio_record->m = m;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001236
1237 map_bio(m, bio);
1238
Kiyoshi Uedad2a7ad22006-12-08 02:41:06 -08001239 return DM_MAPIO_REMAPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240}
1241
Christoph Hellwig4e4cbee2017-06-03 09:38:06 +02001242static int mirror_end_io(struct dm_target *ti, struct bio *bio,
1243 blk_status_t *error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244{
Christoph Hellwig70246282016-07-19 11:28:41 +02001245 int rw = bio_data_dir(bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 struct mirror_set *ms = (struct mirror_set *) ti->private;
Jonathan Brassow06386bb2008-02-08 02:11:37 +00001247 struct mirror *m = NULL;
1248 struct dm_bio_details *bd = NULL;
Mikulas Patocka0045d612012-12-21 20:23:40 +00001249 struct dm_raid1_bio_record *bio_record =
1250 dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251
1252 /*
1253 * We need to dec pending if this was a write.
1254 */
	if (rw == WRITE) {
		if (!(bio->bi_opf & REQ_PREFLUSH) &&
		    bio_op(bio) != REQ_OP_DISCARD)
			dm_rh_dec(ms->rh, bio_record->write_region);
		return DM_ENDIO_DONE;
	}

	if (*error == BLK_STS_NOTSUPP)
		goto out;

	if (bio->bi_opf & REQ_RAHEAD)
		goto out;

	if (unlikely(*error)) {
		if (!bio_record->details.bi_bdev) {
			/*
			 * There wasn't enough memory to record necessary
			 * information for a retry or there was no other
			 * mirror in-sync.
			 */
			DMERR_LIMIT("Mirror read failed.");
			return DM_ENDIO_DONE;
		}

		m = bio_record->m;

		DMERR("Mirror read failed from %s. Trying alternative device.",
		      m->dev->name);

		fail_mirror(m, DM_RAID1_READ_ERROR);

		/*
		 * A failed read is requeued for another attempt using an intact
		 * mirror.
		 */
		if (default_ok(m) || mirror_available(ms, bio)) {
			bd = &bio_record->details;

			dm_bio_restore(bd, bio);
			bio_record->details.bi_bdev = NULL;
			bio->bi_status = 0;

			queue_bio(ms, bio, rw);
			return DM_ENDIO_INCOMPLETE;
		}
		DMERR("All replicated volumes dead, failing I/O");
	}

out:
	bio_record->details.bi_bdev = NULL;

	return DM_ENDIO_DONE;
}

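/*
 * Informal note on the read-retry path above: mirror_map() snapshots a
 * read bio with dm_bio_record() before remapping it to a leg.  If that
 * read later fails, mirror_end_io() rewinds the bio with
 * dm_bio_restore(), clears bi_status and requeues it, returning
 * DM_ENDIO_INCOMPLETE so the dm core keeps the bio alive while the
 * worker thread retries it on another in-sync leg.
 */
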
static void mirror_presuspend(struct dm_target *ti)
{
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	struct bio_list holds;
	struct bio *bio;

	atomic_set(&ms->suspend, 1);

	/*
	 * Flush the bios currently sitting in the hold list so that
	 * recovery waiting on them can make progress: with ms->suspend
	 * set, hold_bio() completes them instead of re-queueing them,
	 * and no new bio can be added to the hold list afterwards.
	 */
	spin_lock_irq(&ms->lock);
	holds = ms->holds;
	bio_list_init(&ms->holds);
	spin_unlock_irq(&ms->lock);

	while ((bio = bio_list_pop(&holds)))
		hold_bio(ms, bio);

	/*
	 * We must finish up all the work that we've
	 * generated (i.e. recovery work).
	 */
	dm_rh_stop_recovery(ms->rh);

	wait_event(_kmirrord_recovery_stopped,
		   !dm_rh_recovery_in_flight(ms->rh));

	if (log->type->presuspend && log->type->presuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log presuspend failed");

	/*
	 * Now that recovery is complete/stopped and the
	 * delayed bios are queued, we need to wait for
	 * the worker thread to complete. This way,
	 * we know that all of our I/O has been pushed.
	 */
	flush_workqueue(ms->kmirrord_wq);
}

static void mirror_postsuspend(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	if (log->type->postsuspend && log->type->postsuspend(log))
		/* FIXME: need better error handling */
		DMWARN("log postsuspend failed");
}

static void mirror_resume(struct dm_target *ti)
{
	struct mirror_set *ms = ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);

	atomic_set(&ms->suspend, 0);
	if (log->type->resume && log->type->resume(log))
		/* FIXME: need better error handling */
		DMWARN("log resume failed");
	dm_rh_start_recovery(ms->rh);
}

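/*
 * Informal note on the suspend/resume ordering above: presuspend flushes
 * the held bios, stops recovery and drains the worker queue while the
 * target may still see I/O; postsuspend then quiesces only the dirty
 * log; resume reverses this by clearing ms->suspend, resuming the log
 * and restarting recovery.
 */
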
/*
 * device_status_char
 * @m: mirror device/leg we want the status of
 *
 * We return one character representing the most severe error
 * we have encountered.
 *	A => Alive - No failures
 *	F => Flush - A flush failure occurred, mirror out-of-sync
 *	D => Dead - A write failure occurred leaving mirror out-of-sync
 *	S => Sync - A synchronization failure occurred, mirror out-of-sync
 *	R => Read - A read failure occurred, mirror data unaffected
 *	U => Unclassified - An error occurred but its type is unknown
 *
 * Returns: <char>
 */
static char device_status_char(struct mirror *m)
{
	if (!atomic_read(&(m->error_count)))
		return 'A';

	return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
		(test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
		(test_bit(DM_RAID1_SYNC_ERROR, &(m->error_type))) ? 'S' :
		(test_bit(DM_RAID1_READ_ERROR, &(m->error_type))) ? 'R' : 'U';
}


static void mirror_status(struct dm_target *ti, status_type_t type,
			  unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned int m, sz = 0;
	int num_feature_args = 0;
	struct mirror_set *ms = (struct mirror_set *) ti->private;
	struct dm_dirty_log *log = dm_rh_dirty_log(ms->rh);
	char buffer[ms->nr_mirrors + 1];

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%d ", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++) {
			DMEMIT("%s ", ms->mirror[m].dev->name);
			buffer[m] = device_status_char(&(ms->mirror[m]));
		}
		buffer[m] = '\0';

		DMEMIT("%llu/%llu 1 %s ",
		      (unsigned long long)log->type->get_sync_count(log),
		      (unsigned long long)ms->nr_regions, buffer);

		sz += log->type->status(log, type, result+sz, maxlen-sz);

		break;

	case STATUSTYPE_TABLE:
		sz = log->type->status(log, type, result, maxlen);

		DMEMIT("%d", ms->nr_mirrors);
		for (m = 0; m < ms->nr_mirrors; m++)
			DMEMIT(" %s %llu", ms->mirror[m].dev->name,
			       (unsigned long long)ms->mirror[m].offset);

		num_feature_args += !!errors_handled(ms);
		num_feature_args += !!keep_log(ms);
		if (num_feature_args) {
			DMEMIT(" %d", num_feature_args);
			if (errors_handled(ms))
				DMEMIT(" handle_errors");
			if (keep_log(ms))
				DMEMIT(" keep_log");
		}

		break;
	}
}

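/*
 * Illustrative output of the two status types above, assuming a two-leg
 * mirror with a core log (device numbers, sizes and region size are
 * made-up examples):
 *
 *   STATUSTYPE_INFO:  2 253:4 253:5 1024/1024 1 AA 1 core
 *   STATUSTYPE_TABLE: core 2 64 nosync 2 253:4 0 253:5 0 1 handle_errors
 */
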
static int mirror_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct mirror_set *ms = ti->private;
	int ret = 0;
	unsigned i;

	for (i = 0; !ret && i < ms->nr_mirrors; i++)
		ret = fn(ti, ms->mirror[i].dev,
			 ms->mirror[i].offset, ti->len, data);

	return ret;
}

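/*
 * Informal note: mirror_iterate_devices() backs the .iterate_devices
 * hook below; the dm core uses it to apply a callback (for example when
 * stacking queue limits) to every underlying mirror leg, stopping at
 * the first non-zero return.
 */
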
static struct target_type mirror_target = {
	.name	 = "mirror",
	.version = {1, 14, 0},
	.module	 = THIS_MODULE,
	.ctr	 = mirror_ctr,
	.dtr	 = mirror_dtr,
	.map	 = mirror_map,
	.end_io	 = mirror_end_io,
	.presuspend = mirror_presuspend,
	.postsuspend = mirror_postsuspend,
	.resume	 = mirror_resume,
	.status	 = mirror_status,
	.iterate_devices = mirror_iterate_devices,
};

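/*
 * Illustrative construction of a two-leg mirror from userspace; the
 * device paths, length and region size are made-up examples:
 *
 *   dmsetup create mirr --table \
 *     "0 1024 mirror core 2 64 nosync 2 /dev/sda1 0 /dev/sdb1 0 1 handle_errors"
 */
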
static int __init dm_mirror_init(void)
{
	int r;

	r = dm_register_target(&mirror_target);
	if (r < 0) {
		DMERR("Failed to register mirror target");
		goto bad_target;
	}

	return 0;

bad_target:
	return r;
}

static void __exit dm_mirror_exit(void)
{
	dm_unregister_target(&mirror_target);
}

/* Module hooks */
module_init(dm_mirror_init);
module_exit(dm_mirror_exit);

MODULE_DESCRIPTION(DM_NAME " mirror target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");