/*
 * raid10.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 2000-2004 Neil Brown
 *
 * RAID-10 support for md.
 *
 * Based on code in raid1.c.  See raid1.c for further copyright information.
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include "md.h"
#include "raid10.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * RAID10 provides a combination of RAID0 and RAID1 functionality.
 * The layout of data is defined by
 *    chunk_size
 *    raid_disks
 *    near_copies (stored in low byte of layout)
 *    far_copies (stored in second byte of layout)
 *    far_offset (stored in bit 16 of layout)
 *    use_far_sets (stored in bit 17 of layout)
 *
 * The data to be stored is divided into chunks using chunksize.  Each device
 * is divided into far_copies sections.  In each section, chunks are laid out
 * in a style similar to raid0, but near_copies copies of each chunk are stored
 * (each on a different drive).  The starting device for each section is offset
 * near_copies from the starting device of the previous section.  Thus there
 * are (near_copies * far_copies) of each chunk, and each is on a different
 * drive.  near_copies and far_copies must be at least one, and their product
 * is at most raid_disks.
 *
 * If far_offset is true, then the far_copies are handled a bit differently.
 * The copies are still in different stripes, but instead of being very far
 * apart on disk, they are adjacent stripes.
 *
 * The far and offset algorithms are handled slightly differently if
 * 'use_far_sets' is true.  In this case, the array's devices are grouped into
 * sets that are (near_copies * far_copies) in size.  The far copied stripes
 * are still shifted by 'near_copies' devices, but this shifting stays confined
 * to the set rather than the entire array.  This is done to improve the number
 * of device combinations that can fail without causing the array to fail.
 * Example 'far' algorithm w/o 'use_far_sets' (each letter represents a chunk
 * on a device):
 *    A B C D    A B C D E
 *      ...         ...
 *    D A B C    E A B C D
 * Example 'far' algorithm w/ 'use_far_sets' enabled (sets illustrated w/ []'s):
 *    [A B] [C D]    [A B] [C D E]
 *    |...| |...|    |...| | ... |
 *    [B A] [D C]    [B A] [E C D]
 */
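
/*
 * Worked example (added for illustration, not part of the original
 * comment): with raid_disks = 4, near_copies = 2, far_copies = 2 and
 * far_offset = 0, chunks A, B, C, D, ... are laid out as
 *
 *	disk0  disk1  disk2  disk3
 *	  A      A      B      B	<- first section (near copies)
 *	  C      C      D      D
 *	 ...
 *	  B      B      A      A	<- second section (far copies; the
 *	  D      D      C      C	   start device shifts by near_copies)
 *
 * giving near_copies * far_copies = 4 copies of every chunk, each on a
 * different drive, matching the description above.
 */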

/*
 * Number of guaranteed r10bios in case of extreme VM load:
 */
#define	NR_RAID10_BIOS 256

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
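
/*
 * Illustrative note (added): because IO_BLOCKED and IO_MADE_GOOD are
 * sentinel values rather than real bio pointers, teardown paths guard
 * every bio_put() with this macro, e.g.
 *
 *	if (!BIO_SPECIAL(*bio))
 *		bio_put(*bio);
 *
 * as put_all_bios() below does.
 */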

/* When there are this many requests queued to be written by
 * the raid10 thread, we become 'congested' to provide back-pressure
 * for writeback.
 */
static int max_queued_requests = 1024;

static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
static int _enough(struct r10conf *conf, int previous, int ignore);
static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
static void end_reshape_write(struct bio *bio, int error);
static void end_reshape(struct r10conf *conf);

static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	int size = offsetof(struct r10bio, devs[conf->copies]);

	/* allocate an r10bio with room for raid_disks entries in the
	 * bios array */
	return kzalloc(size, gfp_flags);
}

static void r10bio_pool_free(void *r10_bio, void *data)
{
	kfree(r10_bio);
}
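
/*
 * Sketch (added for illustration; the actual call sits in the array
 * setup code later in this file, beyond this excerpt): the pair of
 * callbacks above backs a mempool, which is what guarantees
 * NR_RAID10_BIOS allocations even under extreme VM load:
 *
 *	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS,
 *					   r10bio_pool_alloc,
 *					   r10bio_pool_free, conf);
 */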

/* Maximum size of each resync request */
#define RESYNC_BLOCK_SIZE (64*1024)
#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
/* amount of memory to reserve for resync requests */
#define RESYNC_WINDOW (1024*1024)
/* maximum number of concurrent requests, memory permitting */
#define RESYNC_DEPTH (32*1024*1024/RESYNC_BLOCK_SIZE)
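
/*
 * Worked numbers (added; assuming 4KiB pages): RESYNC_PAGES =
 * (64KiB + 4KiB - 1) / 4KiB = 16 pages per resync bio, and
 * RESYNC_DEPTH = 32MiB / 64KiB = 512 concurrent resync requests at most.
 */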

/*
 * When performing a resync, we need to read and compare, so
 * we need as many pages as there are copies.
 * When performing a recovery, we need 2 bios, one for read,
 * one for write (we recover only one drive per r10buf)
 *
 */
static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
{
	struct r10conf *conf = data;
	struct page *page;
	struct r10bio *r10_bio;
	struct bio *bio;
	int i, j;
	int nalloc;

	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
	if (!r10_bio)
		return NULL;

	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &conf->mddev->recovery))
		nalloc = conf->copies; /* resync */
	else
		nalloc = 2; /* recovery */

	/*
	 * Allocate bios.
	 */
	for (j = nalloc ; j-- ; ) {
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].bio = bio;
		if (!conf->have_replacement)
			continue;
		bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
		if (!bio)
			goto out_free_bio;
		r10_bio->devs[j].repl_bio = bio;
	}
	/*
	 * Allocate RESYNC_PAGES data pages and attach them
	 * where needed.
	 */
	for (j = 0 ; j < nalloc; j++) {
		struct bio *rbio = r10_bio->devs[j].repl_bio;
		bio = r10_bio->devs[j].bio;
		for (i = 0; i < RESYNC_PAGES; i++) {
			if (j > 0 && !test_bit(MD_RECOVERY_SYNC,
					       &conf->mddev->recovery)) {
				/* we can share bv_pages during recovery
				 * and reshape */
				struct bio *rbio = r10_bio->devs[0].bio;
				page = rbio->bi_io_vec[i].bv_page;
				get_page(page);
			} else
				page = alloc_page(gfp_flags);
			if (unlikely(!page))
				goto out_free_pages;

			bio->bi_io_vec[i].bv_page = page;
			if (rbio)
				rbio->bi_io_vec[i].bv_page = page;
		}
	}

	return r10_bio;

out_free_pages:
	for ( ; i > 0 ; i--)
		safe_put_page(bio->bi_io_vec[i-1].bv_page);
	while (j--)
		for (i = 0; i < RESYNC_PAGES ; i++)
			safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
	j = 0;
out_free_bio:
	for ( ; j < nalloc; j++) {
		if (r10_bio->devs[j].bio)
			bio_put(r10_bio->devs[j].bio);
		if (r10_bio->devs[j].repl_bio)
			bio_put(r10_bio->devs[j].repl_bio);
	}
	r10bio_pool_free(r10_bio, conf);
	return NULL;
}

static void r10buf_pool_free(void *__r10_bio, void *data)
{
	int i;
	struct r10conf *conf = data;
	struct r10bio *r10bio = __r10_bio;
	int j;

	for (j=0; j < conf->copies; j++) {
		struct bio *bio = r10bio->devs[j].bio;
		if (bio) {
			for (i = 0; i < RESYNC_PAGES; i++) {
				safe_put_page(bio->bi_io_vec[i].bv_page);
				bio->bi_io_vec[i].bv_page = NULL;
			}
			bio_put(bio);
		}
		bio = r10bio->devs[j].repl_bio;
		if (bio)
			bio_put(bio);
	}
	r10bio_pool_free(r10bio, conf);
}

static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
{
	int i;

	for (i = 0; i < conf->copies; i++) {
		struct bio **bio = & r10_bio->devs[i].bio;
		if (!BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
		bio = &r10_bio->devs[i].repl_bio;
		if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
			bio_put(*bio);
		*bio = NULL;
	}
}

static void free_r10bio(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	put_all_bios(conf, r10_bio);
	mempool_free(r10_bio, conf->r10bio_pool);
}

static void put_buf(struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	mempool_free(r10_bio, conf->r10buf_pool);

	lower_barrier(conf);
}

static void reschedule_retry(struct r10bio *r10_bio)
{
	unsigned long flags;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;

	spin_lock_irqsave(&conf->device_lock, flags);
	list_add(&r10_bio->retry_list, &conf->retry_list);
	conf->nr_queued ++;
	spin_unlock_irqrestore(&conf->device_lock, flags);

	/* wake up frozen array... */
	wake_up(&conf->wait_barrier);

	md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void raid_end_bio_io(struct r10bio *r10_bio)
{
	struct bio *bio = r10_bio->master_bio;
	int done;
	struct r10conf *conf = r10_bio->mddev->private;

	if (bio->bi_phys_segments) {
		unsigned long flags;
		spin_lock_irqsave(&conf->device_lock, flags);
		bio->bi_phys_segments--;
		done = (bio->bi_phys_segments == 0);
		spin_unlock_irqrestore(&conf->device_lock, flags);
	} else
		done = 1;
	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		clear_bit(BIO_UPTODATE, &bio->bi_flags);
	if (done) {
		bio_endio(bio, 0);
		/*
		 * Wake up any possible resync thread that waits for the device
		 * to go idle.
		 */
		allow_barrier(conf);
	}
	free_r10bio(r10_bio);
}
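
/*
 * Accounting sketch (added): bi_phys_segments of the master bio is
 * repurposed as a count of outstanding sub-requests when a request has
 * to be split, e.g. around bad blocks.  The submission side (see
 * __make_request() below) sets it up roughly as
 *
 *	bio->bi_phys_segments = 0;		(common single-r10_bio case)
 *	...
 *	if (bio->bi_phys_segments == 0)
 *		bio->bi_phys_segments = 2;	(first split)
 *	else
 *		bio->bi_phys_segments++;
 *
 * and raid_end_bio_io() above decrements it under device_lock, ending
 * the master bio only when the count reaches zero.
 */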

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int slot, struct r10bio *r10_bio)
{
	struct r10conf *conf = r10_bio->mddev->private;

	conf->mirrors[r10_bio->devs[slot].devnum].head_position =
		r10_bio->devs[slot].addr + (r10_bio->sectors);
}

/*
 * Find the disk number which triggered the given bio
 */
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
			 struct bio *bio, int *slotp, int *replp)
{
	int slot;
	int repl = 0;

	for (slot = 0; slot < conf->copies; slot++) {
		if (r10_bio->devs[slot].bio == bio)
			break;
		if (r10_bio->devs[slot].repl_bio == bio) {
			repl = 1;
			break;
		}
	}

	BUG_ON(slot == conf->copies);
	update_head_pos(slot, r10_bio);

	if (slotp)
		*slotp = slot;
	if (replp)
		*replp = repl;
	return r10_bio->devs[slot].devnum;
}

static void raid10_end_read_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int slot, dev;
	struct md_rdev *rdev;
	struct r10conf *conf = r10_bio->mddev->private;

	slot = r10_bio->read_slot;
	dev = r10_bio->devs[slot].devnum;
	rdev = r10_bio->devs[slot].rdev;
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	update_head_pos(slot, r10_bio);

	if (uptodate) {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		set_bit(R10BIO_Uptodate, &r10_bio->state);
	} else {
		/* If all other devices that store this block have
		 * failed, we want to return the error upwards rather
		 * than fail the last device.  Here we redefine
		 * "uptodate" to mean "Don't want to retry"
		 */
		if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
			     rdev->raid_disk))
			uptodate = 1;
	}
	if (uptodate) {
		raid_end_bio_io(r10_bio);
		rdev_dec_pending(rdev, conf->mddev);
	} else {
		/*
		 * oops, read error - keep the refcount on the rdev
		 */
		char b[BDEVNAME_SIZE];
		printk_ratelimited(KERN_ERR
				   "md/raid10:%s: %s: rescheduling sector %llu\n",
				   mdname(conf->mddev),
				   bdevname(rdev->bdev, b),
				   (unsigned long long)r10_bio->sector);
		set_bit(R10BIO_ReadError, &r10_bio->state);
		reschedule_retry(r10_bio);
	}
}

static void close_write(struct r10bio *r10_bio)
{
	/* clear the bitmap if all writes complete successfully */
	bitmap_endwrite(r10_bio->mddev->bitmap, r10_bio->sector,
			r10_bio->sectors,
			!test_bit(R10BIO_Degraded, &r10_bio->state),
			0);
	md_write_end(r10_bio->mddev);
}

static void one_write_done(struct r10bio *r10_bio)
{
	if (atomic_dec_and_test(&r10_bio->remaining)) {
		if (test_bit(R10BIO_WriteError, &r10_bio->state))
			reschedule_retry(r10_bio);
		else {
			close_write(r10_bio);
			if (test_bit(R10BIO_MadeGood, &r10_bio->state))
				reschedule_retry(r10_bio);
			else
				raid_end_bio_io(r10_bio);
		}
	}
}

static void raid10_end_write_request(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	int dev;
	int dec_rdev = 1;
	struct r10conf *conf = r10_bio->mddev->private;
	int slot, repl;
	struct md_rdev *rdev = NULL;

	dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);

	if (repl)
		rdev = conf->mirrors[dev].replacement;
	if (!rdev) {
		smp_rmb();
		repl = 0;
		rdev = conf->mirrors[dev].rdev;
	}
	/*
	 * this branch is our 'one mirror IO has finished' event handler:
	 */
	if (!uptodate) {
		if (repl)
			/* Never record new bad blocks to replacement,
			 * just fail it.
			 */
			md_error(rdev->mddev, rdev);
		else {
			set_bit(WriteErrorSeen, &rdev->flags);
			if (!test_and_set_bit(WantReplacement, &rdev->flags))
				set_bit(MD_RECOVERY_NEEDED,
					&rdev->mddev->recovery);
			set_bit(R10BIO_WriteError, &r10_bio->state);
			dec_rdev = 0;
		}
	} else {
		/*
		 * Set R10BIO_Uptodate in our master bio, so that
		 * we will return a good error code to the higher
		 * levels even if IO on some other mirrored buffer fails.
		 *
		 * The 'master' represents the composite IO operation to
		 * user-side. So if something waits for IO, then it will
		 * wait for the 'master' bio.
		 */
		sector_t first_bad;
		int bad_sectors;

		/*
		 * Do not set R10BIO_Uptodate if the current device is
		 * rebuilding or Faulty.  This is because we cannot use
		 * such device for properly reading the data back (we could
		 * potentially use it, if the current write would have fallen
		 * before rdev->recovery_offset, but for simplicity we don't
		 * check this here.)
		 */
		if (test_bit(In_sync, &rdev->flags) &&
		    !test_bit(Faulty, &rdev->flags))
			set_bit(R10BIO_Uptodate, &r10_bio->state);

		/* Maybe we can clear some bad blocks. */
		if (is_badblock(rdev,
				r10_bio->devs[slot].addr,
				r10_bio->sectors,
				&first_bad, &bad_sectors)) {
			bio_put(bio);
			if (repl)
				r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
			else
				r10_bio->devs[slot].bio = IO_MADE_GOOD;
			dec_rdev = 0;
			set_bit(R10BIO_MadeGood, &r10_bio->state);
		}
	}

	/*
	 *
	 * Let's see if all mirrored write operations have finished
	 * already.
	 */
	one_write_done(r10_bio);
	if (dec_rdev)
		rdev_dec_pending(rdev, conf->mddev);
}

/*
 * RAID10 layout manager
 * As well as the chunksize and raid_disks count, there are two
 * parameters: near_copies and far_copies.
 * near_copies * far_copies must be <= raid_disks.
 * Normally one of these will be 1.
 * If both are 1, we get raid0.
 * If near_copies == raid_disks, we get raid1.
 *
 * Chunks are laid out in raid0 style with near_copies copies of the
 * first chunk, followed by near_copies copies of the next chunk and
 * so on.
 * If far_copies > 1, then after 1/far_copies of the array has been assigned
 * as described above, we start again with a device offset of near_copies.
 * So we effectively have another copy of the whole array further down all
 * the drives, but with blocks on different drives.
 * With this layout, a block is never stored twice on the same device.
 *
 * raid10_find_phys finds the sector offset of a given virtual sector
 * on each device that it is on.
 *
 * raid10_find_virt does the reverse mapping, from a device and a
 * sector offset to a virtual address
 */

static void __raid10_find_phys(struct geom *geo, struct r10bio *r10bio)
{
	int n,f;
	sector_t sector;
	sector_t chunk;
	sector_t stripe;
	int dev;
	int slot = 0;
	int last_far_set_start, last_far_set_size;

	last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
	last_far_set_start *= geo->far_set_size;

	last_far_set_size = geo->far_set_size;
	last_far_set_size += (geo->raid_disks % geo->far_set_size);

	/* now calculate first sector/dev */
	chunk = r10bio->sector >> geo->chunk_shift;
	sector = r10bio->sector & geo->chunk_mask;

	chunk *= geo->near_copies;
	stripe = chunk;
	dev = sector_div(stripe, geo->raid_disks);
	if (geo->far_offset)
		stripe *= geo->far_copies;

	sector += stripe << geo->chunk_shift;

	/* and calculate all the others */
	for (n = 0; n < geo->near_copies; n++) {
		int d = dev;
		int set;
		sector_t s = sector;
		r10bio->devs[slot].devnum = d;
		r10bio->devs[slot].addr = s;
		slot++;

		for (f = 1; f < geo->far_copies; f++) {
			set = d / geo->far_set_size;
			d += geo->near_copies;

			if ((geo->raid_disks % geo->far_set_size) &&
			    (d > last_far_set_start)) {
				d -= last_far_set_start;
				d %= last_far_set_size;
				d += last_far_set_start;
			} else {
				d %= geo->far_set_size;
				d += geo->far_set_size * set;
			}
			s += geo->stride;
			r10bio->devs[slot].devnum = d;
			r10bio->devs[slot].addr = s;
			slot++;
		}
		dev++;
		if (dev >= geo->raid_disks) {
			dev = 0;
			sector += (geo->chunk_mask + 1);
		}
	}
}

static void raid10_find_phys(struct r10conf *conf, struct r10bio *r10bio)
{
	struct geom *geo = &conf->geo;

	if (conf->reshape_progress != MaxSector &&
	    ((r10bio->sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards)) {
		set_bit(R10BIO_Previous, &r10bio->state);
		geo = &conf->prev;
	} else
		clear_bit(R10BIO_Previous, &r10bio->state);

	__raid10_find_phys(geo, r10bio);
}
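
/*
 * Worked trace (added for illustration): for virtual sector 0 with
 * geo->raid_disks = 4, geo->near_copies = 2, geo->far_copies = 2 and
 * geo->far_offset = 0, __raid10_find_phys() fills the slots as
 *
 *	slot 0: disk 0, addr 0			(near copy)
 *	slot 1: disk 2, addr geo->stride	(far copy, device shifted
 *						 by near_copies)
 *	slot 2: disk 1, addr 0			(near copy)
 *	slot 3: disk 3, addr geo->stride	(far copy)
 *
 * i.e. conf->copies = near_copies * far_copies = 4 slots, one per copy.
 */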

static sector_t raid10_find_virt(struct r10conf *conf, sector_t sector, int dev)
{
	sector_t offset, chunk, vchunk;
	/* Never use conf->prev as this is only called during resync
	 * or recovery, so reshape isn't happening
	 */
	struct geom *geo = &conf->geo;
	int far_set_start = (dev / geo->far_set_size) * geo->far_set_size;
	int far_set_size = geo->far_set_size;
	int last_far_set_start;

	if (geo->raid_disks % geo->far_set_size) {
		last_far_set_start = (geo->raid_disks / geo->far_set_size) - 1;
		last_far_set_start *= geo->far_set_size;

		if (dev >= last_far_set_start) {
			far_set_size = geo->far_set_size;
			far_set_size += (geo->raid_disks % geo->far_set_size);
			far_set_start = last_far_set_start;
		}
	}

	offset = sector & geo->chunk_mask;
	if (geo->far_offset) {
		int fc;
		chunk = sector >> geo->chunk_shift;
		fc = sector_div(chunk, geo->far_copies);
		dev -= fc * geo->near_copies;
		if (dev < far_set_start)
			dev += far_set_size;
	} else {
		while (sector >= geo->stride) {
			sector -= geo->stride;
			if (dev < (geo->near_copies + far_set_start))
				dev += far_set_size - geo->near_copies;
			else
				dev -= geo->near_copies;
		}
		chunk = sector >> geo->chunk_shift;
	}
	vchunk = chunk * geo->raid_disks + dev;
	sector_div(vchunk, geo->near_copies);
	return (vchunk << geo->chunk_shift) + offset;
}

/**
 *	raid10_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 *	This requires checking for end-of-chunk if near_copies != raid_disks,
 *	and for subordinate merge_bvec_fns if merge_check_needed.
 */
static int raid10_mergeable_bvec(struct request_queue *q,
				 struct bvec_merge_data *bvm,
				 struct bio_vec *biovec)
{
	struct mddev *mddev = q->queuedata;
	struct r10conf *conf = mddev->private;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors;
	unsigned int bio_sectors = bvm->bi_size >> 9;
	struct geom *geo = &conf->geo;

	chunk_sectors = (conf->geo.chunk_mask & conf->prev.chunk_mask) + 1;
	if (conf->reshape_progress != MaxSector &&
	    ((sector >= conf->reshape_progress) !=
	     conf->mddev->reshape_backwards))
		geo = &conf->prev;

	if (geo->near_copies < geo->raid_disks) {
		max = (chunk_sectors - ((sector & (chunk_sectors - 1))
					+ bio_sectors)) << 9;
		if (max < 0)
			/* bio_add cannot handle a negative return */
			max = 0;
		if (max <= biovec->bv_len && bio_sectors == 0)
			return biovec->bv_len;
	} else
		max = biovec->bv_len;

	if (mddev->merge_check_needed) {
		struct {
			struct r10bio r10_bio;
			struct r10dev devs[conf->copies];
		} on_stack;
		struct r10bio *r10_bio = &on_stack.r10_bio;
		int s;
		if (conf->reshape_progress != MaxSector) {
			/* Cannot give any guidance during reshape */
			if (max <= biovec->bv_len && bio_sectors == 0)
				return biovec->bv_len;
			return 0;
		}
		r10_bio->sector = sector;
		raid10_find_phys(conf, r10_bio);
		rcu_read_lock();
		for (s = 0; s < conf->copies; s++) {
			int disk = r10_bio->devs[s].devnum;
			struct md_rdev *rdev = rcu_dereference(
				conf->mirrors[disk].rdev);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
			rdev = rcu_dereference(conf->mirrors[disk].replacement);
			if (rdev && !test_bit(Faulty, &rdev->flags)) {
				struct request_queue *q =
					bdev_get_queue(rdev->bdev);
				if (q->merge_bvec_fn) {
					bvm->bi_sector = r10_bio->devs[s].addr
						+ rdev->data_offset;
					bvm->bi_bdev = rdev->bdev;
					max = min(max, q->merge_bvec_fn(
							  q, bvm, biovec));
				}
			}
		}
		rcu_read_unlock();
	}
	return max;
}

/*
 * This routine returns the disk from which the requested read should
 * be done.  There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly.  If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */

/*
 * FIXME: possibly should rethink readbalancing and do it differently
 * depending on near_copies / far_copies geometry.
 */
static struct md_rdev *read_balance(struct r10conf *conf,
				    struct r10bio *r10_bio,
				    int *max_sectors)
{
	const sector_t this_sector = r10_bio->sector;
	int disk, slot;
	int sectors = r10_bio->sectors;
	int best_good_sectors;
	sector_t new_distance, best_dist;
	struct md_rdev *best_rdev, *rdev = NULL;
	int do_balance;
	int best_slot;
	struct geom *geo = &conf->geo;

	raid10_find_phys(conf, r10_bio);
	rcu_read_lock();
retry:
	sectors = r10_bio->sectors;
	best_slot = -1;
	best_rdev = NULL;
	best_dist = MaxSector;
	best_good_sectors = 0;
	do_balance = 1;
	/*
	 * Check if we can balance. We can balance on the whole
	 * device if no resync is going on (recovery is ok), or below
	 * the resync window. We take the first readable disk when
	 * above the resync window.
	 */
	if (conf->mddev->recovery_cp < MaxSector
	    && (this_sector + sectors >= conf->next_resync))
		do_balance = 0;

	for (slot = 0; slot < conf->copies ; slot++) {
		sector_t first_bad;
		int bad_sectors;
		sector_t dev_sector;

		if (r10_bio->devs[slot].bio == IO_BLOCKED)
			continue;
		disk = r10_bio->devs[slot].devnum;
		rdev = rcu_dereference(conf->mirrors[disk].replacement);
		if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags) ||
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			rdev = rcu_dereference(conf->mirrors[disk].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags) ||
		    test_bit(Unmerged, &rdev->flags))
			continue;
		if (!test_bit(In_sync, &rdev->flags) &&
		    r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
			continue;

		dev_sector = r10_bio->devs[slot].addr;
		if (is_badblock(rdev, dev_sector, sectors,
				&first_bad, &bad_sectors)) {
			if (best_dist < MaxSector)
				/* Already have a better slot */
				continue;
			if (first_bad <= dev_sector) {
				/* Cannot read here.  If this is the
				 * 'primary' device, then we must not read
				 * beyond 'bad_sectors' from another device.
				 */
				bad_sectors -= (dev_sector - first_bad);
				if (!do_balance && sectors > bad_sectors)
					sectors = bad_sectors;
				if (best_good_sectors > sectors)
					best_good_sectors = sectors;
			} else {
				sector_t good_sectors =
					first_bad - dev_sector;
				if (good_sectors > best_good_sectors) {
					best_good_sectors = good_sectors;
					best_slot = slot;
					best_rdev = rdev;
				}
				if (!do_balance)
					/* Must read from here */
					break;
			}
			continue;
		} else
			best_good_sectors = sectors;

		if (!do_balance)
			break;

		/* This optimisation is debatable, and completely destroys
		 * sequential read speed for 'far copies' arrays.  So only
		 * keep it for 'near' arrays, and review those later.
		 */
		if (geo->near_copies > 1 && !atomic_read(&rdev->nr_pending))
			break;

		/* for far > 1 always use the lowest address */
		if (geo->far_copies > 1)
			new_distance = r10_bio->devs[slot].addr;
		else
			new_distance = abs(r10_bio->devs[slot].addr -
					   conf->mirrors[disk].head_position);
		if (new_distance < best_dist) {
			best_dist = new_distance;
			best_slot = slot;
			best_rdev = rdev;
		}
	}
	if (slot >= conf->copies) {
		slot = best_slot;
		rdev = best_rdev;
	}

	if (slot >= 0) {
		atomic_inc(&rdev->nr_pending);
		if (test_bit(Faulty, &rdev->flags)) {
			/* Cannot risk returning a device that failed
			 * before we inc'ed nr_pending
			 */
			rdev_dec_pending(rdev, conf->mddev);
			goto retry;
		}
		r10_bio->read_slot = slot;
	} else
		rdev = NULL;
	rcu_read_unlock();
	*max_sectors = best_good_sectors;

	return rdev;
}
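
/*
 * Usage sketch (added): read_balance() returns the chosen rdev with
 * nr_pending already incremented, so callers pair it with
 * rdev_dec_pending() once the read completes or is abandoned:
 *
 *	rdev = read_balance(conf, r10_bio, &max_sectors);
 *	if (!rdev)
 *		...fail the master bio...
 *	...submit up to max_sectors of the read to rdev...
 *	(on completion:) rdev_dec_pending(rdev, conf->mddev);
 */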

int md_raid10_congested(struct mddev *mddev, int bits)
{
	struct r10conf *conf = mddev->private;
	int i, ret = 0;

	if ((bits & (1 << BDI_async_congested)) &&
	    conf->pending_count >= max_queued_requests)
		return 1;

	rcu_read_lock();
	for (i = 0;
	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
		     && ret == 0;
	     i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && !test_bit(Faulty, &rdev->flags)) {
			struct request_queue *q = bdev_get_queue(rdev->bdev);

			ret |= bdi_congested(&q->backing_dev_info, bits);
		}
	}
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(md_raid10_congested);

static int raid10_congested(void *data, int bits)
{
	struct mddev *mddev = data;

	return mddev_congested(mddev, bits) ||
		md_raid10_congested(mddev, bits);
}

static void flush_pending_writes(struct r10conf *conf)
{
	/* Any writes that have been queued but are awaiting
	 * bitmap updates get flushed here.
	 */
	spin_lock_irq(&conf->device_lock);

	if (conf->pending_bio_list.head) {
		struct bio *bio;
		bio = bio_list_get(&conf->pending_bio_list);
		conf->pending_count = 0;
		spin_unlock_irq(&conf->device_lock);
		/* flush any pending bitmap writes to disk
		 * before proceeding w/ I/O */
		bitmap_unplug(conf->mddev->bitmap);
		wake_up(&conf->wait_barrier);

		while (bio) { /* submit pending writes */
			struct bio *next = bio->bi_next;
			bio->bi_next = NULL;
			if (unlikely((bio->bi_rw & REQ_DISCARD) &&
			    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
				/* Just ignore it */
				bio_endio(bio, 0);
			else
				generic_make_request(bio);
			bio = next;
		}
	} else
		spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 * is no background IO happening.  It must arrange to call
 * allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 * there is no normal IO happening.  It must arrange to call
 * lower_barrier when the particular background IO completes.
 */

static void raise_barrier(struct r10conf *conf, int force)
{
	BUG_ON(force && !conf->barrier);
	spin_lock_irq(&conf->resync_lock);

	/* Wait until no block IO is waiting (unless 'force') */
	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
			    conf->resync_lock);

	/* block any new IO from starting */
	conf->barrier++;

	/* Now wait for all pending IO to complete */
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
			    conf->resync_lock);

	spin_unlock_irq(&conf->resync_lock);
}

static void lower_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->barrier--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}

static void wait_barrier(struct r10conf *conf)
{
	spin_lock_irq(&conf->resync_lock);
	if (conf->barrier) {
		conf->nr_waiting++;
		/* Wait for the barrier to drop.
		 * However if there are already pending
		 * requests (preventing the barrier from
		 * rising completely), and the
		 * pre-process bio queue isn't empty,
		 * then don't wait, as we need to empty
		 * that queue to get the nr_pending
		 * count down.
		 */
		wait_event_lock_irq(conf->wait_barrier,
				    !conf->barrier ||
				    (conf->nr_pending &&
				     current->bio_list &&
				     !bio_list_empty(current->bio_list)),
				    conf->resync_lock);
		conf->nr_waiting--;
	}
	conf->nr_pending++;
	spin_unlock_irq(&conf->resync_lock);
}

static void allow_barrier(struct r10conf *conf)
{
	unsigned long flags;
	spin_lock_irqsave(&conf->resync_lock, flags);
	conf->nr_pending--;
	spin_unlock_irqrestore(&conf->resync_lock, flags);
	wake_up(&conf->wait_barrier);
}
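
/*
 * Pairing sketch (added): per the "Barriers...." comment above, the two
 * kinds of IO bracket themselves like this:
 *
 *	regular IO:			resync/recovery:
 *	wait_barrier(conf);		raise_barrier(conf, 0);
 *	...submit normal IO...		...do one resync/recovery IO...
 *	allow_barrier(conf);		lower_barrier(conf);
 *
 * (for resync buffers, lower_barrier() is reached via put_buf() above).
 */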

static void freeze_array(struct r10conf *conf, int extra)
{
	/* stop sync IO and normal IO and wait for everything to
	 * go quiet.
	 * We increment barrier and nr_waiting, and then
	 * wait until nr_pending matches nr_queued+extra
	 * This is called in the context of one normal IO request
	 * that has failed.  Thus any sync request that might be pending
	 * will be blocked by nr_pending, and we need to wait for
	 * pending IO requests to complete or be queued for re-try.
	 * Thus the number queued (nr_queued) plus this request (extra)
	 * must match the number of pending IOs (nr_pending) before
	 * we continue.
	 */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier++;
	conf->nr_waiting++;
	wait_event_lock_irq_cmd(conf->wait_barrier,
				conf->nr_pending == conf->nr_queued+extra,
				conf->resync_lock,
				flush_pending_writes(conf));

	spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r10conf *conf)
{
	/* reverse the effect of the freeze */
	spin_lock_irq(&conf->resync_lock);
	conf->barrier--;
	conf->nr_waiting--;
	wake_up(&conf->wait_barrier);
	spin_unlock_irq(&conf->resync_lock);
}
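
/*
 * Usage sketch (added, illustrative): error handlers quiesce the array
 * around device fix-ups; 'extra' is the number of in-flight requests the
 * caller itself still accounts for in nr_pending, e.g.
 *
 *	freeze_array(conf, 1);		(caller holds one pending request)
 *	...repair or retire the failing device...
 *	unfreeze_array(conf);
 */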

static sector_t choose_data_offset(struct r10bio *r10_bio,
				   struct md_rdev *rdev)
{
	if (!test_bit(MD_RECOVERY_RESHAPE, &rdev->mddev->recovery) ||
	    test_bit(R10BIO_Previous, &r10_bio->state))
		return rdev->data_offset;
	else
		return rdev->new_data_offset;
}

struct raid10_plug_cb {
	struct blk_plug_cb	cb;
	struct bio_list		pending;
	int			pending_cnt;
};

static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct raid10_plug_cb *plug = container_of(cb, struct raid10_plug_cb,
						   cb);
	struct mddev *mddev = plug->cb.data;
	struct r10conf *conf = mddev->private;
	struct bio *bio;

	if (from_schedule || current->bio_list) {
		spin_lock_irq(&conf->device_lock);
		bio_list_merge(&conf->pending_bio_list, &plug->pending);
		conf->pending_count += plug->pending_cnt;
		spin_unlock_irq(&conf->device_lock);
		wake_up(&conf->wait_barrier);
		md_wakeup_thread(mddev->thread);
		kfree(plug);
		return;
	}

	/* we aren't scheduling, so we can do the write-out directly. */
	bio = bio_list_get(&plug->pending);
	bitmap_unplug(mddev->bitmap);
	wake_up(&conf->wait_barrier);

	while (bio) { /* submit pending writes */
		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;
		if (unlikely((bio->bi_rw & REQ_DISCARD) &&
		    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
			/* Just ignore it */
			bio_endio(bio, 0);
		else
			generic_make_request(bio);
		bio = next;
	}
	kfree(plug);
}
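
/*
 * Note (added): the write path registers this callback through the block
 * layer's plugging API, roughly
 *
 *	cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
 *
 * so writes issued while a plug is held are batched and submitted here
 * when the plug is released.
 */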
1154
Kent Overstreet20d01892013-11-23 18:21:01 -08001155static void __make_request(struct mddev *mddev, struct bio *bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156{
NeilBrowne879a872011-10-11 16:49:02 +11001157 struct r10conf *conf = mddev->private;
NeilBrown9f2c9d12011-10-11 16:48:43 +11001158 struct r10bio *r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 struct bio *read_bio;
1160 int i;
Jens Axboea3623572005-11-01 09:26:16 +01001161 const int rw = bio_data_dir(bio);
NeilBrown2c7d46e2010-08-18 16:16:05 +10001162 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
Tejun Heoe9c74692010-09-03 11:56:18 +02001163 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
Shaohua Li532a2a32012-10-11 13:30:52 +11001164 const unsigned long do_discard = (bio->bi_rw
1165 & (REQ_DISCARD | REQ_SECURE));
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11001166 const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
NeilBrown6cce3b232006-01-06 00:20:16 -08001167 unsigned long flags;
NeilBrown3cb03002011-10-11 16:45:26 +11001168 struct md_rdev *blocked_rdev;
NeilBrown57c67df2012-10-11 13:32:13 +11001169 struct blk_plug_cb *cb;
1170 struct raid10_plug_cb *plug = NULL;
NeilBrownd4432c22011-07-28 11:39:24 +10001171 int sectors_handled;
1172 int max_sectors;
NeilBrown3ea7daa2012-05-22 13:53:47 +10001173 int sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174
NeilBrowncc13b1d2014-05-05 13:34:37 +10001175 /*
1176 * Register the new request and wait if the reconstruction
1177 * thread has put up a bar for new requests.
1178 * Continue immediately if no resync is active currently.
1179 */
1180 wait_barrier(conf);
1181
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08001182 sectors = bio_sectors(bio);
NeilBrown3ea7daa2012-05-22 13:53:47 +10001183 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
Kent Overstreet4f024f32013-10-11 15:44:27 -07001184 bio->bi_iter.bi_sector < conf->reshape_progress &&
1185 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10001186 /* IO spans the reshape position. Need to wait for
1187 * reshape to pass
1188 */
1189 allow_barrier(conf);
1190 wait_event(conf->wait_barrier,
Kent Overstreet4f024f32013-10-11 15:44:27 -07001191 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1192 conf->reshape_progress >= bio->bi_iter.bi_sector +
1193 sectors);
NeilBrown3ea7daa2012-05-22 13:53:47 +10001194 wait_barrier(conf);
1195 }
1196 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1197 bio_data_dir(bio) == WRITE &&
1198 (mddev->reshape_backwards
Kent Overstreet4f024f32013-10-11 15:44:27 -07001199 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1200 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1201 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1202 bio->bi_iter.bi_sector < conf->reshape_progress))) {
NeilBrown3ea7daa2012-05-22 13:53:47 +10001203 /* Need to update reshape_position in metadata */
1204 mddev->reshape_position = conf->reshape_progress;
1205 set_bit(MD_CHANGE_DEVS, &mddev->flags);
1206 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1207 md_wakeup_thread(mddev->thread);
1208 wait_event(mddev->sb_wait,
1209 !test_bit(MD_CHANGE_PENDING, &mddev->flags));
1210
1211 conf->reshape_safe = mddev->reshape_position;
1212 }
1213
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1215
1216 r10_bio->master_bio = bio;
NeilBrown3ea7daa2012-05-22 13:53:47 +10001217 r10_bio->sectors = sectors;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219 r10_bio->mddev = mddev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001220 r10_bio->sector = bio->bi_iter.bi_sector;
NeilBrown6cce3b232006-01-06 00:20:16 -08001221 r10_bio->state = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222
NeilBrown856e08e2011-07-28 11:39:23 +10001223 /* We might need to issue multiple reads to different
1224 * devices if there are bad blocks around, so we keep
1225 * track of the number of reads in bio->bi_phys_segments.
1226 * If this is 0, there is only one r10_bio and no locking
1227 * will be needed when the request completes. If it is
1228 * non-zero, then it is the number of not-completed requests.
1229 */
1230 bio->bi_phys_segments = 0;
1231 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1232
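	/* Editor's note (illustrative, not upstream text): bi_phys_segments
	 * acts as a completion count here.  For a hypothetical request
	 * that must be split once, the first split sets it to 2 (two
	 * r10_bios outstanding); each further split increments it, and
	 * each finishing r10_bio decrements it until the master bio can
	 * be ended.
	 */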
Jens Axboea3623572005-11-01 09:26:16 +01001233 if (rw == READ) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 /*
1235 * read balancing logic:
1236 */
NeilBrown96c3fd12011-12-23 10:17:54 +11001237 struct md_rdev *rdev;
NeilBrown856e08e2011-07-28 11:39:23 +10001238 int slot;
1239
1240read_again:
NeilBrown96c3fd12011-12-23 10:17:54 +11001241 rdev = read_balance(conf, r10_bio, &max_sectors);
1242 if (!rdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 raid_end_bio_io(r10_bio);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001244 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245 }
NeilBrown96c3fd12011-12-23 10:17:54 +11001246 slot = r10_bio->read_slot;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247
NeilBrowna167f662010-10-26 18:31:13 +11001248 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001249 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
Kent Overstreet6678d832013-08-07 11:14:32 -07001250 max_sectors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251
1252 r10_bio->devs[slot].bio = read_bio;
NeilBrownabbf0982011-12-23 10:17:54 +11001253 r10_bio->devs[slot].rdev = rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Kent Overstreet4f024f32013-10-11 15:44:27 -07001255 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
NeilBrownf8c9e742012-05-21 09:28:33 +10001256 choose_data_offset(r10_bio, rdev);
NeilBrown96c3fd12011-12-23 10:17:54 +11001257 read_bio->bi_bdev = rdev->bdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258 read_bio->bi_end_io = raid10_end_read_request;
Christoph Hellwig7b6d91d2010-08-07 18:20:39 +02001259 read_bio->bi_rw = READ | do_sync;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 read_bio->bi_private = r10_bio;
1261
NeilBrown856e08e2011-07-28 11:39:23 +10001262 if (max_sectors < r10_bio->sectors) {
1263 /* Could not read all from this device, so we will
1264 * need another r10_bio.
1265 */
NeilBrownb50c2592014-01-14 10:38:09 +11001266 sectors_handled = (r10_bio->sector + max_sectors
Kent Overstreet4f024f32013-10-11 15:44:27 -07001267 - bio->bi_iter.bi_sector);
NeilBrown856e08e2011-07-28 11:39:23 +10001268 r10_bio->sectors = max_sectors;
1269 spin_lock_irq(&conf->device_lock);
1270 if (bio->bi_phys_segments == 0)
1271 bio->bi_phys_segments = 2;
1272 else
1273 bio->bi_phys_segments++;
NeilBrownb50c2592014-01-14 10:38:09 +11001274 spin_unlock_irq(&conf->device_lock);
NeilBrown856e08e2011-07-28 11:39:23 +10001275 /* Cannot call generic_make_request directly
1276 * as that will be queued in __generic_make_request
1277 * and subsequent mempool_alloc might block
1278	 * waiting for it. So hand the bio over to raid10d.
1279 */
1280 reschedule_retry(r10_bio);
1281
1282 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1283
1284 r10_bio->master_bio = bio;
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08001285 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
NeilBrown856e08e2011-07-28 11:39:23 +10001286 r10_bio->state = 0;
1287 r10_bio->mddev = mddev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001288 r10_bio->sector = bio->bi_iter.bi_sector +
1289 sectors_handled;
NeilBrown856e08e2011-07-28 11:39:23 +10001290 goto read_again;
1291 } else
1292 generic_make_request(read_bio);
Christoph Hellwig5a7bbad2011-09-12 12:12:01 +02001293 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294 }
1295
1296 /*
1297 * WRITE:
1298 */
NeilBrown34db0cd2011-10-11 16:50:01 +11001299 if (conf->pending_count >= max_queued_requests) {
1300 md_wakeup_thread(mddev->thread);
1301 wait_event(conf->wait_barrier,
1302 conf->pending_count < max_queued_requests);
1303 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07001304 /* first select target devices under rcu_lock and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 * inc refcount on their rdev. Record them by setting
1306	 * bios[x] to bio.
NeilBrownd4432c22011-07-28 11:39:24 +10001307 * If there are known/acknowledged bad blocks on any device
1308 * on which we have seen a write error, we want to avoid
1309 * writing to those blocks. This potentially requires several
1310 * writes to write around the bad blocks. Each set of writes
1311 * gets its own r10_bio with a set of bios attached. The number
1312	 * of r10_bios is recorded in bio->bi_phys_segments just as with
1313 * the read case.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314 */
NeilBrownc3b328a2011-04-18 18:25:43 +10001315
NeilBrown69335ef2011-12-23 10:17:54 +11001316 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317 raid10_find_phys(conf, r10_bio);
NeilBrownd4432c22011-07-28 11:39:24 +10001318retry_write:
Harvey Harrisoncb6969e2008-05-06 20:42:32 -07001319 blocked_rdev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 rcu_read_lock();
NeilBrownd4432c22011-07-28 11:39:24 +10001321 max_sectors = r10_bio->sectors;
1322
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 for (i = 0; i < conf->copies; i++) {
1324 int d = r10_bio->devs[i].devnum;
NeilBrown3cb03002011-10-11 16:45:26 +11001325 struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
NeilBrown475b0322011-12-23 10:17:55 +11001326 struct md_rdev *rrdev = rcu_dereference(
1327 conf->mirrors[d].replacement);
NeilBrown4ca40c22011-12-23 10:17:55 +11001328 if (rdev == rrdev)
1329 rrdev = NULL;
Dan Williams6bfe0b42008-04-30 00:52:32 -07001330 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1331 atomic_inc(&rdev->nr_pending);
1332 blocked_rdev = rdev;
1333 break;
1334 }
NeilBrown475b0322011-12-23 10:17:55 +11001335 if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
1336 atomic_inc(&rrdev->nr_pending);
1337 blocked_rdev = rrdev;
1338 break;
1339 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001340 if (rdev && (test_bit(Faulty, &rdev->flags)
1341 || test_bit(Unmerged, &rdev->flags)))
1342 rdev = NULL;
NeilBrown050b6612012-03-19 12:46:39 +11001343 if (rrdev && (test_bit(Faulty, &rrdev->flags)
1344 || test_bit(Unmerged, &rrdev->flags)))
NeilBrown475b0322011-12-23 10:17:55 +11001345 rrdev = NULL;
1346
NeilBrownd4432c22011-07-28 11:39:24 +10001347 r10_bio->devs[i].bio = NULL;
NeilBrown475b0322011-12-23 10:17:55 +11001348 r10_bio->devs[i].repl_bio = NULL;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001349
1350 if (!rdev && !rrdev) {
NeilBrown6cce3b232006-01-06 00:20:16 -08001351 set_bit(R10BIO_Degraded, &r10_bio->state);
NeilBrownd4432c22011-07-28 11:39:24 +10001352 continue;
NeilBrown6cce3b232006-01-06 00:20:16 -08001353 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001354 if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
NeilBrownd4432c22011-07-28 11:39:24 +10001355 sector_t first_bad;
1356 sector_t dev_sector = r10_bio->devs[i].addr;
1357 int bad_sectors;
1358 int is_bad;
1359
1360 is_bad = is_badblock(rdev, dev_sector,
1361 max_sectors,
1362 &first_bad, &bad_sectors);
1363 if (is_bad < 0) {
1364 /* Mustn't write here until the bad block
1365 * is acknowledged
1366 */
1367 atomic_inc(&rdev->nr_pending);
1368 set_bit(BlockedBadBlocks, &rdev->flags);
1369 blocked_rdev = rdev;
1370 break;
1371 }
1372 if (is_bad && first_bad <= dev_sector) {
1373 /* Cannot write here at all */
1374 bad_sectors -= (dev_sector - first_bad);
1375 if (bad_sectors < max_sectors)
1376 /* Mustn't write more than bad_sectors
1377 * to other devices yet
1378 */
1379 max_sectors = bad_sectors;
1380 /* We don't set R10BIO_Degraded as that
1381 * only applies if the disk is missing,
1382 * so it might be re-added, and we want to
1383 * know to recover this chunk.
1384 * In this case the device is here, and the
1385 * fact that this chunk is not in-sync is
1386 * recorded in the bad block log.
1387 */
1388 continue;
1389 }
1390 if (is_bad) {
1391 int good_sectors = first_bad - dev_sector;
1392 if (good_sectors < max_sectors)
1393 max_sectors = good_sectors;
1394 }
1395 }
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001396 if (rdev) {
1397 r10_bio->devs[i].bio = bio;
1398 atomic_inc(&rdev->nr_pending);
1399 }
NeilBrown475b0322011-12-23 10:17:55 +11001400 if (rrdev) {
1401 r10_bio->devs[i].repl_bio = bio;
1402 atomic_inc(&rrdev->nr_pending);
1403 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001404 }
1405 rcu_read_unlock();
1406
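	/* Editor's note (worked example, hypothetical numbers): if a
	 * write maps to device sectors 100..115 (max_sectors = 16) and
	 * is_badblock() reported first_bad = 108, bad_sectors = 4, the
	 * loop above clamps max_sectors to 108 - 100 = 8; sectors
	 * 108..115 are left for a later r10_bio.  If instead
	 * first_bad <= 100, this device is skipped for the range and
	 * max_sectors is clamped to the bad run so the other copies do
	 * not get ahead of it.
	 */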
Dan Williams6bfe0b42008-04-30 00:52:32 -07001407 if (unlikely(blocked_rdev)) {
1408 /* Have to wait for this device to get unblocked, then retry */
1409 int j;
1410 int d;
1411
NeilBrown475b0322011-12-23 10:17:55 +11001412 for (j = 0; j < i; j++) {
Dan Williams6bfe0b42008-04-30 00:52:32 -07001413 if (r10_bio->devs[j].bio) {
1414 d = r10_bio->devs[j].devnum;
1415 rdev_dec_pending(conf->mirrors[d].rdev, mddev);
1416 }
NeilBrown475b0322011-12-23 10:17:55 +11001417 if (r10_bio->devs[j].repl_bio) {
NeilBrown4ca40c22011-12-23 10:17:55 +11001418 struct md_rdev *rdev;
NeilBrown475b0322011-12-23 10:17:55 +11001419 d = r10_bio->devs[j].devnum;
NeilBrown4ca40c22011-12-23 10:17:55 +11001420 rdev = conf->mirrors[d].replacement;
1421 if (!rdev) {
1422 /* Race with remove_disk */
1423 smp_mb();
1424 rdev = conf->mirrors[d].rdev;
1425 }
1426 rdev_dec_pending(rdev, mddev);
NeilBrown475b0322011-12-23 10:17:55 +11001427 }
1428 }
Dan Williams6bfe0b42008-04-30 00:52:32 -07001429 allow_barrier(conf);
1430 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1431 wait_barrier(conf);
1432 goto retry_write;
1433 }
1434
NeilBrownd4432c22011-07-28 11:39:24 +10001435 if (max_sectors < r10_bio->sectors) {
1436 /* We are splitting this into multiple parts, so
1437 * we need to prepare for allocating another r10_bio.
1438 */
1439 r10_bio->sectors = max_sectors;
1440 spin_lock_irq(&conf->device_lock);
1441 if (bio->bi_phys_segments == 0)
1442 bio->bi_phys_segments = 2;
1443 else
1444 bio->bi_phys_segments++;
1445 spin_unlock_irq(&conf->device_lock);
1446 }
Kent Overstreet4f024f32013-10-11 15:44:27 -07001447 sectors_handled = r10_bio->sector + max_sectors -
1448 bio->bi_iter.bi_sector;
NeilBrownd4432c22011-07-28 11:39:24 +10001449
NeilBrown4e780642010-10-19 12:54:01 +11001450 atomic_set(&r10_bio->remaining, 1);
NeilBrownd4432c22011-07-28 11:39:24 +10001451 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
NeilBrown06d91a52005-06-21 17:17:12 -07001452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 for (i = 0; i < conf->copies; i++) {
1454 struct bio *mbio;
1455 int d = r10_bio->devs[i].devnum;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001456 if (r10_bio->devs[i].bio) {
1457 struct md_rdev *rdev = conf->mirrors[d].rdev;
1458 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001459 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
Kent Overstreet6678d832013-08-07 11:14:32 -07001460 max_sectors);
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001461 r10_bio->devs[i].bio = mbio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462
Kent Overstreet4f024f32013-10-11 15:44:27 -07001463 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001464 choose_data_offset(r10_bio,
1465 rdev));
1466 mbio->bi_bdev = rdev->bdev;
1467 mbio->bi_end_io = raid10_end_write_request;
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11001468 mbio->bi_rw =
1469 WRITE | do_sync | do_fua | do_discard | do_same;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001470 mbio->bi_private = r10_bio;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001472 atomic_inc(&r10_bio->remaining);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001474 cb = blk_check_plugged(raid10_unplug, mddev,
1475 sizeof(*plug));
1476 if (cb)
1477 plug = container_of(cb, struct raid10_plug_cb,
1478 cb);
1479 else
1480 plug = NULL;
1481 spin_lock_irqsave(&conf->device_lock, flags);
1482 if (plug) {
1483 bio_list_add(&plug->pending, mbio);
1484 plug->pending_cnt++;
1485 } else {
1486 bio_list_add(&conf->pending_bio_list, mbio);
1487 conf->pending_count++;
1488 }
1489 spin_unlock_irqrestore(&conf->device_lock, flags);
1490 if (!plug)
1491 md_wakeup_thread(mddev->thread);
1492 }
NeilBrown57c67df2012-10-11 13:32:13 +11001493
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001494 if (r10_bio->devs[i].repl_bio) {
1495 struct md_rdev *rdev = conf->mirrors[d].replacement;
1496 if (rdev == NULL) {
1497 /* Replacement just got moved to main 'rdev' */
1498 smp_mb();
1499 rdev = conf->mirrors[d].rdev;
1500 }
1501 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
Kent Overstreet4f024f32013-10-11 15:44:27 -07001502 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
Kent Overstreet6678d832013-08-07 11:14:32 -07001503 max_sectors);
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001504 r10_bio->devs[i].repl_bio = mbio;
1505
Kent Overstreet4f024f32013-10-11 15:44:27 -07001506 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001507 choose_data_offset(
1508 r10_bio, rdev));
1509 mbio->bi_bdev = rdev->bdev;
1510 mbio->bi_end_io = raid10_end_write_request;
Joe Lawrencec8dc9c62013-02-21 13:28:09 +11001511 mbio->bi_rw =
1512 WRITE | do_sync | do_fua | do_discard | do_same;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001513 mbio->bi_private = r10_bio;
1514
1515 atomic_inc(&r10_bio->remaining);
1516 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrown57c67df2012-10-11 13:32:13 +11001517 bio_list_add(&conf->pending_bio_list, mbio);
1518 conf->pending_count++;
NeilBrowne7c0c3f2012-11-22 14:42:49 +11001519 spin_unlock_irqrestore(&conf->device_lock, flags);
1520 if (!mddev_check_plugged(mddev))
1521 md_wakeup_thread(mddev->thread);
NeilBrown57c67df2012-10-11 13:32:13 +11001522 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 }
1524
NeilBrown079fa162011-09-10 17:21:23 +10001525 /* Don't remove the bias on 'remaining' (one_write_done) until
1526 * after checking if we need to go around again.
1527 */
NeilBrowna35e63e2008-03-04 14:29:29 -08001528
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08001529 if (sectors_handled < bio_sectors(bio)) {
NeilBrown079fa162011-09-10 17:21:23 +10001530 one_write_done(r10_bio);
NeilBrown5e570282011-07-28 11:39:25 +10001531 /* We need another r10_bio. It has already been counted
NeilBrownd4432c22011-07-28 11:39:24 +10001532 * in bio->bi_phys_segments.
1533 */
1534 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
1535
1536 r10_bio->master_bio = bio;
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08001537 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
NeilBrownd4432c22011-07-28 11:39:24 +10001538
1539 r10_bio->mddev = mddev;
Kent Overstreet4f024f32013-10-11 15:44:27 -07001540 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
NeilBrownd4432c22011-07-28 11:39:24 +10001541 r10_bio->state = 0;
1542 goto retry_write;
1543 }
NeilBrown079fa162011-09-10 17:21:23 +10001544 one_write_done(r10_bio);
Kent Overstreet20d01892013-11-23 18:21:01 -08001545}
1546
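/*
 * Editor's sketch, not upstream code: the split bookkeeping that both
 * the read and write paths above share, reduced to plain C with
 * hypothetical names.  Only the arithmetic mirrors the driver: the
 * handled prefix is measured from the master bio's starting sector,
 * and the next r10_bio resumes right after it.
 */
static inline unsigned long __maybe_unused
raid10_demo_sectors_handled(unsigned long bio_sector,
			    unsigned long r10_sector,
			    unsigned long max_sectors)
{
	/* e.g. bio_sector = r10_sector = 1000, max_sectors = 8:
	 * 8 sectors are handled and the next r10_bio starts at 1008.
	 */
	return r10_sector + max_sectors - bio_sector;
}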
1547static void make_request(struct mddev *mddev, struct bio *bio)
1548{
1549 struct r10conf *conf = mddev->private;
1550 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1551 int chunk_sects = chunk_mask + 1;
1552
1553 struct bio *split;
1554
1555 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1556 md_flush_request(mddev, bio);
1557 return;
1558 }
1559
1560 md_write_start(mddev, bio);
1561
Kent Overstreet20d01892013-11-23 18:21:01 -08001562
1563 do {
1564
1565 /*
1566 * If this request crosses a chunk boundary, we need to split
1567 * it.
1568 */
1569 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1570 bio_sectors(bio) > chunk_sects
1571 && (conf->geo.near_copies < conf->geo.raid_disks
1572 || conf->prev.near_copies <
1573 conf->prev.raid_disks))) {
1574 split = bio_split(bio, chunk_sects -
1575 (bio->bi_iter.bi_sector &
1576 (chunk_sects - 1)),
1577 GFP_NOIO, fs_bio_set);
1578 bio_chain(split, bio);
1579 } else {
1580 split = bio;
1581 }
1582
1583 __make_request(mddev, split);
1584 } while (split != bio);
NeilBrown079fa162011-09-10 17:21:23 +10001585
1586 /* In case raid10d snuck in to freeze_array */
1587 wake_up(&conf->wait_barrier);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588}
1589
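/*
 * Editor's sketch (hypothetical helper, not in the driver): the length
 * of the first fragment when make_request() above splits a bio at a
 * chunk boundary.  chunk_sects is a power of two, so the AND mask
 * extracts the offset within the current chunk.
 */
static inline sector_t __maybe_unused
raid10_demo_split_len(sector_t bi_sector, unsigned int chunk_sects)
{
	/* e.g. chunk_sects = 128, bi_sector = 300: 300 & 127 = 44
	 * sectors already lie in this chunk, so the first fragment
	 * carries 128 - 44 = 84 sectors.
	 */
	return chunk_sects - (bi_sector & (chunk_sects - 1));
}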
NeilBrownfd01b882011-10-11 16:47:53 +11001590static void status(struct seq_file *seq, struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591{
NeilBrowne879a872011-10-11 16:49:02 +11001592 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 int i;
1594
NeilBrown5cf00fc2012-05-21 09:28:20 +10001595 if (conf->geo.near_copies < conf->geo.raid_disks)
Andre Noll9d8f0362009-06-18 08:45:01 +10001596 seq_printf(seq, " %dK chunks", mddev->chunk_sectors / 2);
NeilBrown5cf00fc2012-05-21 09:28:20 +10001597 if (conf->geo.near_copies > 1)
1598 seq_printf(seq, " %d near-copies", conf->geo.near_copies);
1599 if (conf->geo.far_copies > 1) {
1600 if (conf->geo.far_offset)
1601 seq_printf(seq, " %d offset-copies", conf->geo.far_copies);
NeilBrownc93983b2006-06-26 00:27:41 -07001602 else
NeilBrown5cf00fc2012-05-21 09:28:20 +10001603 seq_printf(seq, " %d far-copies", conf->geo.far_copies);
NeilBrownc93983b2006-06-26 00:27:41 -07001604 }
NeilBrown5cf00fc2012-05-21 09:28:20 +10001605 seq_printf(seq, " [%d/%d] [", conf->geo.raid_disks,
1606 conf->geo.raid_disks - mddev->degraded);
1607 for (i = 0; i < conf->geo.raid_disks; i++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 seq_printf(seq, "%s",
1609 conf->mirrors[i].rdev &&
NeilBrownb2d444d2005-11-08 21:39:31 -08001610 test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001611 seq_printf(seq, "]");
1612}
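/* Editor's note (hypothetical output): for a healthy 4-drive array
 * with 64K chunks and two near-copies, status() above would emit
 * something like " 64K chunks 2 near-copies [4/4] [UUUU]".
 */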
1613
NeilBrown700c7212011-07-27 11:00:36 +10001614/* check if there are enough drives for
1615 * every block to appear on at least one.
1616 * Don't consider the device numbered 'ignore'
1617 * as we might be about to remove it.
1618 */
NeilBrown635f6412013-06-11 14:57:09 +10001619static int _enough(struct r10conf *conf, int previous, int ignore)
NeilBrown700c7212011-07-27 11:00:36 +10001620{
1621 int first = 0;
NeilBrown725d6e52013-06-11 15:08:03 +10001622 int has_enough = 0;
NeilBrown635f6412013-06-11 14:57:09 +10001623 int disks, ncopies;
1624 if (previous) {
1625 disks = conf->prev.raid_disks;
1626 ncopies = conf->prev.near_copies;
1627 } else {
1628 disks = conf->geo.raid_disks;
1629 ncopies = conf->geo.near_copies;
1630 }
NeilBrown700c7212011-07-27 11:00:36 +10001631
NeilBrown725d6e52013-06-11 15:08:03 +10001632 rcu_read_lock();
NeilBrown700c7212011-07-27 11:00:36 +10001633 do {
1634 int n = conf->copies;
1635 int cnt = 0;
NeilBrown80b48122012-09-27 12:35:21 +10001636 int this = first;
NeilBrown700c7212011-07-27 11:00:36 +10001637 while (n--) {
NeilBrown725d6e52013-06-11 15:08:03 +10001638 struct md_rdev *rdev;
1639 if (this != ignore &&
1640 (rdev = rcu_dereference(conf->mirrors[this].rdev)) &&
1641 test_bit(In_sync, &rdev->flags))
NeilBrown700c7212011-07-27 11:00:36 +10001642 cnt++;
NeilBrown635f6412013-06-11 14:57:09 +10001643 this = (this+1) % disks;
NeilBrown700c7212011-07-27 11:00:36 +10001644 }
1645 if (cnt == 0)
NeilBrown725d6e52013-06-11 15:08:03 +10001646 goto out;
NeilBrown635f6412013-06-11 14:57:09 +10001647 first = (first + ncopies) % disks;
NeilBrown700c7212011-07-27 11:00:36 +10001648 } while (first != 0);
NeilBrown725d6e52013-06-11 15:08:03 +10001649 has_enough = 1;
1650out:
1651 rcu_read_unlock();
1652 return has_enough;
NeilBrown700c7212011-07-27 11:00:36 +10001653}
1654
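/*
 * Editor's sketch (plain-C model with hypothetical names): the
 * coverage walk _enough() performs above.  Each chunk lives on
 * 'copies' consecutive devices; if every device in such a group is
 * dead, data has been lost.  Successive groups start 'ncopies'
 * devices further along, modulo 'disks'.
 */
static int __maybe_unused
raid10_demo_enough(const int *in_sync, int disks, int copies,
		   int ncopies, int ignore)
{
	int first = 0;

	do {
		int n = copies, cnt = 0, this = first;

		while (n--) {
			if (this != ignore && in_sync[this])
				cnt++;
			this = (this + 1) % disks;
		}
		if (cnt == 0)
			return 0;	/* some chunk has no live copy */
		first = (first + ncopies) % disks;
	} while (first != 0);
	return 1;
}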
NeilBrownf8c9e742012-05-21 09:28:33 +10001655static int enough(struct r10conf *conf, int ignore)
1656{
NeilBrown635f6412013-06-11 14:57:09 +10001657 /* when calling 'enough', both 'prev' and 'geo' must
1658 * be stable.
1659 * This is ensured if ->reconfig_mutex or ->device_lock
1660 * is held.
1661 */
1662 return _enough(conf, 0, ignore) &&
1663 _enough(conf, 1, ignore);
NeilBrownf8c9e742012-05-21 09:28:33 +10001664}
1665
NeilBrownfd01b882011-10-11 16:47:53 +11001666static void error(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667{
1668 char b[BDEVNAME_SIZE];
NeilBrowne879a872011-10-11 16:49:02 +11001669 struct r10conf *conf = mddev->private;
NeilBrown635f6412013-06-11 14:57:09 +10001670 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671
1672 /*
1673 * If it is not operational, then we have already marked it as dead
1674	 * else if it is the last working disk, ignore the error, let the
1675 * next level up know.
1676 * else mark the drive as failed
1677 */
NeilBrown635f6412013-06-11 14:57:09 +10001678 spin_lock_irqsave(&conf->device_lock, flags);
NeilBrownb2d444d2005-11-08 21:39:31 -08001679 if (test_bit(In_sync, &rdev->flags)
NeilBrown635f6412013-06-11 14:57:09 +10001680 && !enough(conf, rdev->raid_disk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681 /*
1682 * Don't fail the drive, just return an IO error.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683 */
NeilBrownc04be0a2006-10-03 01:15:53 -07001684 spin_unlock_irqrestore(&conf->device_lock, flags);
NeilBrown635f6412013-06-11 14:57:09 +10001685 return;
1686 }
1687 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1688 mddev->degraded++;
1689 /*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 * if recovery is running, make sure it aborts.
1691 */
NeilBrowndfc70642008-05-23 13:04:39 -07001692 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 }
NeilBrownde393cd2011-07-28 11:31:48 +10001694 set_bit(Blocked, &rdev->flags);
NeilBrownb2d444d2005-11-08 21:39:31 -08001695 set_bit(Faulty, &rdev->flags);
NeilBrown850b2b42006-10-03 01:15:46 -07001696 set_bit(MD_CHANGE_DEVS, &mddev->flags);
NeilBrown635f6412013-06-11 14:57:09 +10001697 spin_unlock_irqrestore(&conf->device_lock, flags);
Joe Perches067032b2011-01-14 09:14:33 +11001698 printk(KERN_ALERT
1699 "md/raid10:%s: Disk failure on %s, disabling device.\n"
1700 "md/raid10:%s: Operation continuing on %d devices.\n",
NeilBrown128595e2010-05-03 14:47:14 +10001701 mdname(mddev), bdevname(rdev->bdev, b),
NeilBrown5cf00fc2012-05-21 09:28:20 +10001702 mdname(mddev), conf->geo.raid_disks - mddev->degraded);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703}
1704
NeilBrowne879a872011-10-11 16:49:02 +11001705static void print_conf(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
1707 int i;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001708 struct raid10_info *tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
NeilBrown128595e2010-05-03 14:47:14 +10001710 printk(KERN_DEBUG "RAID10 conf printout:\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 if (!conf) {
NeilBrown128595e2010-05-03 14:47:14 +10001712 printk(KERN_DEBUG "(!conf)\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713 return;
1714 }
NeilBrown5cf00fc2012-05-21 09:28:20 +10001715 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->geo.raid_disks - conf->mddev->degraded,
1716 conf->geo.raid_disks);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
NeilBrown5cf00fc2012-05-21 09:28:20 +10001718 for (i = 0; i < conf->geo.raid_disks; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 char b[BDEVNAME_SIZE];
1720 tmp = conf->mirrors + i;
1721 if (tmp->rdev)
NeilBrown128595e2010-05-03 14:47:14 +10001722 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n",
NeilBrownb2d444d2005-11-08 21:39:31 -08001723 i, !test_bit(In_sync, &tmp->rdev->flags),
1724 !test_bit(Faulty, &tmp->rdev->flags),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 bdevname(tmp->rdev->bdev,b));
1726 }
1727}
1728
NeilBrowne879a872011-10-11 16:49:02 +11001729static void close_sync(struct r10conf *conf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730{
NeilBrown0a27ec92006-01-06 00:20:13 -08001731 wait_barrier(conf);
1732 allow_barrier(conf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733
1734 mempool_destroy(conf->r10buf_pool);
1735 conf->r10buf_pool = NULL;
1736}
1737
NeilBrownfd01b882011-10-11 16:47:53 +11001738static int raid10_spare_active(struct mddev *mddev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739{
1740 int i;
NeilBrowne879a872011-10-11 16:49:02 +11001741 struct r10conf *conf = mddev->private;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001742 struct raid10_info *tmp;
NeilBrown6b965622010-08-18 11:56:59 +10001743 int count = 0;
1744 unsigned long flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
1746 /*
1747 * Find all non-in_sync disks within the RAID10 configuration
1748 * and mark them in_sync
1749 */
NeilBrown5cf00fc2012-05-21 09:28:20 +10001750 for (i = 0; i < conf->geo.raid_disks; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 tmp = conf->mirrors + i;
NeilBrown4ca40c22011-12-23 10:17:55 +11001752 if (tmp->replacement
1753 && tmp->replacement->recovery_offset == MaxSector
1754 && !test_bit(Faulty, &tmp->replacement->flags)
1755 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
1756 /* Replacement has just become active */
1757 if (!tmp->rdev
1758 || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
1759 count++;
1760 if (tmp->rdev) {
1761 /* Replaced device not technically faulty,
1762 * but we need to be sure it gets removed
1763 * and never re-added.
1764 */
1765 set_bit(Faulty, &tmp->rdev->flags);
1766 sysfs_notify_dirent_safe(
1767 tmp->rdev->sysfs_state);
1768 }
1769 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
1770 } else if (tmp->rdev
Lukasz Dorau61e49472013-10-24 12:55:17 +11001771 && tmp->rdev->recovery_offset == MaxSector
NeilBrown4ca40c22011-12-23 10:17:55 +11001772 && !test_bit(Faulty, &tmp->rdev->flags)
1773 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
NeilBrown6b965622010-08-18 11:56:59 +10001774 count++;
Jonathan Brassow2863b9e2012-10-11 13:38:58 +11001775 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 }
1777 }
NeilBrown6b965622010-08-18 11:56:59 +10001778 spin_lock_irqsave(&conf->device_lock, flags);
1779 mddev->degraded -= count;
1780 spin_unlock_irqrestore(&conf->device_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781
1782 print_conf(conf);
NeilBrown6b965622010-08-18 11:56:59 +10001783 return count;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784}
1785
1786
NeilBrownfd01b882011-10-11 16:47:53 +11001787static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788{
NeilBrowne879a872011-10-11 16:49:02 +11001789 struct r10conf *conf = mddev->private;
Neil Brown199050e2008-06-28 08:31:33 +10001790 int err = -EEXIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 int mirror;
Neil Brown6c2fce22008-06-28 08:31:31 +10001792 int first = 0;
NeilBrown5cf00fc2012-05-21 09:28:20 +10001793 int last = conf->geo.raid_disks - 1;
NeilBrown050b6612012-03-19 12:46:39 +11001794 struct request_queue *q = bdev_get_queue(rdev->bdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795
1796 if (mddev->recovery_cp < MaxSector)
1797 /* only hot-add to in-sync arrays, as recovery is
1798 * very different from resync
1799 */
Neil Brown199050e2008-06-28 08:31:33 +10001800 return -EBUSY;
NeilBrown635f6412013-06-11 14:57:09 +10001801 if (rdev->saved_raid_disk < 0 && !_enough(conf, 1, -1))
Neil Brown199050e2008-06-28 08:31:33 +10001802 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
NeilBrowna53a6c82008-11-06 17:28:20 +11001804 if (rdev->raid_disk >= 0)
Neil Brown6c2fce22008-06-28 08:31:31 +10001805 first = last = rdev->raid_disk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
NeilBrown050b6612012-03-19 12:46:39 +11001807 if (q->merge_bvec_fn) {
1808 set_bit(Unmerged, &rdev->flags);
1809 mddev->merge_check_needed = 1;
1810 }
1811
Namhyung Kim2c4193d2011-07-18 17:38:43 +10001812 if (rdev->saved_raid_disk >= first &&
NeilBrown6cce3b232006-01-06 00:20:16 -08001813 conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1814 mirror = rdev->saved_raid_disk;
1815 else
Neil Brown6c2fce22008-06-28 08:31:31 +10001816 mirror = first;
NeilBrown2bb77732011-07-27 11:00:36 +10001817 for ( ; mirror <= last ; mirror++) {
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001818 struct raid10_info *p = &conf->mirrors[mirror];
NeilBrown2bb77732011-07-27 11:00:36 +10001819 if (p->recovery_disabled == mddev->recovery_disabled)
1820 continue;
NeilBrownb7044d42011-12-23 10:17:56 +11001821 if (p->rdev) {
1822 if (!test_bit(WantReplacement, &p->rdev->flags) ||
1823 p->replacement != NULL)
1824 continue;
1825 clear_bit(In_sync, &rdev->flags);
1826 set_bit(Replacement, &rdev->flags);
1827 rdev->raid_disk = mirror;
1828 err = 0;
Jonathan Brassow9092c022013-05-02 14:19:24 -05001829 if (mddev->gendisk)
1830 disk_stack_limits(mddev->gendisk, rdev->bdev,
1831 rdev->data_offset << 9);
NeilBrownb7044d42011-12-23 10:17:56 +11001832 conf->fullsync = 1;
1833 rcu_assign_pointer(p->replacement, rdev);
1834 break;
1835 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001836
Jonathan Brassow9092c022013-05-02 14:19:24 -05001837 if (mddev->gendisk)
1838 disk_stack_limits(mddev->gendisk, rdev->bdev,
1839 rdev->data_offset << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840
NeilBrown2bb77732011-07-27 11:00:36 +10001841 p->head_position = 0;
NeilBrownd890fa22011-10-26 11:54:39 +11001842 p->recovery_disabled = mddev->recovery_disabled - 1;
NeilBrown2bb77732011-07-27 11:00:36 +10001843 rdev->raid_disk = mirror;
1844 err = 0;
1845 if (rdev->saved_raid_disk != mirror)
1846 conf->fullsync = 1;
1847 rcu_assign_pointer(p->rdev, rdev);
1848 break;
1849 }
NeilBrown050b6612012-03-19 12:46:39 +11001850 if (err == 0 && test_bit(Unmerged, &rdev->flags)) {
1851 /* Some requests might not have seen this new
1852 * merge_bvec_fn. We must wait for them to complete
1853 * before merging the device fully.
1854 * First we make sure any code which has tested
1855 * our function has submitted the request, then
1856 * we wait for all outstanding requests to complete.
1857 */
1858 synchronize_sched();
NeilBrowne2d59922013-06-12 11:01:22 +10001859 freeze_array(conf, 0);
1860 unfreeze_array(conf);
NeilBrown050b6612012-03-19 12:46:39 +11001861 clear_bit(Unmerged, &rdev->flags);
1862 }
Andre Nollac5e7112009-08-03 10:59:47 +10001863 md_integrity_add_rdev(rdev, mddev);
Jonathan Brassowed30be02012-10-31 11:42:30 +11001864 if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
Shaohua Li532a2a32012-10-11 13:30:52 +11001865 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
1866
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 print_conf(conf);
Neil Brown199050e2008-06-28 08:31:33 +10001868 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869}
1870
NeilBrownb8321b62011-12-23 10:17:51 +11001871static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872{
NeilBrowne879a872011-10-11 16:49:02 +11001873 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 int err = 0;
NeilBrownb8321b62011-12-23 10:17:51 +11001875 int number = rdev->raid_disk;
NeilBrownc8ab9032011-12-23 10:17:54 +11001876 struct md_rdev **rdevp;
Jonathan Brassowdc280d982012-07-31 10:03:52 +10001877 struct raid10_info *p = conf->mirrors + number;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878
1879 print_conf(conf);
NeilBrownc8ab9032011-12-23 10:17:54 +11001880 if (rdev == p->rdev)
1881 rdevp = &p->rdev;
1882 else if (rdev == p->replacement)
1883 rdevp = &p->replacement;
1884 else
1885 return 0;
1886
1887 if (test_bit(In_sync, &rdev->flags) ||
1888 atomic_read(&rdev->nr_pending)) {
1889 err = -EBUSY;
1890 goto abort;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 }
NeilBrownc8ab9032011-12-23 10:17:54 +11001892	/* Only remove non-faulty devices if recovery
1893 * is not possible.
1894 */
1895 if (!test_bit(Faulty, &rdev->flags) &&
1896 mddev->recovery_disabled != p->recovery_disabled &&
NeilBrown4ca40c22011-12-23 10:17:55 +11001897 (!p->replacement || p->replacement == rdev) &&
NeilBrown63aced62012-05-22 13:55:33 +10001898 number < conf->geo.raid_disks &&
NeilBrownc8ab9032011-12-23 10:17:54 +11001899 enough(conf, -1)) {
1900 err = -EBUSY;
1901 goto abort;
1902 }
1903 *rdevp = NULL;
1904 synchronize_rcu();
1905 if (atomic_read(&rdev->nr_pending)) {
1906 /* lost the race, try later */
1907 err = -EBUSY;
1908 *rdevp = rdev;
1909 goto abort;
NeilBrown4ca40c22011-12-23 10:17:55 +11001910 } else if (p->replacement) {
1911 /* We must have just cleared 'rdev' */
1912 p->rdev = p->replacement;
1913 clear_bit(Replacement, &p->replacement->flags);
1914 smp_mb(); /* Make sure other CPUs may see both as identical
1915 * but will never see neither -- if they are careful.
1916 */
1917 p->replacement = NULL;
1918 clear_bit(WantReplacement, &rdev->flags);
1919 } else
1920		/* We might have just removed the Replacement as faulty.
1921		 * Clear the flag just in case.
1922 */
1923 clear_bit(WantReplacement, &rdev->flags);
1924
NeilBrownc8ab9032011-12-23 10:17:54 +11001925 err = md_integrity_register(mddev);
1926
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927abort:
1928
1929 print_conf(conf);
1930 return err;
1931}
1932
1933
NeilBrown6712ecf2007-09-27 12:47:43 +02001934static void end_sync_read(struct bio *bio, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935{
NeilBrown9f2c9d12011-10-11 16:48:43 +11001936 struct r10bio *r10_bio = bio->bi_private;
NeilBrowne879a872011-10-11 16:49:02 +11001937 struct r10conf *conf = r10_bio->mddev->private;
Namhyung Kim778ca012011-07-18 17:38:47 +10001938 int d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939
NeilBrown3ea7daa2012-05-22 13:53:47 +10001940 if (bio == r10_bio->master_bio) {
1941 /* this is a reshape read */
1942 d = r10_bio->read_slot; /* really the read dev */
1943 } else
1944 d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
NeilBrown0eb3ff12006-01-06 00:20:29 -08001945
1946 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
1947 set_bit(R10BIO_Uptodate, &r10_bio->state);
NeilBrowne684e412011-07-28 11:39:25 +10001948 else
1949 /* The write handler will notice the lack of
1950 * R10BIO_Uptodate and record any errors etc
1951 */
NeilBrown4dbcdc72006-01-06 00:20:52 -08001952 atomic_add(r10_bio->sectors,
1953 &conf->mirrors[d].rdev->corrected_errors);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954
1955 /* for reconstruct, we always reschedule after a read.
1956 * for resync, only after all reads
1957 */
NeilBrown73d5c382009-02-25 13:18:47 +11001958 rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
1960 atomic_dec_and_test(&r10_bio->remaining)) {
1961 /* we have read all the blocks,
1962 * do the comparison in process context in raid10d
1963 */
1964 reschedule_retry(r10_bio);
1965 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001966}
1967
NeilBrown9f2c9d12011-10-11 16:48:43 +11001968static void end_sync_request(struct r10bio *r10_bio)
NeilBrown5e570282011-07-28 11:39:25 +10001969{
NeilBrownfd01b882011-10-11 16:47:53 +11001970 struct mddev *mddev = r10_bio->mddev;
NeilBrown5e570282011-07-28 11:39:25 +10001971
1972 while (atomic_dec_and_test(&r10_bio->remaining)) {
1973 if (r10_bio->master_bio == NULL) {
1974 /* the primary of several recovery bios */
1975 sector_t s = r10_bio->sectors;
1976 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1977 test_bit(R10BIO_WriteError, &r10_bio->state))
1978 reschedule_retry(r10_bio);
1979 else
1980 put_buf(r10_bio);
1981 md_done_sync(mddev, s, 1);
1982 break;
1983 } else {
NeilBrown9f2c9d12011-10-11 16:48:43 +11001984 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
NeilBrown5e570282011-07-28 11:39:25 +10001985 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
1986 test_bit(R10BIO_WriteError, &r10_bio->state))
1987 reschedule_retry(r10_bio);
1988 else
1989 put_buf(r10_bio);
1990 r10_bio = r10_bio2;
1991 }
1992 }
1993}
1994
NeilBrown6712ecf2007-09-27 12:47:43 +02001995static void end_sync_write(struct bio *bio, int error)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996{
1997 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
NeilBrown9f2c9d12011-10-11 16:48:43 +11001998 struct r10bio *r10_bio = bio->bi_private;
NeilBrownfd01b882011-10-11 16:47:53 +11001999 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11002000 struct r10conf *conf = mddev->private;
Namhyung Kim778ca012011-07-18 17:38:47 +10002001 int d;
NeilBrown749c55e2011-07-28 11:39:24 +10002002 sector_t first_bad;
2003 int bad_sectors;
2004 int slot;
NeilBrown9ad1aef2011-12-23 10:17:55 +11002005 int repl;
NeilBrown4ca40c22011-12-23 10:17:55 +11002006 struct md_rdev *rdev = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
NeilBrown9ad1aef2011-12-23 10:17:55 +11002008 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2009 if (repl)
2010 rdev = conf->mirrors[d].replacement;
NeilBrown547414d2012-03-13 11:21:20 +11002011 else
NeilBrown9ad1aef2011-12-23 10:17:55 +11002012 rdev = conf->mirrors[d].rdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002013
NeilBrown1a0b7cd2011-07-28 11:39:25 +10002014 if (!uptodate) {
NeilBrown9ad1aef2011-12-23 10:17:55 +11002015 if (repl)
2016 md_error(mddev, rdev);
2017 else {
2018 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002019 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2020 set_bit(MD_RECOVERY_NEEDED,
2021 &rdev->mddev->recovery);
NeilBrown9ad1aef2011-12-23 10:17:55 +11002022 set_bit(R10BIO_WriteError, &r10_bio->state);
2023 }
2024 } else if (is_badblock(rdev,
NeilBrown749c55e2011-07-28 11:39:24 +10002025 r10_bio->devs[slot].addr,
2026 r10_bio->sectors,
2027 &first_bad, &bad_sectors))
2028 set_bit(R10BIO_MadeGood, &r10_bio->state);
NeilBrowndfc70642008-05-23 13:04:39 -07002029
NeilBrown9ad1aef2011-12-23 10:17:55 +11002030 rdev_dec_pending(rdev, mddev);
NeilBrown5e570282011-07-28 11:39:25 +10002031
2032 end_sync_request(r10_bio);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033}
2034
2035/*
2036 * Note: sync and recovery are handled very differently for raid10
2037 * This code is for resync.
2038 * For resync, we read through virtual addresses and read all blocks.
2039 * If there is any error, we schedule a write. The lowest numbered
2040 * drive is authoritative.
2041 * However, requests come in for physical addresses, so we need to map.
2042 * For every physical address there are raid_disks/copies virtual addresses,
2043 * which is always at least one, but is not necessarily an integer.
2044 * This means that a physical address can span multiple chunks, so we may
2045 * have to submit multiple io requests for a single sync request.
2046 */
2047/*
2048 * We check if all blocks are in-sync and only write to blocks that
2049 * aren't in sync
2050 */
NeilBrown9f2c9d12011-10-11 16:48:43 +11002051static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052{
NeilBrowne879a872011-10-11 16:49:02 +11002053 struct r10conf *conf = mddev->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054 int i, first;
2055 struct bio *tbio, *fbio;
majianpengf4380a92012-04-12 16:04:47 +10002056 int vcnt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 atomic_set(&r10_bio->remaining, 1);
2059
2060 /* find the first device with a block */
2061 for (i=0; i<conf->copies; i++)
2062 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags))
2063 break;
2064
2065 if (i == conf->copies)
2066 goto done;
2067
2068 first = i;
2069 fbio = r10_bio->devs[i].bio;
2070
majianpengf4380a92012-04-12 16:04:47 +10002071 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
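	/* Editor's note: with 4K pages, PAGE_SIZE >> 9 is 8 sectors per
	 * page, so e.g. r10_bio->sectors = 1024 gives
	 * vcnt = (1024 + 7) >> 3 = 128 pages; the "+ 7" rounds a
	 * partial trailing page up.
	 */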
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 /* now find blocks with errors */
NeilBrown0eb3ff12006-01-06 00:20:29 -08002073 for (i=0 ; i < conf->copies ; i++) {
2074 int j, d;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 tbio = r10_bio->devs[i].bio;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002077
2078 if (tbio->bi_end_io != end_sync_read)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 continue;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002080 if (i == first)
2081 continue;
2082 if (test_bit(BIO_UPTODATE, &r10_bio->devs[i].bio->bi_flags)) {
2083 /* We know that the bi_io_vec layout is the same for
2084 * both 'first' and 'i', so we just compare them.
2085			 * All vec entries are PAGE_SIZE long.
2086 */
NeilBrown7bb23c42013-07-16 16:50:47 +10002087 int sectors = r10_bio->sectors;
2088 for (j = 0; j < vcnt; j++) {
2089 int len = PAGE_SIZE;
2090 if (sectors < (len / 512))
2091 len = sectors * 512;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002092 if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
2093 page_address(tbio->bi_io_vec[j].bv_page),
NeilBrown7bb23c42013-07-16 16:50:47 +10002094 len))
NeilBrown0eb3ff12006-01-06 00:20:29 -08002095 break;
NeilBrown7bb23c42013-07-16 16:50:47 +10002096 sectors -= len/512;
2097 }
NeilBrown0eb3ff12006-01-06 00:20:29 -08002098 if (j == vcnt)
2099 continue;
Jianpeng Ma7f7583d2012-10-11 14:17:59 +11002100 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
NeilBrownf84ee362011-07-28 11:39:25 +10002101 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
2102 /* Don't fix anything. */
2103 continue;
NeilBrown0eb3ff12006-01-06 00:20:29 -08002104 }
NeilBrownf84ee362011-07-28 11:39:25 +10002105 /* Ok, we need to write this bio, either to correct an
2106 * inconsistency or to correct an unreadable block.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 * First we need to fixup bv_offset, bv_len and
2108 * bi_vecs, as the read request might have corrupted these
2109 */
Kent Overstreet8be185f2012-09-06 14:14:43 -07002110 bio_reset(tbio);
2111
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112 tbio->bi_vcnt = vcnt;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002113 tbio->bi_iter.bi_size = r10_bio->sectors << 9;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 tbio->bi_rw = WRITE;
2115 tbio->bi_private = r10_bio;
Kent Overstreet4f024f32013-10-11 15:44:27 -07002116 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
2118 for (j=0; j < vcnt ; j++) {
2119 tbio->bi_io_vec[j].bv_offset = 0;
2120 tbio->bi_io_vec[j].bv_len = PAGE_SIZE;
2121
2122 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2123 page_address(fbio->bi_io_vec[j].bv_page),
2124 PAGE_SIZE);
2125 }
2126 tbio->bi_end_io = end_sync_write;
2127
2128 d = r10_bio->devs[i].devnum;
2129 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
2130 atomic_inc(&r10_bio->remaining);
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002131 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Kent Overstreet4f024f32013-10-11 15:44:27 -07002133 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2135 generic_make_request(tbio);
2136 }
2137
NeilBrown9ad1aef2011-12-23 10:17:55 +11002138 /* Now write out to any replacement devices
2139 * that are active
2140 */
2141 for (i = 0; i < conf->copies; i++) {
2142 int j, d;
NeilBrown9ad1aef2011-12-23 10:17:55 +11002143
2144 tbio = r10_bio->devs[i].repl_bio;
2145 if (!tbio || !tbio->bi_end_io)
2146 continue;
2147 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2148 && r10_bio->devs[i].bio != fbio)
2149 for (j = 0; j < vcnt; j++)
2150 memcpy(page_address(tbio->bi_io_vec[j].bv_page),
2151 page_address(fbio->bi_io_vec[j].bv_page),
2152 PAGE_SIZE);
2153 d = r10_bio->devs[i].devnum;
2154 atomic_inc(&r10_bio->remaining);
2155 md_sync_acct(conf->mirrors[d].replacement->bdev,
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002156 bio_sectors(tbio));
NeilBrown9ad1aef2011-12-23 10:17:55 +11002157 generic_make_request(tbio);
2158 }
2159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160done:
2161 if (atomic_dec_and_test(&r10_bio->remaining)) {
2162 md_done_sync(mddev, r10_bio->sectors, 1);
2163 put_buf(r10_bio);
2164 }
2165}
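/* Editor's note: the net effect of sync_request_write() above is
 * "first readable copy wins" -- fbio is authoritative, and any copy
 * whose pages compare unequal (or which could not be read) is
 * rewritten from fbio's pages.
 */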
2166
2167/*
2168 * Now for the recovery code.
2169 * Recovery happens across physical sectors.
2170 * We recover all non-is_sync drives by finding the virtual address of
2171 * each, and then choose a working drive that also has that virt address.
2172 * There is a separate r10_bio for each non-in_sync drive.
2173 * Only the first two slots are in use: the first for reading,
2174 * the second for writing.
2175 *
2176 */
NeilBrown9f2c9d12011-10-11 16:48:43 +11002177static void fix_recovery_read_error(struct r10bio *r10_bio)
NeilBrown5e570282011-07-28 11:39:25 +10002178{
2179 /* We got a read error during recovery.
2180 * We repeat the read in smaller page-sized sections.
2181 * If a read succeeds, write it to the new device or record
2182 * a bad block if we cannot.
2183 * If a read fails, record a bad block on both old and
2184 * new devices.
2185 */
NeilBrownfd01b882011-10-11 16:47:53 +11002186 struct mddev *mddev = r10_bio->mddev;
NeilBrowne879a872011-10-11 16:49:02 +11002187 struct r10conf *conf = mddev->private;
NeilBrown5e570282011-07-28 11:39:25 +10002188 struct bio *bio = r10_bio->devs[0].bio;
2189 sector_t sect = 0;
2190 int sectors = r10_bio->sectors;
2191 int idx = 0;
2192 int dr = r10_bio->devs[0].devnum;
2193 int dw = r10_bio->devs[1].devnum;
2194
2195 while (sectors) {
2196 int s = sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11002197 struct md_rdev *rdev;
NeilBrown5e570282011-07-28 11:39:25 +10002198 sector_t addr;
2199 int ok;
2200
2201 if (s > (PAGE_SIZE>>9))
2202 s = PAGE_SIZE >> 9;
2203
2204 rdev = conf->mirrors[dr].rdev;
2205		addr = r10_bio->devs[0].addr + sect;
2206 ok = sync_page_io(rdev,
2207 addr,
2208 s << 9,
2209 bio->bi_io_vec[idx].bv_page,
2210 READ, false);
2211 if (ok) {
2212 rdev = conf->mirrors[dw].rdev;
2213 addr = r10_bio->devs[1].addr + sect;
2214 ok = sync_page_io(rdev,
2215 addr,
2216 s << 9,
2217 bio->bi_io_vec[idx].bv_page,
2218 WRITE, false);
NeilBrownb7044d42011-12-23 10:17:56 +11002219 if (!ok) {
NeilBrown5e570282011-07-28 11:39:25 +10002220 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002221 if (!test_and_set_bit(WantReplacement,
2222 &rdev->flags))
2223 set_bit(MD_RECOVERY_NEEDED,
2224 &rdev->mddev->recovery);
2225 }
NeilBrown5e570282011-07-28 11:39:25 +10002226 }
2227 if (!ok) {
2228 /* We don't worry if we cannot set a bad block -
2229 * it really is bad so there is no loss in not
2230 * recording it yet
2231 */
2232 rdev_set_badblocks(rdev, addr, s, 0);
2233
2234 if (rdev != conf->mirrors[dw].rdev) {
2235 /* need bad block on destination too */
NeilBrown3cb03002011-10-11 16:45:26 +11002236 struct md_rdev *rdev2 = conf->mirrors[dw].rdev;
NeilBrown5e570282011-07-28 11:39:25 +10002237 addr = r10_bio->devs[1].addr + sect;
2238 ok = rdev_set_badblocks(rdev2, addr, s, 0);
2239 if (!ok) {
2240 /* just abort the recovery */
2241 printk(KERN_NOTICE
2242 "md/raid10:%s: recovery aborted"
2243 " due to read error\n",
2244 mdname(mddev));
2245
2246 conf->mirrors[dw].recovery_disabled
2247 = mddev->recovery_disabled;
2248 set_bit(MD_RECOVERY_INTR,
2249 &mddev->recovery);
2250 break;
2251 }
2252 }
2253 }
2254
2255 sectors -= s;
2256 sect += s;
2257 idx++;
2258 }
2259}
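/* Editor's note (worked example): fix_recovery_read_error() above
 * steps through the region one page at a time; with 4K pages that is
 * PAGE_SIZE >> 9 = 8 sectors per step, so a hypothetical 2048-sector
 * (1MiB) recovery window becomes 256 read/write pairs, each backed by
 * one bi_io_vec page.
 */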
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
NeilBrown9f2c9d12011-10-11 16:48:43 +11002261static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262{
NeilBrowne879a872011-10-11 16:49:02 +11002263 struct r10conf *conf = mddev->private;
Namhyung Kimc65060a2011-07-18 17:38:49 +10002264 int d;
NeilBrown24afd802011-12-23 10:17:55 +11002265 struct bio *wbio, *wbio2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266
NeilBrown5e570282011-07-28 11:39:25 +10002267 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2268 fix_recovery_read_error(r10_bio);
2269 end_sync_request(r10_bio);
2270 return;
2271 }
2272
Namhyung Kimc65060a2011-07-18 17:38:49 +10002273 /*
2274 * share the pages with the first bio
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 * and submit the write request
2276 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277 d = r10_bio->devs[1].devnum;
NeilBrown24afd802011-12-23 10:17:55 +11002278 wbio = r10_bio->devs[1].bio;
2279 wbio2 = r10_bio->devs[1].repl_bio;
NeilBrown0eb25bb2013-07-24 15:37:42 +10002280 /* Need to test wbio2->bi_end_io before we call
2281 * generic_make_request as if the former is NULL,
2282 * the latter is free to free wbio2.
2283 */
2284 if (wbio2 && !wbio2->bi_end_io)
2285 wbio2 = NULL;
NeilBrown24afd802011-12-23 10:17:55 +11002286 if (wbio->bi_end_io) {
2287 atomic_inc(&conf->mirrors[d].rdev->nr_pending);
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002288 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
NeilBrown24afd802011-12-23 10:17:55 +11002289 generic_make_request(wbio);
2290 }
NeilBrown0eb25bb2013-07-24 15:37:42 +10002291 if (wbio2) {
NeilBrown24afd802011-12-23 10:17:55 +11002292 atomic_inc(&conf->mirrors[d].replacement->nr_pending);
2293 md_sync_acct(conf->mirrors[d].replacement->bdev,
Kent Overstreetaa8b57a2013-02-05 15:19:29 -08002294 bio_sectors(wbio2));
NeilBrown24afd802011-12-23 10:17:55 +11002295 generic_make_request(wbio2);
2296 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297}
2298
2299
2300/*
Robert Becker1e509152009-12-14 12:49:58 +11002301 * Used by fix_read_error() to decay the per rdev read_errors.
2302 * We halve the read error count for every hour that has elapsed
2303 * since the last recorded read error.
2304 *
2305 */
NeilBrownfd01b882011-10-11 16:47:53 +11002306static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
Robert Becker1e509152009-12-14 12:49:58 +11002307{
2308 struct timespec cur_time_mon;
2309 unsigned long hours_since_last;
2310 unsigned int read_errors = atomic_read(&rdev->read_errors);
2311
2312 ktime_get_ts(&cur_time_mon);
2313
2314 if (rdev->last_read_error.tv_sec == 0 &&
2315 rdev->last_read_error.tv_nsec == 0) {
2316 /* first time we've seen a read error */
2317 rdev->last_read_error = cur_time_mon;
2318 return;
2319 }
2320
2321 hours_since_last = (cur_time_mon.tv_sec -
2322 rdev->last_read_error.tv_sec) / 3600;
2323
2324 rdev->last_read_error = cur_time_mon;
2325
2326 /*
2327 * if hours_since_last is > the number of bits in read_errors
2328 * just set read errors to 0. We do this to avoid
2329 * overflowing the shift of read_errors by hours_since_last.
2330 */
2331 if (hours_since_last >= 8 * sizeof(read_errors))
2332 atomic_set(&rdev->read_errors, 0);
2333 else
2334 atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
2335}
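/*
 * Editor's sketch (hypothetical helper, not upstream): the decay rule
 * applied by check_decay_read_errors() above -- one halving per
 * elapsed hour, with the shift clamped so it cannot exceed the
 * counter's width.
 */
static inline unsigned int __maybe_unused
raid10_demo_decay(unsigned int read_errors, unsigned long hours)
{
	/* e.g. 40 recorded errors, 3 idle hours: 40 >> 3 = 5 */
	if (hours >= 8 * sizeof(read_errors))
		return 0;
	return read_errors >> hours;
}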
2336
NeilBrown3cb03002011-10-11 16:45:26 +11002337static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
NeilBrown58c54fc2011-07-28 11:39:25 +10002338 int sectors, struct page *page, int rw)
2339{
2340 sector_t first_bad;
2341 int bad_sectors;
2342
2343 if (is_badblock(rdev, sector, sectors, &first_bad, &bad_sectors)
2344 && (rw == READ || test_bit(WriteErrorSeen, &rdev->flags)))
2345 return -1;
2346 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
2347 /* success */
2348 return 1;
NeilBrownb7044d42011-12-23 10:17:56 +11002349 if (rw == WRITE) {
NeilBrown58c54fc2011-07-28 11:39:25 +10002350 set_bit(WriteErrorSeen, &rdev->flags);
NeilBrownb7044d42011-12-23 10:17:56 +11002351 if (!test_and_set_bit(WantReplacement, &rdev->flags))
2352 set_bit(MD_RECOVERY_NEEDED,
2353 &rdev->mddev->recovery);
2354 }
NeilBrown58c54fc2011-07-28 11:39:25 +10002355 /* need to record an error - either for the block or the device */
2356 if (!rdev_set_badblocks(rdev, sector, sectors, 0))
2357 md_error(rdev->mddev, rdev);
2358 return 0;
2359}
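/* Editor's note: callers treat r10_sync_page_io() above as tri-state:
 * 1 on success, 0 when the I/O failed and a bad block (or device
 * failure) has just been recorded, and -1 when the range overlapped
 * known bad blocks so the I/O was not attempted at all.
 */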
2360
Robert Becker1e509152009-12-14 12:49:58 +11002361/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362 * This is a kernel thread which:
2363 *
2364 * 1. Retries failed read operations on working mirrors.
2365 * 2. Updates the raid superblock when problems are encountered.
NeilBrown6814d532006-10-03 01:15:45 -07002366 * 3. Performs writes following reads for array synchronising.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002367 */
2368
NeilBrowne879a872011-10-11 16:49:02 +11002369static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
NeilBrown6814d532006-10-03 01:15:45 -07002370{
2371 int sect = 0; /* Offset from r10_bio->sector */
2372 int sectors = r10_bio->sectors;
NeilBrown3cb03002011-10-11 16:45:26 +11002373	struct md_rdev *rdev;
Robert Becker1e509152009-12-14 12:49:58 +11002374 int max_read_errors = atomic_read(&mddev->max_corr_read_errors);
Prasanna S. Panchamukhi0544a212010-06-24 13:31:03 +10002375 int d = r10_bio->devs[r10_bio->read_slot].devnum;
Robert Becker1e509152009-12-14 12:49:58 +11002376
NeilBrown7c4e06f2011-05-11 14:53:17 +10002377 /* still own a reference to this rdev, so it cannot
2378 * have been cleared recently.
2379 */
2380 rdev = conf->mirrors[d].rdev;
Robert Becker1e509152009-12-14 12:49:58 +11002381
NeilBrown7c4e06f2011-05-11 14:53:17 +10002382 if (test_bit(Faulty, &rdev->flags))
2383		/* drive has already been failed, just ignore any
2384		 * more fix_read_error() attempts */
2385 return;
2386
2387 check_decay_read_errors(mddev, rdev);
2388 atomic_inc(&rdev->read_errors);
2389 if (atomic_read(&rdev->read_errors) > max_read_errors) {
2390 char b[BDEVNAME_SIZE];
Robert Becker1e509152009-12-14 12:49:58 +11002391 bdevname(rdev->bdev, b);
2392
NeilBrown7c4e06f2011-05-11 14:53:17 +10002393 printk(KERN_NOTICE
2394 "md/raid10:%s: %s: Raid device exceeded "
2395 "read_error threshold [cur %d:max %d]\n",
2396 mdname(mddev), b,
2397 atomic_read(&rdev->read_errors), max_read_errors);
2398 printk(KERN_NOTICE
2399 "md/raid10:%s: %s: Failing raid device\n",
2400 mdname(mddev), b);
2401 md_error(mddev, conf->mirrors[d].rdev);
NeilBrownfae8cc52012-02-14 11:10:10 +11002402 r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
NeilBrown7c4e06f2011-05-11 14:53:17 +10002403 return;
Robert Becker1e509152009-12-14 12:49:58 +11002404 }
Robert Becker1e509152009-12-14 12:49:58 +11002405
	while(sectors) {
		int s = sectors;
		int sl = r10_bio->read_slot;
		int success = 0;
		int start;

		if (s > (PAGE_SIZE>>9))
			s = PAGE_SIZE >> 9;

		rcu_read_lock();
		do {
			sector_t first_bad;
			int bad_sectors;

			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (rdev &&
			    !test_bit(Unmerged, &rdev->flags) &&
			    test_bit(In_sync, &rdev->flags) &&
			    is_badblock(rdev, r10_bio->devs[sl].addr + sect, s,
					&first_bad, &bad_sectors) == 0) {
				atomic_inc(&rdev->nr_pending);
				rcu_read_unlock();
				success = sync_page_io(rdev,
						       r10_bio->devs[sl].addr +
						       sect,
						       s<<9,
						       conf->tmppage, READ, false);
				rdev_dec_pending(rdev, mddev);
				rcu_read_lock();
				if (success)
					break;
			}
			sl++;
			if (sl == conf->copies)
				sl = 0;
		} while (!success && sl != r10_bio->read_slot);
		rcu_read_unlock();

		if (!success) {
			/* Cannot read from anywhere, just mark the block
			 * as bad on the first device to discourage future
			 * reads.
			 */
			int dn = r10_bio->devs[r10_bio->read_slot].devnum;
			rdev = conf->mirrors[dn].rdev;

			if (!rdev_set_badblocks(
				    rdev,
				    r10_bio->devs[r10_bio->read_slot].addr
				    + sect,
				    s, 0)) {
				md_error(mddev, rdev);
				r10_bio->devs[r10_bio->read_slot].bio
					= IO_BLOCKED;
			}
			break;
		}

		start = sl;
		/* write it back and re-read */
		rcu_read_lock();
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (!rdev ||
			    test_bit(Unmerged, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				continue;

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			if (r10_sync_page_io(rdev,
					     r10_bio->devs[sl].addr +
					     sect,
					     s, conf->tmppage, WRITE)
			    == 0) {
				/* Well, this device is dead */
				printk(KERN_NOTICE
				       "md/raid10:%s: read correction "
				       "write failed"
				       " (%d sectors at %llu on %s)\n",
				       mdname(mddev), s,
				       (unsigned long long)(
					       sect +
					       choose_data_offset(r10_bio,
								  rdev)),
				       bdevname(rdev->bdev, b));
				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
				       "drive\n",
				       mdname(mddev),
				       bdevname(rdev->bdev, b));
			}
			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
		sl = start;
		while (sl != r10_bio->read_slot) {
			char b[BDEVNAME_SIZE];

			if (sl==0)
				sl = conf->copies;
			sl--;
			d = r10_bio->devs[sl].devnum;
			rdev = rcu_dereference(conf->mirrors[d].rdev);
			if (!rdev ||
			    !test_bit(In_sync, &rdev->flags))
				continue;

			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			switch (r10_sync_page_io(rdev,
						 r10_bio->devs[sl].addr +
						 sect,
						 s, conf->tmppage,
						 READ)) {
			case 0:
				/* Well, this device is dead */
				printk(KERN_NOTICE
				       "md/raid10:%s: unable to read back "
				       "corrected sectors"
				       " (%d sectors at %llu on %s)\n",
				       mdname(mddev), s,
				       (unsigned long long)(
					       sect +
					       choose_data_offset(r10_bio, rdev)),
				       bdevname(rdev->bdev, b));
				printk(KERN_NOTICE "md/raid10:%s: %s: failing "
				       "drive\n",
				       mdname(mddev),
				       bdevname(rdev->bdev, b));
				break;
			case 1:
				printk(KERN_INFO
				       "md/raid10:%s: read error corrected"
				       " (%d sectors at %llu on %s)\n",
				       mdname(mddev), s,
				       (unsigned long long)(
					       sect +
					       choose_data_offset(r10_bio, rdev)),
				       bdevname(rdev->bdev, b));
				atomic_add(s, &rdev->corrected_errors);
			}

			rdev_dec_pending(rdev, mddev);
			rcu_read_lock();
		}
		rcu_read_unlock();

		sectors -= s;
		sect += s;
	}
}

static int narrow_write_error(struct r10bio *r10_bio, int i)
{
	struct bio *bio = r10_bio->master_bio;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
	/* bio has the data to be written to slot 'i' where
	 * we just recently had a write error.
	 * We repeatedly clone the bio and trim down to one block,
	 * then try the write.  Where the write fails we record
	 * a bad block.
	 * It is conceivable that the bio doesn't exactly align with
	 * blocks.  We must handle this.
	 *
	 * We currently own a reference to the rdev.
	 */

	int block_sectors;
	sector_t sector;
	int sectors;
	int sect_to_write = r10_bio->sectors;
	int ok = 1;

	if (rdev->badblocks.shift < 0)
		return 0;

	block_sectors = 1 << rdev->badblocks.shift;
	sector = r10_bio->sector;
	sectors = ((r10_bio->sector + block_sectors)
		   & ~(sector_t)(block_sectors - 1))
		- sector;

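	/* For example, with block_sectors = 8 and r10_bio->sector = 13,
	 * this first pass covers just 3 sectors (13-15), so every
	 * following pass starts on an 8-sector badblock boundary.
	 */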
	while (sect_to_write) {
		struct bio *wbio;
		if (sectors > sect_to_write)
			sectors = sect_to_write;
		/* Write at 'sector' for 'sectors' */
		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
					   choose_data_offset(r10_bio, rdev) +
					   (sector - r10_bio->sector));
		wbio->bi_bdev = rdev->bdev;
		/* submit_bio_wait() returns 0 on success, so a negative
		 * return is the failure case.
		 */
		if (submit_bio_wait(WRITE, wbio) < 0)
			/* Failure! */
			ok = rdev_set_badblocks(rdev, sector,
						sectors, 0)
				&& ok;

		bio_put(wbio);
		sect_to_write -= sectors;
		sector += sectors;
		sectors = block_sectors;
	}
	return ok;
}

static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
{
	int slot = r10_bio->read_slot;
	struct bio *bio;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev = r10_bio->devs[slot].rdev;
	char b[BDEVNAME_SIZE];
	unsigned long do_sync;
	int max_sectors;

	/* we got a read error.  Maybe the drive is bad.  Maybe just
	 * the block and we can fix it.
	 * We freeze all other IO, and try reading the block from
	 * other devices.  When we find one, we re-write
	 * and check that this fixes the read error.
	 * This is all done synchronously while the array is
	 * frozen.
	 */
	bio = r10_bio->devs[slot].bio;
	bdevname(bio->bi_bdev, b);
	bio_put(bio);
	r10_bio->devs[slot].bio = NULL;

	if (mddev->ro == 0) {
		freeze_array(conf, 1);
		fix_read_error(conf, mddev, r10_bio);
		unfreeze_array(conf);
	} else
		r10_bio->devs[slot].bio = IO_BLOCKED;

	rdev_dec_pending(rdev, mddev);

read_more:
	rdev = read_balance(conf, r10_bio, &max_sectors);
	if (rdev == NULL) {
		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
		       " read error for block %llu\n",
		       mdname(mddev), b,
		       (unsigned long long)r10_bio->sector);
		raid_end_bio_io(r10_bio);
		return;
	}

	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
	slot = r10_bio->read_slot;
	printk_ratelimited(
		KERN_ERR
		"md/raid10:%s: %s: redirecting "
		"sector %llu to another mirror\n",
		mdname(mddev),
		bdevname(rdev->bdev, b),
		(unsigned long long)r10_bio->sector);
	bio = bio_clone_mddev(r10_bio->master_bio,
			      GFP_NOIO, mddev);
	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
	r10_bio->devs[slot].bio = bio;
	r10_bio->devs[slot].rdev = rdev;
	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
		+ choose_data_offset(r10_bio, rdev);
	bio->bi_bdev = rdev->bdev;
	bio->bi_rw = READ | do_sync;
	bio->bi_private = r10_bio;
	bio->bi_end_io = raid10_end_read_request;
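	/* bi_phys_segments of the master bio is borrowed as a count of
	 * the r10_bios outstanding against it; a value of 0 stands for
	 * the single original request, hence the jump to 2 below when
	 * the request is first split.
	 */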
	if (max_sectors < r10_bio->sectors) {
		/* Drat - have to split this up more */
		struct bio *mbio = r10_bio->master_bio;
		int sectors_handled =
			r10_bio->sector + max_sectors
			- mbio->bi_iter.bi_sector;
		r10_bio->sectors = max_sectors;
		spin_lock_irq(&conf->device_lock);
		if (mbio->bi_phys_segments == 0)
			mbio->bi_phys_segments = 2;
		else
			mbio->bi_phys_segments++;
		spin_unlock_irq(&conf->device_lock);
		generic_make_request(bio);

		r10_bio = mempool_alloc(conf->r10bio_pool,
					GFP_NOIO);
		r10_bio->master_bio = mbio;
		r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
		r10_bio->state = 0;
		set_bit(R10BIO_ReadError,
			&r10_bio->state);
		r10_bio->mddev = mddev;
		r10_bio->sector = mbio->bi_iter.bi_sector
			+ sectors_handled;

		goto read_more;
	} else
		generic_make_request(bio);
}

static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
{
	/* Some sort of write request has finished and it
	 * succeeded in writing where we thought there was a
	 * bad block.  So forget the bad block.
	 * Or possibly it failed and we need to record
	 * a bad block.
	 */
	int m;
	struct md_rdev *rdev;

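	/* Sync and recovery writes own their r10_bio buffers and just
	 * release them (put_buf); normal writes must also complete the
	 * master bio via raid_end_bio_io.
	 */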
	if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
	    test_bit(R10BIO_IsRecover, &r10_bio->state)) {
		for (m = 0; m < conf->copies; m++) {
			int dev = r10_bio->devs[m].devnum;
			rdev = conf->mirrors[dev].rdev;
			if (r10_bio->devs[m].bio == NULL)
				continue;
			if (test_bit(BIO_UPTODATE,
				     &r10_bio->devs[m].bio->bi_flags)) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
			} else {
				if (!rdev_set_badblocks(
					    rdev,
					    r10_bio->devs[m].addr,
					    r10_bio->sectors, 0))
					md_error(conf->mddev, rdev);
			}
			rdev = conf->mirrors[dev].replacement;
			if (r10_bio->devs[m].repl_bio == NULL)
				continue;
			if (test_bit(BIO_UPTODATE,
				     &r10_bio->devs[m].repl_bio->bi_flags)) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
			} else {
				if (!rdev_set_badblocks(
					    rdev,
					    r10_bio->devs[m].addr,
					    r10_bio->sectors, 0))
					md_error(conf->mddev, rdev);
			}
		}
		put_buf(r10_bio);
	} else {
		for (m = 0; m < conf->copies; m++) {
			int dev = r10_bio->devs[m].devnum;
			struct bio *bio = r10_bio->devs[m].bio;
			rdev = conf->mirrors[dev].rdev;
			if (bio == IO_MADE_GOOD) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
				rdev_dec_pending(rdev, conf->mddev);
			} else if (bio != NULL &&
				   !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
				if (!narrow_write_error(r10_bio, m)) {
					md_error(conf->mddev, rdev);
					set_bit(R10BIO_Degraded,
						&r10_bio->state);
				}
				rdev_dec_pending(rdev, conf->mddev);
			}
			bio = r10_bio->devs[m].repl_bio;
			rdev = conf->mirrors[dev].replacement;
			if (rdev && bio == IO_MADE_GOOD) {
				rdev_clear_badblocks(
					rdev,
					r10_bio->devs[m].addr,
					r10_bio->sectors, 0);
				rdev_dec_pending(rdev, conf->mddev);
			}
		}
		if (test_bit(R10BIO_WriteError,
			     &r10_bio->state))
			close_write(r10_bio);
		raid_end_bio_io(r10_bio);
	}
}

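/* raid10d is the per-array daemon thread: it flushes batched writes
 * and works through conf->retry_list, dispatching each r10_bio to the
 * appropriate sync, recovery or error-handling routine.
 */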
static void raid10d(struct md_thread *thread)
{
	struct mddev *mddev = thread->mddev;
	struct r10bio *r10_bio;
	unsigned long flags;
	struct r10conf *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r10_bio = list_entry(head->prev, struct r10bio, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r10_bio->mddev;
		conf = mddev->private;
		if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
		    test_bit(R10BIO_WriteError, &r10_bio->state))
			handle_write_completed(conf, r10_bio);
		else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
			reshape_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsSync, &r10_bio->state))
			sync_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
			recovery_request_write(mddev, r10_bio);
		else if (test_bit(R10BIO_ReadError, &r10_bio->state))
			handle_read_error(mddev, r10_bio);
		else {
			/* just a partial read to be scheduled from a
			 * separate context
			 */
			int slot = r10_bio->read_slot;
			generic_make_request(r10_bio->devs[slot].bio);
		}

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

static int init_resync(struct r10conf *conf)
{
	int buffs;
	int i;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r10buf_pool);
	conf->have_replacement = 0;
	for (i = 0; i < conf->geo.raid_disks; i++)
		if (conf->mirrors[i].replacement)
			conf->have_replacement = 1;
	conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
	if (!conf->r10buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}

/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 *
 * Resync and recovery are handled very differently.
 * We differentiate by looking at MD_RECOVERY_SYNC in mddev->recovery.
 *
 * For resync, we iterate over virtual addresses, read all copies,
 * and update if there are differences.  If only one copy is live,
 * skip it.
 * For recovery, we iterate over physical addresses, read a good
 * value for each non-in_sync drive, and over-write.
 *
 * So, for recovery we may have several outstanding complex requests for a
 * given address, one for each out-of-sync device.  We model this by allocating
 * a number of r10_bio structures, one for each out-of-sync device.
 * As we setup these structures, we collect all bio's together into a list
 * which we then process collectively to add pages, and then process again
 * to pass to generic_make_request.
 *
 * The r10_bio structures are linked using a borrowed master_bio pointer.
 * This link is counted in ->remaining.  When the r10_bio that points to NULL
 * has its remaining count decremented to 0, the whole complex operation
 * is complete.
 *
 */

static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
			     int *skipped, int go_faster)
{
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	struct bio *biolist = NULL, *bio;
	sector_t max_sector, nr_sectors;
	int i;
	int max_sync;
	sector_t sync_blocks;
	sector_t sectors_skipped = 0;
	int chunks_skipped = 0;
	sector_t chunk_mask = conf->geo.chunk_mask;

	if (!conf->r10buf_pool)
		if (init_resync(conf))
			return 0;

	/*
	 * Allow skipping a full rebuild for incremental assembly
	 * of a clean array, like RAID1 does.
	 */
	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    mddev->reshape_position == MaxSector &&
	    !test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return mddev->dev_sectors - sector_nr;
	}

 skipped:
	max_sector = mddev->dev_sectors;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ||
	    test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sector = mddev->resync_max_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunks (there can
		 * be several when recovering multiple devices),
		 * as we may have started syncing it but not finished.
		 * We can find the current address in
		 * mddev->curr_resync, but for recovery,
		 * we need to convert that to several
		 * virtual addresses.
		 */
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
			end_reshape(conf);
			return 0;
		}

		if (mddev->curr_resync < max_sector) { /* aborted */
			if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
				bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
						&sync_blocks, 1);
			else for (i = 0; i < conf->geo.raid_disks; i++) {
				sector_t sect =
					raid10_find_virt(conf, mddev->curr_resync, i);
				bitmap_end_sync(mddev->bitmap, sect,
						&sync_blocks, 1);
			}
		} else {
			/* completed sync */
			if ((!mddev->bitmap || conf->fullsync)
			    && conf->have_replacement
			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
				/* Completed a full sync so the replacements
				 * are now fully recovered.
				 */
				for (i = 0; i < conf->geo.raid_disks; i++)
					if (conf->mirrors[i].replacement)
						conf->mirrors[i].replacement
							->recovery_offset
							= MaxSector;
			}
			conf->fullsync = 0;
		}
		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		*skipped = 1;
		return sectors_skipped;
	}

	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		return reshape_request(mddev, sector_nr, skipped);

	if (chunks_skipped >= conf->geo.raid_disks) {
		/* if there has been nothing to do on any drive,
		 * then there is nothing to do at all.
		 */
		*skipped = 1;
		return (max_sector - sector_nr) + sectors_skipped;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */

	/* make sure whole request will fit in a chunk - if chunks
	 * are meaningful
	 */
	if (conf->geo.near_copies < conf->geo.raid_disks &&
	    max_sector > (sector_nr | chunk_mask))
		max_sector = (sector_nr | chunk_mask) + 1;
	/*
	 * If there is non-resync activity waiting for us then
	 * put in a delay to throttle resync.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	/* Again, very different code for resync and recovery.
	 * Both must result in an r10bio with a list of bios that
	 * have bi_end_io, bi_sector, bi_bdev set,
	 * and bi_private set to the r10bio.
	 * For recovery, we may actually create several r10bios
	 * with 2 bios in each, that correspond to the bios in the main one.
	 * In this case, the subordinate r10bios link back through a
	 * borrowed master_bio pointer, and the counter in the master
	 * includes a ref from each subordinate.
	 */
	/* First, we decide what to do and set ->bi_end_io
	 * To end_sync_read if we want to read, and
	 * end_sync_write if we will want to write.
	 */

	max_sync = RESYNC_PAGES << (PAGE_SHIFT-9);
	if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* recovery... the complicated one */
		int j;
		r10_bio = NULL;

		for (i = 0 ; i < conf->geo.raid_disks; i++) {
			int still_degraded;
			struct r10bio *rb2;
			sector_t sect;
			int must_sync;
			int any_working;
			struct raid10_info *mirror = &conf->mirrors[i];

			if ((mirror->rdev == NULL ||
			     test_bit(In_sync, &mirror->rdev->flags))
			    &&
			    (mirror->replacement == NULL ||
			     test_bit(Faulty,
				      &mirror->replacement->flags)))
				continue;

			still_degraded = 0;
			/* want to reconstruct this device */
			rb2 = r10_bio;
			sect = raid10_find_virt(conf, sector_nr, i);
			if (sect >= mddev->resync_max_sectors) {
				/* last stripe is not complete - don't
				 * try to recover this sector.
				 */
				continue;
			}
			/* Unless we are doing a full sync, or a replacement
			 * we only need to recover the block if it is set in
			 * the bitmap
			 */
			must_sync = bitmap_start_sync(mddev->bitmap, sect,
						      &sync_blocks, 1);
			if (sync_blocks < max_sync)
				max_sync = sync_blocks;
			if (!must_sync &&
			    mirror->replacement == NULL &&
			    !conf->fullsync) {
				/* yep, skip the sync_blocks here, but don't assume
				 * that there will never be anything to do here
				 */
				chunks_skipped = -1;
				continue;
			}

			r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
			raise_barrier(conf, rb2 != NULL);
			atomic_set(&r10_bio->remaining, 0);

			r10_bio->master_bio = (struct bio*)rb2;
			if (rb2)
				atomic_inc(&rb2->remaining);
			r10_bio->mddev = mddev;
			set_bit(R10BIO_IsRecover, &r10_bio->state);
			r10_bio->sector = sect;

			raid10_find_phys(conf, r10_bio);

			/* Need to check if the array will still be
			 * degraded
			 */
			for (j = 0; j < conf->geo.raid_disks; j++)
				if (conf->mirrors[j].rdev == NULL ||
				    test_bit(Faulty, &conf->mirrors[j].rdev->flags)) {
					still_degraded = 1;
					break;
				}

			must_sync = bitmap_start_sync(mddev->bitmap, sect,
						      &sync_blocks, still_degraded);

			any_working = 0;
			for (j=0; j<conf->copies;j++) {
				int k;
				int d = r10_bio->devs[j].devnum;
				sector_t from_addr, to_addr;
				struct md_rdev *rdev;
				sector_t sector, first_bad;
				int bad_sectors;
				if (!conf->mirrors[d].rdev ||
				    !test_bit(In_sync, &conf->mirrors[d].rdev->flags))
					continue;
				/* This is where we read from */
				any_working = 1;
				rdev = conf->mirrors[d].rdev;
				sector = r10_bio->devs[j].addr;

				if (is_badblock(rdev, sector, max_sync,
						&first_bad, &bad_sectors)) {
					if (first_bad > sector)
						max_sync = first_bad - sector;
					else {
						bad_sectors -= (sector
								- first_bad);
						if (max_sync > bad_sectors)
							max_sync = bad_sectors;
						continue;
					}
				}
				bio = r10_bio->devs[0].bio;
				bio_reset(bio);
				bio->bi_next = biolist;
				biolist = bio;
				bio->bi_private = r10_bio;
				bio->bi_end_io = end_sync_read;
				bio->bi_rw = READ;
				from_addr = r10_bio->devs[j].addr;
				bio->bi_iter.bi_sector = from_addr +
					rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				atomic_inc(&rdev->nr_pending);
				/* and we write to 'i' (if not in_sync) */

				for (k=0; k<conf->copies; k++)
					if (r10_bio->devs[k].devnum == i)
						break;
				BUG_ON(k == conf->copies);
				to_addr = r10_bio->devs[k].addr;
				r10_bio->devs[0].devnum = d;
				r10_bio->devs[0].addr = from_addr;
				r10_bio->devs[1].devnum = i;
				r10_bio->devs[1].addr = to_addr;
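				/* The r10_bio is repurposed here: slot 0
				 * now describes the source of the good
				 * data and slot 1 the device being
				 * rebuilt.
				 */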

				rdev = mirror->rdev;
				if (!test_bit(In_sync, &rdev->flags)) {
					bio = r10_bio->devs[1].bio;
					bio_reset(bio);
					bio->bi_next = biolist;
					biolist = bio;
					bio->bi_private = r10_bio;
					bio->bi_end_io = end_sync_write;
					bio->bi_rw = WRITE;
					bio->bi_iter.bi_sector = to_addr
						+ rdev->data_offset;
					bio->bi_bdev = rdev->bdev;
					atomic_inc(&r10_bio->remaining);
				} else
					r10_bio->devs[1].bio->bi_end_io = NULL;

				/* and maybe write to replacement */
				bio = r10_bio->devs[1].repl_bio;
				if (bio)
					bio->bi_end_io = NULL;
				rdev = mirror->replacement;
				/* Note: if rdev != NULL, then bio
				 * cannot be NULL as r10buf_pool_alloc will
				 * have allocated it.
				 * So the second test here is pointless.
				 * But it keeps semantic-checkers happy, and
				 * this comment keeps human reviewers
				 * happy.
				 */
				if (rdev == NULL || bio == NULL ||
				    test_bit(Faulty, &rdev->flags))
					break;
				bio_reset(bio);
				bio->bi_next = biolist;
				biolist = bio;
				bio->bi_private = r10_bio;
				bio->bi_end_io = end_sync_write;
				bio->bi_rw = WRITE;
				bio->bi_iter.bi_sector = to_addr +
					rdev->data_offset;
				bio->bi_bdev = rdev->bdev;
				atomic_inc(&r10_bio->remaining);
				break;
			}
			if (j == conf->copies) {
				/* Cannot recover, so abort the recovery or
				 * record a bad block */
				if (any_working) {
					/* problem is that there are bad blocks
					 * on other device(s)
					 */
					int k;
					for (k = 0; k < conf->copies; k++)
						if (r10_bio->devs[k].devnum == i)
							break;
					if (!test_bit(In_sync,
						      &mirror->rdev->flags)
					    && !rdev_set_badblocks(
						    mirror->rdev,
						    r10_bio->devs[k].addr,
						    max_sync, 0))
						any_working = 0;
					if (mirror->replacement &&
					    !rdev_set_badblocks(
						    mirror->replacement,
						    r10_bio->devs[k].addr,
						    max_sync, 0))
						any_working = 0;
				}
				if (!any_working) {
					if (!test_and_set_bit(MD_RECOVERY_INTR,
							      &mddev->recovery))
						printk(KERN_INFO "md/raid10:%s: insufficient "
						       "working devices for recovery.\n",
						       mdname(mddev));
					mirror->recovery_disabled
						= mddev->recovery_disabled;
				}
				put_buf(r10_bio);
				if (rb2)
					atomic_dec(&rb2->remaining);
				r10_bio = rb2;
				break;
			}
		}
		if (biolist == NULL) {
			while (r10_bio) {
				struct r10bio *rb2 = r10_bio;
				r10_bio = (struct r10bio*) rb2->master_bio;
				rb2->master_bio = NULL;
				put_buf(rb2);
			}
			goto giveup;
		}
	} else {
		/* resync.  Schedule a read for every block at this virt offset */
		int count = 0;

		bitmap_cond_end_sync(mddev->bitmap, sector_nr);

		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
				       &sync_blocks, mddev->degraded) &&
		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED,
						 &mddev->recovery)) {
			/* We can skip this block */
			*skipped = 1;
			return sync_blocks + sectors_skipped;
		}
		if (sync_blocks < max_sync)
			max_sync = sync_blocks;
		r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);

		r10_bio->mddev = mddev;
		atomic_set(&r10_bio->remaining, 0);
		raise_barrier(conf, 0);
		conf->next_resync = sector_nr;

		r10_bio->master_bio = NULL;
		r10_bio->sector = sector_nr;
		set_bit(R10BIO_IsSync, &r10_bio->state);
		raid10_find_phys(conf, r10_bio);
		r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;

		for (i = 0; i < conf->copies; i++) {
			int d = r10_bio->devs[i].devnum;
			sector_t first_bad, sector;
			int bad_sectors;

			if (r10_bio->devs[i].repl_bio)
				r10_bio->devs[i].repl_bio->bi_end_io = NULL;

			bio = r10_bio->devs[i].bio;
			bio_reset(bio);
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
			if (conf->mirrors[d].rdev == NULL ||
			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
				continue;
			sector = r10_bio->devs[i].addr;
			if (is_badblock(conf->mirrors[d].rdev,
					sector, max_sync,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector)
					max_sync = first_bad - sector;
				else {
					bad_sectors -= (sector - first_bad);
					if (max_sync > bad_sectors)
						max_sync = bad_sectors;
					continue;
				}
			}
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			atomic_inc(&r10_bio->remaining);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_read;
			bio->bi_rw = READ;
			bio->bi_iter.bi_sector = sector +
				conf->mirrors[d].rdev->data_offset;
			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
			count++;

			if (conf->mirrors[d].replacement == NULL ||
			    test_bit(Faulty,
				     &conf->mirrors[d].replacement->flags))
				continue;

			/* Need to set up for writing to the replacement */
			bio = r10_bio->devs[i].repl_bio;
			bio_reset(bio);
			clear_bit(BIO_UPTODATE, &bio->bi_flags);

			sector = r10_bio->devs[i].addr;
			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
			bio->bi_next = biolist;
			biolist = bio;
			bio->bi_private = r10_bio;
			bio->bi_end_io = end_sync_write;
			bio->bi_rw = WRITE;
			bio->bi_iter.bi_sector = sector +
				conf->mirrors[d].replacement->data_offset;
			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
			count++;
		}

		if (count < 2) {
			for (i=0; i<conf->copies; i++) {
				int d = r10_bio->devs[i].devnum;
				if (r10_bio->devs[i].bio->bi_end_io)
					rdev_dec_pending(conf->mirrors[d].rdev,
							 mddev);
				if (r10_bio->devs[i].repl_bio &&
				    r10_bio->devs[i].repl_bio->bi_end_io)
					rdev_dec_pending(
						conf->mirrors[d].replacement,
						mddev);
			}
			put_buf(r10_bio);
			biolist = NULL;
			goto giveup;
		}
	}

	nr_sectors = 0;
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;
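	/* Attach pages to every bio in lock-step, one page at a time.
	 * If any bio cannot take the next page, the page just added to
	 * the earlier bios is stripped off again so that all of them
	 * cover exactly the same sectors.
	 */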
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		for (bio= biolist ; bio ; bio=bio->bi_next) {
			struct bio *bio2;
			page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
			if (bio_add_page(bio, page, len, 0))
				continue;

			/* stop here */
			bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
			for (bio2 = biolist;
			     bio2 && bio2 != bio;
			     bio2 = bio2->bi_next) {
				/* remove last page from this bio */
				bio2->bi_vcnt--;
				bio2->bi_iter.bi_size -= len;
				bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
			}
			goto bio_full;
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
	} while (biolist->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r10_bio->sectors = nr_sectors;

	while (biolist) {
		bio = biolist;
		biolist = biolist->bi_next;

		bio->bi_next = NULL;
		r10_bio = bio->bi_private;
		r10_bio->sectors = nr_sectors;

		if (bio->bi_end_io == end_sync_read) {
			md_sync_acct(bio->bi_bdev, nr_sectors);
			set_bit(BIO_UPTODATE, &bio->bi_flags);
			generic_make_request(bio);
		}
	}

	if (sectors_skipped)
		/* pretend they weren't skipped, it makes
		 * no important difference in this case
		 */
		md_done_sync(mddev, sectors_skipped, 1);

	return sectors_skipped + nr_sectors;
 giveup:
	/* There is nowhere to write, so all non-sync
	 * drives must be failed or in resync, all drives
	 * have a bad block, so try the next chunk...
	 */
	if (sector_nr + max_sync < max_sector)
		max_sector = sector_nr + max_sync;

	sectors_skipped += (max_sector - sector_nr);
	chunks_skipped ++;
	sector_nr = max_sector;
	goto skipped;
}

static sector_t
raid10_size(struct mddev *mddev, sector_t sectors, int raid_disks)
{
	sector_t size;
	struct r10conf *conf = mddev->private;

	if (!raid_disks)
		raid_disks = min(conf->geo.raid_disks,
				 conf->prev.raid_disks);
	if (!sectors)
		sectors = conf->dev_sectors;

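	/* For example (assuming an 'n2' layout): 10 devices of 1000
	 * chunks each with near_copies = 2 and far_copies = 1 give
	 * 1000 / 1 * 10 / 2 = 5000 chunks of array capacity.
	 */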
	size = sectors >> conf->geo.chunk_shift;
	sector_div(size, conf->geo.far_copies);
	size = size * raid_disks;
	sector_div(size, conf->geo.near_copies);

	return size << conf->geo.chunk_shift;
}

static void calc_sectors(struct r10conf *conf, sector_t size)
{
	/* Calculate the number of sectors-per-device that will
	 * actually be used, and set conf->dev_sectors and
	 * conf->stride
	 */

	size = size >> conf->geo.chunk_shift;
	sector_div(size, conf->geo.far_copies);
	size = size * conf->geo.raid_disks;
	sector_div(size, conf->geo.near_copies);
	/* 'size' is now the number of chunks in the array */
	/* calculate "used chunks per device" */
	size = size * conf->copies;

	/* We need to round up when dividing by raid_disks to
	 * get the stride size.
	 */
	size = DIV_ROUND_UP_SECTOR_T(size, conf->geo.raid_disks);

	conf->dev_sectors = size << conf->geo.chunk_shift;

	if (conf->geo.far_offset)
		conf->geo.stride = 1 << conf->geo.chunk_shift;
	else {
		sector_div(size, conf->geo.far_copies);
		conf->geo.stride = size << conf->geo.chunk_shift;
	}
}

enum geo_type {geo_new, geo_old, geo_start};
static int setup_geo(struct geom *geo, struct mddev *mddev, enum geo_type new)
{
	int nc, fc, fo;
	int layout, chunk, disks;
	switch (new) {
	case geo_old:
		layout = mddev->layout;
		chunk = mddev->chunk_sectors;
		disks = mddev->raid_disks - mddev->delta_disks;
		break;
	case geo_new:
		layout = mddev->new_layout;
		chunk = mddev->new_chunk_sectors;
		disks = mddev->raid_disks;
		break;
	default: /* avoid 'may be unused' warnings */
	case geo_start: /* new when starting reshape - raid_disks not
			 * updated yet. */
		layout = mddev->new_layout;
		chunk = mddev->new_chunk_sectors;
		disks = mddev->raid_disks + mddev->delta_disks;
		break;
	}
	if (layout >> 18)
		return -1;
	if (chunk < (PAGE_SIZE >> 9) ||
	    !is_power_of_2(chunk))
		return -2;
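	/* Decode the layout word; for example the common 'n2' layout
	 * is 0x102: near_copies = 2, far_copies = 1, with neither
	 * far_offset (bit 16) nor use_far_sets (bit 17) set.
	 */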
	nc = layout & 255;
	fc = (layout >> 8) & 255;
	fo = layout & (1<<16);
	geo->raid_disks = disks;
	geo->near_copies = nc;
	geo->far_copies = fc;
	geo->far_offset = fo;
	geo->far_set_size = (layout & (1<<17)) ? disks / fc : disks;
	geo->chunk_mask = chunk - 1;
	geo->chunk_shift = ffz(~chunk);
	return nc*fc;
}

static struct r10conf *setup_conf(struct mddev *mddev)
{
	struct r10conf *conf = NULL;
	int err = -EINVAL;
	struct geom geo;
	int copies;

	copies = setup_geo(&geo, mddev, geo_new);

	if (copies == -2) {
		printk(KERN_ERR "md/raid10:%s: chunk size must be "
		       "at least PAGE_SIZE(%ld) and be a power of 2.\n",
		       mdname(mddev), PAGE_SIZE);
		goto out;
	}

	if (copies < 2 || copies > mddev->raid_disks) {
		printk(KERN_ERR "md/raid10:%s: unsupported raid10 layout: 0x%8x\n",
		       mdname(mddev), mddev->new_layout);
		goto out;
	}

	err = -ENOMEM;
	conf = kzalloc(sizeof(struct r10conf), GFP_KERNEL);
	if (!conf)
		goto out;

	/* FIXME calc properly */
	conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks +
							    max(0,-mddev->delta_disks)),
				GFP_KERNEL);
	if (!conf->mirrors)
		goto out;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto out;

	conf->geo = geo;
	conf->copies = copies;
	conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
					   r10bio_pool_free, conf);
	if (!conf->r10bio_pool)
		goto out;

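	/* While a reshape is in progress, 'prev' describes the old
	 * geometry and 'geo' the new one; otherwise the two are the
	 * same.
	 */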
	calc_sectors(conf, mddev->dev_sectors);
	if (mddev->reshape_position == MaxSector) {
		conf->prev = conf->geo;
		conf->reshape_progress = MaxSector;
	} else {
		if (setup_geo(&conf->prev, mddev, geo_old) != conf->copies) {
			err = -EINVAL;
			goto out;
		}
		conf->reshape_progress = mddev->reshape_position;
		if (conf->prev.far_offset)
			conf->prev.stride = 1 << conf->prev.chunk_shift;
		else
			/* far_copies must be 1 */
			conf->prev.stride = conf->dev_sectors;
	}
	spin_lock_init(&conf->device_lock);
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	conf->thread = md_register_thread(raid10d, mddev, "raid10");
	if (!conf->thread)
		goto out;

	conf->mddev = mddev;
	return conf;

 out:
	if (err == -ENOMEM)
		printk(KERN_ERR "md/raid10:%s: couldn't allocate memory.\n",
		       mdname(mddev));
	if (conf) {
		if (conf->r10bio_pool)
			mempool_destroy(conf->r10bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf);
	}
	return ERR_PTR(err);
}

static int run(struct mddev *mddev)
{
	struct r10conf *conf;
	int i, disk_idx, chunk_size;
	struct raid10_info *disk;
	struct md_rdev *rdev;
	sector_t size;
	sector_t min_offset_diff = 0;
	int first = 1;
	bool discard_supported = false;

	if (mddev->private == NULL) {
		conf = setup_conf(mddev);
		if (IS_ERR(conf))
			return PTR_ERR(conf);
		mddev->private = conf;
	}
	conf = mddev->private;
	if (!conf)
		goto out;

	mddev->thread = conf->thread;
	conf->thread = NULL;

	chunk_size = mddev->chunk_sectors << 9;
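	/* io_opt advertises a full stripe: when near_copies divides
	 * raid_disks evenly a stripe spans raid_disks / near_copies
	 * chunks; otherwise the copies rotate across all disks, so a
	 * whole raid_disks-wide pass is advertised instead.
	 */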
	if (mddev->queue) {
		blk_queue_max_discard_sectors(mddev->queue,
					      mddev->chunk_sectors);
		blk_queue_max_write_same_sectors(mddev->queue, 0);
		blk_queue_io_min(mddev->queue, chunk_size);
		if (conf->geo.raid_disks % conf->geo.near_copies)
			blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks);
		else
			blk_queue_io_opt(mddev->queue, chunk_size *
					 (conf->geo.raid_disks / conf->geo.near_copies));
	}

	rdev_for_each(rdev, mddev) {
		long long diff;
		struct request_queue *q;

		disk_idx = rdev->raid_disk;
		if (disk_idx < 0)
			continue;
		if (disk_idx >= conf->geo.raid_disks &&
		    disk_idx >= conf->prev.raid_disks)
			continue;
		disk = conf->mirrors + disk_idx;

		if (test_bit(Replacement, &rdev->flags)) {
			if (disk->replacement)
				goto out_free_conf;
			disk->replacement = rdev;
		} else {
			if (disk->rdev)
				goto out_free_conf;
			disk->rdev = rdev;
		}
		q = bdev_get_queue(rdev->bdev);
		if (q->merge_bvec_fn)
			mddev->merge_check_needed = 1;
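		/* min_offset_diff is the smallest gap between old and
		 * new data offsets across all members; a reshape uses
		 * it as the amount of spare room it can rely on.
		 */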
		diff = (rdev->new_data_offset - rdev->data_offset);
		if (!mddev->reshape_backwards)
			diff = -diff;
		if (diff < 0)
			diff = 0;
		if (first || diff < min_offset_diff)
			min_offset_diff = diff;
		first = 0;
NeilBrown56a25592011-12-23 10:17:55 +11003683
Jonathan Brassowcc4d1ef2012-07-31 10:03:53 +10003684 if (mddev->gendisk)
3685 disk_stack_limits(mddev->gendisk, rdev->bdev,
3686 rdev->data_offset << 9);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003687
3688 disk->head_position = 0;
Shaohua Li532a2a32012-10-11 13:30:52 +11003689
3690 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3691 discard_supported = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003692 }
NeilBrown3ea7daa2012-05-22 13:53:47 +10003693
Jonathan Brassowed30be02012-10-31 11:42:30 +11003694 if (mddev->queue) {
3695 if (discard_supported)
3696 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
3697 mddev->queue);
3698 else
3699 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD,
3700 mddev->queue);
3701 }
NeilBrown6d508242005-09-09 16:24:03 -07003702 /* need to check that every block has at least one working mirror */
NeilBrown700c7212011-07-27 11:00:36 +10003703 if (!enough(conf, -1)) {
NeilBrown128595e2010-05-03 14:47:14 +10003704 printk(KERN_ERR "md/raid10:%s: not enough operational mirrors.\n",
NeilBrown6d508242005-09-09 16:24:03 -07003705 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003706 goto out_free_conf;
3707 }
3708
NeilBrown3ea7daa2012-05-22 13:53:47 +10003709 if (conf->reshape_progress != MaxSector) {
3710 /* must ensure that shape change is supported */
3711 if (conf->geo.far_copies != 1 &&
3712 conf->geo.far_offset == 0)
3713 goto out_free_conf;
3714 if (conf->prev.far_copies != 1 &&
NeilBrown78eaa0d2013-07-02 15:58:05 +10003715 conf->prev.far_offset == 0)
NeilBrown3ea7daa2012-05-22 13:53:47 +10003716 goto out_free_conf;
3717 }
3718
Linus Torvalds1da177e2005-04-16 15:20:36 -07003719 mddev->degraded = 0;
NeilBrownf8c9e742012-05-21 09:28:33 +10003720 for (i = 0;
3721 i < conf->geo.raid_disks
3722 || i < conf->prev.raid_disks;
3723 i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003724
3725 disk = conf->mirrors + i;
3726
NeilBrown56a25592011-12-23 10:17:55 +11003727 if (!disk->rdev && disk->replacement) {
3728 /* The replacement is all we have - use it */
3729 disk->rdev = disk->replacement;
3730 disk->replacement = NULL;
3731 clear_bit(Replacement, &disk->rdev->flags);
3732 }
3733
NeilBrown5fd6c1d2006-06-26 00:27:40 -07003734 if (!disk->rdev ||
NeilBrown2e333e82006-10-21 10:24:07 -07003735 !test_bit(In_sync, &disk->rdev->flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 disk->head_position = 0;
3737 mddev->degraded++;
NeilBrown0b59bb62014-01-14 16:30:10 +11003738 if (disk->rdev &&
3739 disk->rdev->saved_raid_disk < 0)
Neil Brown8c2e8702008-06-28 08:30:52 +10003740 conf->fullsync = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741 }
NeilBrownd890fa22011-10-26 11:54:39 +11003742 disk->recovery_disabled = mddev->recovery_disabled - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743 }
3744
Andre Noll8c6ac862009-06-18 08:48:06 +10003745 if (mddev->recovery_cp != MaxSector)
NeilBrown128595e2010-05-03 14:47:14 +10003746 printk(KERN_NOTICE "md/raid10:%s: not clean"
Andre Noll8c6ac862009-06-18 08:48:06 +10003747 " -- starting background reconstruction\n",
3748 mdname(mddev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003749 printk(KERN_INFO
NeilBrown128595e2010-05-03 14:47:14 +10003750 "md/raid10:%s: active with %d out of %d devices\n",
NeilBrown5cf00fc2012-05-21 09:28:20 +10003751 mdname(mddev), conf->geo.raid_disks - mddev->degraded,
3752 conf->geo.raid_disks);
	/*
	 * Ok, everything is just fine now
	 */
	mddev->dev_sectors = conf->dev_sectors;
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;

	if (mddev->queue) {
		int stripe = conf->geo.raid_disks *
			((mddev->chunk_sectors << 9) / PAGE_SIZE);
		mddev->queue->backing_dev_info.congested_fn = raid10_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;

		/* Calculate max read-ahead size.
		 * We need to readahead at least twice a whole stripe....
		 * maybe...
		 */
		stripe /= conf->geo.near_copies;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
		blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec);
	}

	if (md_integrity_register(mddev))
		goto out_free_conf;

	if (conf->reshape_progress != MaxSector) {
		unsigned long before_length, after_length;

		before_length = ((1 << conf->prev.chunk_shift) *
				 conf->prev.far_copies);
		after_length = ((1 << conf->geo.chunk_shift) *
				conf->geo.far_copies);

		if (max(before_length, after_length) > min_offset_diff) {
			/* This cannot work */
			printk("md/raid10: offset difference not enough to continue reshape\n");
			goto out_free_conf;
		}
		conf->offset_diff = min_offset_diff;

		conf->reshape_safe = conf->reshape_progress;
		clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		mddev->sync_thread = md_register_thread(md_do_sync, mddev,
							"reshape");
	}

	return 0;

out_free_conf:
	md_unregister_thread(&mddev->thread);
	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
out:
	return -EIO;
}

static int stop(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;

	raise_barrier(conf, 0);
	lower_barrier(conf);

	md_unregister_thread(&mddev->thread);
	if (mddev->queue)
		/* the unplug fn references 'conf' */
		blk_sync_queue(mddev->queue);

	if (conf->r10bio_pool)
		mempool_destroy(conf->r10bio_pool);
	safe_put_page(conf->tmppage);
	kfree(conf->mirrors);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static void raid10_quiesce(struct mddev *mddev, int state)
{
	struct r10conf *conf = mddev->private;

	switch(state) {
	case 1:
		raise_barrier(conf, 0);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}
static int raid10_resize(struct mddev *mddev, sector_t sectors)
{
	/* Resize of 'far' arrays is not supported.
	 * For 'near' and 'offset' arrays we can set the
	 * number of sectors used to be an appropriate multiple
	 * of the chunk size.
	 * For 'offset', this is far_copies*chunksize.
	 * For 'near' the multiplier is the LCM of
	 * near_copies and raid_disks.
	 * So if far_copies > 1 && !far_offset, fail.
	 * Else find LCM(raid_disks, near_copies)*far_copies and
	 * multiply by chunk_size.  Then round to this number.
	 * This is mostly done by raid10_size()
	 */
	struct r10conf *conf = mddev->private;
	sector_t oldsize, size;

	if (mddev->reshape_position != MaxSector)
		return -EBUSY;

	if (conf->geo.far_copies > 1 && !conf->geo.far_offset)
		return -EINVAL;

	oldsize = raid10_size(mddev, 0, 0);
	size = raid10_size(mddev, sectors, 0);
	if (mddev->external_size &&
	    mddev->array_sectors > size)
		return -EINVAL;
	if (mddev->bitmap) {
		int ret = bitmap_resize(mddev->bitmap, size, 0, 0);
		if (ret)
			return ret;
	}
	md_set_array_sectors(mddev, size);
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > oldsize) {
		mddev->recovery_cp = oldsize;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	calc_sectors(conf, sectors);
	mddev->dev_sectors = conf->dev_sectors;
	mddev->resync_max_sectors = size;
	return 0;
}

static void *raid10_takeover_raid0(struct mddev *mddev)
{
	struct md_rdev *rdev;
	struct r10conf *conf;

	if (mddev->degraded > 0) {
		printk(KERN_ERR "md/raid10:%s: Error: degraded raid0!\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	/* Set new parameters */
	mddev->new_level = 10;
	/* new layout: far_copies = 1, near_copies = 2 */
	mddev->new_layout = (1<<8) + 2;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->delta_disks = mddev->raid_disks;
	mddev->raid_disks *= 2;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	conf = setup_conf(mddev);
	if (!IS_ERR(conf)) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk >= 0)
				rdev->new_raid_disk = rdev->raid_disk * 2;
		conf->barrier = 1;
	}

	return conf;
}

static void *raid10_takeover(struct mddev *mddev)
{
	struct r0conf *raid0_conf;

	/* raid10 can take over:
	 * raid0 - providing it has only two drives
	 */
	if (mddev->level == 0) {
		/* for raid0 takeover only one zone is supported */
		raid0_conf = mddev->private;
		if (raid0_conf->nr_strip_zones > 1) {
			printk(KERN_ERR "md/raid10:%s: cannot takeover raid 0"
			       " with more than one zone.\n",
			       mdname(mddev));
			return ERR_PTR(-EINVAL);
		}
		return raid10_takeover_raid0(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static int raid10_check_reshape(struct mddev *mddev)
{
	/* Called when there is a request to change
	 * - layout (to ->new_layout)
	 * - chunk size (to ->new_chunk_sectors)
	 * - raid_disks (by delta_disks)
	 * or when trying to restart a reshape that was ongoing.
	 *
	 * We need to validate the request and possibly allocate
	 * space if that might be an issue later.
	 *
	 * Currently we reject any reshape of a 'far' mode array,
	 * allow chunk size to change if new is generally acceptable,
	 * allow raid_disks to increase, and allow
	 * a switch between 'near' mode and 'offset' mode.
	 */
	struct r10conf *conf = mddev->private;
	struct geom geo;

	if (conf->geo.far_copies != 1 && !conf->geo.far_offset)
		return -EINVAL;

	if (setup_geo(&geo, mddev, geo_start) != conf->copies)
		/* mustn't change number of copies */
		return -EINVAL;
	if (geo.far_copies > 1 && !geo.far_offset)
		/* Cannot switch to 'far' mode */
		return -EINVAL;

	if (mddev->array_sectors & geo.chunk_mask)
		/* not factor of array size */
		return -EINVAL;
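	/*
	 * (Illustration: the chunk_mask test requires the array size to be
	 * a multiple of the new chunk, so e.g. a 1023MiB array cannot
	 * switch to a 2MiB chunk.)
	 */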

	if (!enough(conf, -1))
		return -EINVAL;

	kfree(conf->mirrors_new);
	conf->mirrors_new = NULL;
	if (mddev->delta_disks > 0) {
		/* allocate new 'mirrors' list */
		conf->mirrors_new = kzalloc(
			sizeof(struct raid10_info)
			*(mddev->raid_disks +
			  mddev->delta_disks),
			GFP_KERNEL);
		if (!conf->mirrors_new)
			return -ENOMEM;
	}
	return 0;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
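/*
 * Example (illustrative): while growing from 2 to 4 devices, a device
 * that fails after its data has been copied into the new ('geo')
 * section may degrade only the 'prev' section.  calc_degraded() below
 * therefore counts each geometry separately and reports the worse of
 * the two.
 */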
static int calc_degraded(struct r10conf *conf)
{
	int degraded, degraded2;
	int i;

	rcu_read_lock();
	degraded = 0;
	/* 'prev' section first */
	for (i = 0; i < conf->prev.raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded++;
		else if (!test_bit(In_sync, &rdev->flags))
			/* When we can reduce the number of devices in
			 * an array, this might not contribute to
			 * 'degraded'.  It does now.
			 */
			degraded++;
	}
	rcu_read_unlock();
	if (conf->geo.raid_disks == conf->prev.raid_disks)
		return degraded;
	rcu_read_lock();
	degraded2 = 0;
	for (i = 0; i < conf->geo.raid_disks; i++) {
		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (!rdev || test_bit(Faulty, &rdev->flags))
			degraded2++;
		else if (!test_bit(In_sync, &rdev->flags)) {
			/* If reshape is increasing the number of devices,
			 * this section has already been recovered, so
			 * it doesn't contribute to degraded.
			 * else it does.
			 */
			if (conf->geo.raid_disks <= conf->prev.raid_disks)
				degraded2++;
		}
	}
	rcu_read_unlock();
	if (degraded2 > degraded)
		return degraded2;
	return degraded;
}

static int raid10_start_reshape(struct mddev *mddev)
{
	/* A 'reshape' has been requested.  This commits
	 * the various 'new' fields and sets MD_RECOVERY_RESHAPE.
	 * This also checks if there are enough spares and adds them
	 * to the array.
	 * We currently require enough spares to make the final
	 * array non-degraded.  We also require that the difference
	 * between old and new data_offset - on each device - is
	 * enough that we never risk over-writing.
	 */

	unsigned long before_length, after_length;
	sector_t min_offset_diff = 0;
	int first = 1;
	struct geom new;
	struct r10conf *conf = mddev->private;
	struct md_rdev *rdev;
	int spares = 0;
	int ret;

	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		return -EBUSY;

	if (setup_geo(&new, mddev, geo_start) != conf->copies)
		return -EINVAL;

	before_length = ((1 << conf->prev.chunk_shift) *
			 conf->prev.far_copies);
	after_length = ((1 << conf->geo.chunk_shift) *
			conf->geo.far_copies);

	rdev_for_each(rdev, mddev) {
		if (!test_bit(In_sync, &rdev->flags)
		    && !test_bit(Faulty, &rdev->flags))
			spares++;
		if (rdev->raid_disk >= 0) {
			long long diff = (rdev->new_data_offset
					  - rdev->data_offset);
			if (!mddev->reshape_backwards)
				diff = -diff;
			if (diff < 0)
				diff = 0;
			if (first || diff < min_offset_diff)
				min_offset_diff = diff;
		}
	}

	if (max(before_length, after_length) > min_offset_diff)
		return -EINVAL;
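	/*
	 * (Illustration: before_length/after_length are the sectors spanned
	 * by one chunk times far_copies in the old and new geometry.
	 * Demanding that min_offset_diff - the smallest gap between old and
	 * new data_offset on any device - is at least that large ensures a
	 * relocated chunk written at new_data_offset can never overwrite
	 * data not yet read from data_offset.)
	 */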

	if (spares < mddev->delta_disks)
		return -EINVAL;

	conf->offset_diff = min_offset_diff;
	spin_lock_irq(&conf->device_lock);
	if (conf->mirrors_new) {
		memcpy(conf->mirrors_new, conf->mirrors,
		       sizeof(struct raid10_info)*conf->prev.raid_disks);
		smp_mb();
		kfree(conf->mirrors_old); /* FIXME and elsewhere */
		conf->mirrors_old = conf->mirrors;
		conf->mirrors = conf->mirrors_new;
		conf->mirrors_new = NULL;
	}
	setup_geo(&conf->geo, mddev, geo_start);
	smp_mb();
	if (mddev->reshape_backwards) {
		sector_t size = raid10_size(mddev, 0, 0);
		if (size < mddev->array_sectors) {
			spin_unlock_irq(&conf->device_lock);
			printk(KERN_ERR "md/raid10:%s: array size must be reduced before number of disks\n",
			       mdname(mddev));
			return -EINVAL;
		}
		mddev->resync_max_sectors = size;
		conf->reshape_progress = size;
	} else
		conf->reshape_progress = 0;
	spin_unlock_irq(&conf->device_lock);

	if (mddev->delta_disks && mddev->bitmap) {
		ret = bitmap_resize(mddev->bitmap,
				    raid10_size(mddev, 0,
						conf->geo.raid_disks),
				    0, 0);
		if (ret)
			goto abort;
	}
	if (mddev->delta_disks > 0) {
		rdev_for_each(rdev, mddev)
			if (rdev->raid_disk < 0 &&
			    !test_bit(Faulty, &rdev->flags)) {
				if (raid10_add_disk(mddev, rdev) == 0) {
					if (rdev->raid_disk >=
					    conf->prev.raid_disks)
						set_bit(In_sync, &rdev->flags);
					else
						rdev->recovery_offset = 0;

					if (sysfs_link_rdev(mddev, rdev))
						/* Failure here is OK */;
				}
			} else if (rdev->raid_disk >= conf->prev.raid_disks
				   && !test_bit(Faulty, &rdev->flags)) {
				/* This is a spare that was manually added */
				set_bit(In_sync, &rdev->flags);
			}
	}
	/* When a reshape changes the number of devices,
	 * ->degraded is measured against the larger of the
	 * pre and post numbers.
	 */
	spin_lock_irq(&conf->device_lock);
	mddev->degraded = calc_degraded(conf);
	spin_unlock_irq(&conf->device_lock);
	mddev->raid_disks = conf->geo.raid_disks;
	mddev->reshape_position = conf->reshape_progress;
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
	set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
	set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);

	mddev->sync_thread = md_register_thread(md_do_sync, mddev,
						"reshape");
	if (!mddev->sync_thread) {
		ret = -EAGAIN;
		goto abort;
	}
	conf->reshape_checkpoint = jiffies;
	md_wakeup_thread(mddev->sync_thread);
	md_new_event(mddev);
	return 0;

abort:
	mddev->recovery = 0;
	spin_lock_irq(&conf->device_lock);
	conf->geo = conf->prev;
	mddev->raid_disks = conf->geo.raid_disks;
	rdev_for_each(rdev, mddev)
		rdev->new_data_offset = rdev->data_offset;
	smp_wmb();
	conf->reshape_progress = MaxSector;
	mddev->reshape_position = MaxSector;
	spin_unlock_irq(&conf->device_lock);
	return ret;
}

/* Calculate the last device-address that could contain
 * any block from the chunk that includes the array-address 's'
 * and report the next address.
 * i.e. the address returned will be chunk-aligned and after
 * any data that is in the chunk containing 's'.
 */
static sector_t last_dev_address(sector_t s, struct geom *geo)
{
	s = (s | geo->chunk_mask) + 1;
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	s = DIV_ROUND_UP_SECTOR_T(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

/* Calculate the first device-address that could contain
 * any block from the chunk that includes the array-address 's'.
 * This too will be the start of a chunk
 */
static sector_t first_dev_address(sector_t s, struct geom *geo)
{
	s >>= geo->chunk_shift;
	s *= geo->near_copies;
	sector_div(s, geo->raid_disks);
	s *= geo->far_copies;
	s <<= geo->chunk_shift;
	return s;
}

static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
				int *skipped)
{
	/* We simply copy at most one chunk (smallest of old and new)
	 * at a time, possibly less if that exceeds RESYNC_PAGES,
	 * or we hit a bad block or something.
	 * This might mean we pause for normal IO in the middle of
	 * a chunk, but that is not a problem as mddev->reshape_position
	 * can record any location.
	 *
	 * If we will want to write to a location that isn't
	 * yet recorded as 'safe' (i.e. in metadata on disk) then
	 * we need to flush all reshape requests and update the metadata.
	 *
	 * When reshaping forwards (e.g. to more devices), we interpret
	 * 'safe' as the earliest block which might not have been copied
	 * down yet.  We divide this by previous stripe size and multiply
	 * by previous stripe length to get lowest device offset that we
	 * cannot write to yet.
	 * We interpret 'sector_nr' as an address that we want to write to.
	 * From this we use last_dev_address() to find where we might
	 * write to, and first_dev_address() on the 'safe' position.
	 * If this 'next' write position is after the 'safe' position,
	 * we must update the metadata to increase the 'safe' position.
	 *
	 * When reshaping backwards, we round in the opposite direction
	 * and perform the reverse test: next write position must not be
	 * less than current safe position.
	 *
	 * In all this the minimum difference in data offsets
	 * (conf->offset_diff - always positive) allows a bit of slack,
	 * so next can be after 'safe', but not by more than offset_diff.
	 *
	 * We need to prepare all the bios here before we start any IO
	 * to ensure the size we choose is acceptable to all devices.
	 * That means one for each copy for write-out and an extra one for
	 * read-in.
	 * We store the read-in bio in ->master_bio and the others in
	 * ->devs[x].bio and ->devs[x].repl_bio.
	 */
	struct r10conf *conf = mddev->private;
	struct r10bio *r10_bio;
	sector_t next, safe, last;
	int max_sectors;
	int nr_sectors;
	int s;
	struct md_rdev *rdev;
	int need_flush = 0;
	struct bio *blist;
	struct bio *bio, *read_bio;
	int sectors_done = 0;

	if (sector_nr == 0) {
		/* If restarting in the middle, skip the initial sectors */
		if (mddev->reshape_backwards &&
		    conf->reshape_progress < raid10_size(mddev, 0, 0)) {
			sector_nr = (raid10_size(mddev, 0, 0)
				     - conf->reshape_progress);
		} else if (!mddev->reshape_backwards &&
			   conf->reshape_progress > 0)
			sector_nr = conf->reshape_progress;
		if (sector_nr) {
			mddev->curr_resync_completed = sector_nr;
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			*skipped = 1;
			return sector_nr;
		}
	}

	/* We don't use sector_nr to track where we are up to
	 * as that doesn't work well for ->reshape_backwards.
	 * So just use ->reshape_progress.
	 */
	if (mddev->reshape_backwards) {
		/* 'next' is the earliest device address that we might
		 * write to for this chunk in the new layout
		 */
		next = first_dev_address(conf->reshape_progress - 1,
					 &conf->geo);

		/* 'safe' is the last device address that we might read from
		 * in the old layout after a restart
		 */
		safe = last_dev_address(conf->reshape_safe - 1,
					&conf->prev);

		if (next + conf->offset_diff < safe)
			need_flush = 1;

		last = conf->reshape_progress - 1;
		sector_nr = last & ~(sector_t)(conf->geo.chunk_mask
					       & conf->prev.chunk_mask);
		if (sector_nr + RESYNC_BLOCK_SIZE/512 < last)
			sector_nr = last + 1 - RESYNC_BLOCK_SIZE/512;
	} else {
		/* 'next' is after the last device address that we
		 * might write to for this chunk in the new layout
		 */
		next = last_dev_address(conf->reshape_progress, &conf->geo);

		/* 'safe' is the earliest device address that we might
		 * read from in the old layout after a restart
		 */
		safe = first_dev_address(conf->reshape_safe, &conf->prev);

		/* Need to update metadata if 'next' might be beyond 'safe'
		 * as that would possibly corrupt data
		 */
		if (next > safe + conf->offset_diff)
			need_flush = 1;

		sector_nr = conf->reshape_progress;
		last = sector_nr | (conf->geo.chunk_mask
				    & conf->prev.chunk_mask);

		if (sector_nr + RESYNC_BLOCK_SIZE/512 <= last)
			last = sector_nr + RESYNC_BLOCK_SIZE/512 - 1;
	}
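	/*
	 * (Illustration: ANDing the two chunk_masks selects the smaller of
	 * the old and new chunk, so with a 64KiB old chunk and a 128KiB new
	 * chunk each pass copies at most one 64KiB-aligned window, further
	 * capped at RESYNC_BLOCK_SIZE/512 sectors.)
	 */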

	if (need_flush ||
	    time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
		/* Need to update reshape_position in metadata */
		wait_barrier(conf);
		mddev->reshape_position = conf->reshape_progress;
		if (mddev->reshape_backwards)
			mddev->curr_resync_completed = raid10_size(mddev, 0, 0)
				- conf->reshape_progress;
		else
			mddev->curr_resync_completed = conf->reshape_progress;
		conf->reshape_checkpoint = jiffies;
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
		wait_event(mddev->sb_wait, mddev->flags == 0 ||
			   test_bit(MD_RECOVERY_INTR, &mddev->recovery));
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			allow_barrier(conf);
			return sectors_done;
		}
		conf->reshape_safe = mddev->reshape_position;
		allow_barrier(conf);
	}

read_more:
	/* Now schedule reads for blocks from sector_nr to last */
	r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
	raise_barrier(conf, sectors_done != 0);
	atomic_set(&r10_bio->remaining, 0);
	r10_bio->mddev = mddev;
	r10_bio->sector = sector_nr;
	set_bit(R10BIO_IsReshape, &r10_bio->state);
	r10_bio->sectors = last - sector_nr + 1;
	rdev = read_balance(conf, r10_bio, &max_sectors);
	BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));

	if (!rdev) {
		/* Cannot read from here, so need to record bad blocks
		 * on all the target devices.
		 */
		// FIXME
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		return sectors_done;
	}

	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);

	read_bio->bi_bdev = rdev->bdev;
	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
				       + rdev->data_offset);
	read_bio->bi_private = r10_bio;
	read_bio->bi_end_io = end_sync_read;
	read_bio->bi_rw = READ;
	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
	read_bio->bi_flags |= 1 << BIO_UPTODATE;
	read_bio->bi_vcnt = 0;
	read_bio->bi_iter.bi_size = 0;
	r10_bio->master_bio = read_bio;
	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;

	/* Now find the locations in the new layout */
	__raid10_find_phys(&conf->geo, r10_bio);

	blist = read_bio;
	read_bio->bi_next = NULL;

	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev2;
		if (s&1) {
			rdev2 = conf->mirrors[d].replacement;
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev2 = conf->mirrors[d].rdev;
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
			continue;

		bio_reset(b);
		b->bi_bdev = rdev2->bdev;
		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
			rdev2->new_data_offset;
		b->bi_private = r10_bio;
		b->bi_end_io = end_reshape_write;
		b->bi_rw = WRITE;
		b->bi_next = blist;
		blist = b;
	}

	/* Now add as many pages as possible to all of these bios. */

	nr_sectors = 0;
	for (s = 0 ; s < max_sectors; s += PAGE_SIZE >> 9) {
		struct page *page = r10_bio->devs[0].bio->bi_io_vec[s/(PAGE_SIZE>>9)].bv_page;
		int len = (max_sectors - s) << 9;
		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		for (bio = blist; bio ; bio = bio->bi_next) {
			struct bio *bio2;
			if (bio_add_page(bio, page, len, 0))
				continue;

			/* Didn't fit, must stop */
			for (bio2 = blist;
			     bio2 && bio2 != bio;
			     bio2 = bio2->bi_next) {
				/* Remove last page from this bio */
				bio2->bi_vcnt--;
				bio2->bi_iter.bi_size -= len;
				bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
			}
			goto bio_full;
		}
		sector_nr += len >> 9;
		nr_sectors += len >> 9;
	}
bio_full:
	r10_bio->sectors = nr_sectors;

	/* Now submit the read */
	md_sync_acct(read_bio->bi_bdev, r10_bio->sectors);
	atomic_inc(&r10_bio->remaining);
	read_bio->bi_next = NULL;
	generic_make_request(read_bio);
	sector_nr += nr_sectors;
	sectors_done += nr_sectors;
	if (sector_nr <= last)
		goto read_more;

	/* Now that we have done the whole section we can
	 * update reshape_progress
	 */
	if (mddev->reshape_backwards)
		conf->reshape_progress -= sectors_done;
	else
		conf->reshape_progress += sectors_done;

	return sectors_done;
}

static void end_reshape_request(struct r10bio *r10_bio);
static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio);
static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
	/* Reshape read completed.  Hopefully we have a block
	 * to write out.
	 * If we got a read error then we do sync 1-page reads from
	 * elsewhere until we find the data - or give up.
	 */
	struct r10conf *conf = mddev->private;
	int s;

	if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
		if (handle_reshape_read_error(mddev, r10_bio) < 0) {
			/* Reshape has been aborted */
			md_done_sync(mddev, r10_bio->sectors, 0);
			return;
		}

	/* We definitely have the data in the pages, schedule the
	 * writes.
	 */
	atomic_set(&r10_bio->remaining, 1);
	for (s = 0; s < conf->copies*2; s++) {
		struct bio *b;
		int d = r10_bio->devs[s/2].devnum;
		struct md_rdev *rdev;
		if (s&1) {
			rdev = conf->mirrors[d].replacement;
			b = r10_bio->devs[s/2].repl_bio;
		} else {
			rdev = conf->mirrors[d].rdev;
			b = r10_bio->devs[s/2].bio;
		}
		if (!rdev || test_bit(Faulty, &rdev->flags))
			continue;
		atomic_inc(&rdev->nr_pending);
		md_sync_acct(b->bi_bdev, r10_bio->sectors);
		atomic_inc(&r10_bio->remaining);
		b->bi_next = NULL;
		generic_make_request(b);
	}
	end_reshape_request(r10_bio);
}

static void end_reshape(struct r10conf *conf)
{
	if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery))
		return;

	spin_lock_irq(&conf->device_lock);
	conf->prev = conf->geo;
	md_finish_reshape(conf->mddev);
	smp_wmb();
	conf->reshape_progress = MaxSector;
	spin_unlock_irq(&conf->device_lock);

	/* read-ahead size must cover two whole stripes, which is
	 * 2 * (datadisks) * chunksize, where 'datadisks' is
	 * raid_disks / near_copies
	 */
	if (conf->mddev->queue) {
		int stripe = conf->geo.raid_disks *
			((conf->mddev->chunk_sectors << 9) / PAGE_SIZE);
		stripe /= conf->geo.near_copies;
		if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
	conf->fullsync = 0;
}

static int handle_reshape_read_error(struct mddev *mddev,
				     struct r10bio *r10_bio)
{
	/* Use sync reads to get the blocks from somewhere else */
	int sectors = r10_bio->sectors;
	struct r10conf *conf = mddev->private;
	struct {
		struct r10bio r10_bio;
		struct r10dev devs[conf->copies];
	} on_stack;
	struct r10bio *r10b = &on_stack.r10_bio;
	int slot = 0;
	int idx = 0;
	struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec;

	r10b->sector = r10_bio->sector;
	__raid10_find_phys(&conf->prev, r10b);

	while (sectors) {
		int s = sectors;
		int success = 0;
		int first_slot = slot;

		if (s > (PAGE_SIZE >> 9))
			s = PAGE_SIZE >> 9;

		while (!success) {
			int d = r10b->devs[slot].devnum;
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			sector_t addr;
			if (rdev == NULL ||
			    test_bit(Faulty, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				goto failed;

			addr = r10b->devs[slot].addr + idx * PAGE_SIZE;
			success = sync_page_io(rdev,
					       addr,
					       s << 9,
					       bvec[idx].bv_page,
					       READ, false);
			if (success)
				break;
		failed:
			slot++;
			if (slot >= conf->copies)
				slot = 0;
			if (slot == first_slot)
				break;
		}
		if (!success) {
			/* couldn't read this block, must give up */
			set_bit(MD_RECOVERY_INTR,
				&mddev->recovery);
			return -EIO;
		}
		sectors -= s;
		idx++;
	}
	return 0;
}

static void end_reshape_write(struct bio *bio, int error)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct r10bio *r10_bio = bio->bi_private;
	struct mddev *mddev = r10_bio->mddev;
	struct r10conf *conf = mddev->private;
	int d;
	int slot;
	int repl;
	struct md_rdev *rdev = NULL;

	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
	if (repl)
		rdev = conf->mirrors[d].replacement;
	if (!rdev) {
		smp_mb();
		rdev = conf->mirrors[d].rdev;
	}

	if (!uptodate) {
		/* FIXME should record badblock */
		md_error(mddev, rdev);
	}

	rdev_dec_pending(rdev, mddev);
	end_reshape_request(r10_bio);
}

static void end_reshape_request(struct r10bio *r10_bio)
{
	if (!atomic_dec_and_test(&r10_bio->remaining))
		return;
	md_done_sync(r10_bio->mddev, r10_bio->sectors, 1);
	bio_put(r10_bio->master_bio);
	put_buf(r10_bio);
}

static void raid10_finish_reshape(struct mddev *mddev)
{
	struct r10conf *conf = mddev->private;

	if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
		return;

	if (mddev->delta_disks > 0) {
		sector_t size = raid10_size(mddev, 0, 0);
		md_set_array_sectors(mddev, size);
		if (mddev->recovery_cp > mddev->resync_max_sectors) {
			mddev->recovery_cp = mddev->resync_max_sectors;
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		}
		mddev->resync_max_sectors = size;
		set_capacity(mddev->gendisk, mddev->array_sectors);
		revalidate_disk(mddev->gendisk);
	} else {
		int d;
		for (d = conf->geo.raid_disks ;
		     d < conf->geo.raid_disks - mddev->delta_disks;
		     d++) {
			struct md_rdev *rdev = conf->mirrors[d].rdev;
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
			rdev = conf->mirrors[d].replacement;
			if (rdev)
				clear_bit(In_sync, &rdev->flags);
		}
	}
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = 1 << conf->geo.chunk_shift;
	mddev->reshape_position = MaxSector;
	mddev->delta_disks = 0;
	mddev->reshape_backwards = 0;
}

static struct md_personality raid10_personality =
{
	.name		= "raid10",
	.level		= 10,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid10_add_disk,
	.hot_remove_disk= raid10_remove_disk,
	.spare_active	= raid10_spare_active,
	.sync_request	= sync_request,
	.quiesce	= raid10_quiesce,
	.size		= raid10_size,
	.resize		= raid10_resize,
	.takeover	= raid10_takeover,
	.check_reshape	= raid10_check_reshape,
	.start_reshape	= raid10_start_reshape,
	.finish_reshape	= raid10_finish_reshape,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid10_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid10_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID10 (striped mirror) personality for MD");
MODULE_ALIAS("md-personality-9"); /* RAID10 */
MODULE_ALIAS("md-raid10");
MODULE_ALIAS("md-level-10");

module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);