/*
 * raid5.c : Multiple Devices driver for Linux
 *	  Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	  Copyright (C) 1999, 2000 Ingo Molnar
 *	  Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 *    miss any bits.
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/module.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include "md.h"
#include "raid5.h"
#include "raid0.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

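/*
 * Hash a stripe's starting sector (in STRIPE_SIZE units) into the
 * per-array stripe hash table.
 */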
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
{
        int hash = (sect >> STRIPE_SHIFT) & HASH_MASK;
        return &conf->stripe_hashtbl[hash];
}

/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This function determines the 'next' bio in the list, given the sector
 * of the current stripe+device.
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
        int sectors = bio->bi_size >> 9;
        if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else
                return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
        return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
        return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
        --bio->bi_phys_segments;
        return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
        unsigned short val = raid5_bi_hw_segments(bio);

        --val;
        bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
        return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
        bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
        if (sh->ddf_layout)
                /* ddf always starts from the first device */
                return 0;
        /* md starts just after Q block */
        if (sh->qd_idx == sh->disks - 1)
                return 0;
        else
                return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
        disk++;
        return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
                             int *count, int syndrome_disks)
{
        int slot = *count;

        if (sh->ddf_layout)
                (*count)++;
        if (idx == sh->pd_idx)
                return syndrome_disks;
        if (idx == sh->qd_idx)
                return syndrome_disks + 1;
        if (!sh->ddf_layout)
                (*count)++;
        return slot;
}

static void return_io(struct bio *return_bi)
{
        struct bio *bi = return_bi;
        while (bi) {

                return_bi = bi->bi_next;
                bi->bi_next = NULL;
                bi->bi_size = 0;
                bio_endio(bi, 0);
                bi = return_bi;
        }
}

static void print_raid5_conf(struct r5conf *conf);

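/*
 * Return true while any asynchronous operation (check, reconstruct,
 * biofill or compute) is still running against this stripe.
 */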
static int stripe_operations_active(struct stripe_head *sh)
{
        return sh->check_state || sh->reconstruct_state ||
               test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
               test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

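/*
 * Drop one reference to a stripe.  When the last reference goes away the
 * stripe is either requeued for further handling (delayed, bitmap or
 * handle lists) or returned to the inactive list.  Called with
 * conf->device_lock held.
 */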
static void __release_stripe(struct r5conf *conf, struct stripe_head *sh)
{
        if (atomic_dec_and_test(&sh->count)) {
                BUG_ON(!list_empty(&sh->lru));
                BUG_ON(atomic_read(&conf->active_stripes) == 0);
                if (test_bit(STRIPE_HANDLE, &sh->state)) {
                        if (test_bit(STRIPE_DELAYED, &sh->state))
                                list_add_tail(&sh->lru, &conf->delayed_list);
                        else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
                                 sh->bm_seq - conf->seq_write > 0)
                                list_add_tail(&sh->lru, &conf->bitmap_list);
                        else {
                                clear_bit(STRIPE_BIT_DELAY, &sh->state);
                                list_add_tail(&sh->lru, &conf->handle_list);
                        }
                        md_wakeup_thread(conf->mddev->thread);
                } else {
                        BUG_ON(stripe_operations_active(sh));
                        if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
                                if (atomic_dec_return(&conf->preread_active_stripes)
                                    < IO_THRESHOLD)
                                        md_wakeup_thread(conf->mddev->thread);
                        atomic_dec(&conf->active_stripes);
                        if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
                                list_add_tail(&sh->lru, &conf->inactive_list);
                                wake_up(&conf->wait_for_stripe);
                                if (conf->retry_read_aligned)
                                        md_wakeup_thread(conf->mddev->thread);
                        }
                }
        }
}

static void release_stripe(struct stripe_head *sh)
{
        struct r5conf *conf = sh->raid_conf;
        unsigned long flags;

        spin_lock_irqsave(&conf->device_lock, flags);
        __release_stripe(conf, sh);
        spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
        pr_debug("remove_hash(), stripe %llu\n",
                 (unsigned long long)sh->sector);

        hlist_del_init(&sh->hash);
}

static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh)
{
        struct hlist_head *hp = stripe_hash(conf, sh->sector);

        pr_debug("insert_hash(), stripe %llu\n",
                 (unsigned long long)sh->sector);

        hlist_add_head(&sh->hash, hp);
}

/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(struct r5conf *conf)
{
        struct stripe_head *sh = NULL;
        struct list_head *first;

        if (list_empty(&conf->inactive_list))
                goto out;
        first = conf->inactive_list.next;
        sh = list_entry(first, struct stripe_head, lru);
        list_del_init(first);
        remove_hash(sh);
        atomic_inc(&conf->active_stripes);
out:
        return sh;
}

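/* Free the per-device pages attached to a stripe. */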
static void shrink_buffers(struct stripe_head *sh)
{
        struct page *p;
        int i;
        int num = sh->raid_conf->pool_size;

        for (i = 0; i < num; i++) {
                p = sh->dev[i].page;
                if (!p)
                        continue;
                sh->dev[i].page = NULL;
                put_page(p);
        }
}

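/* Allocate one page per device for a stripe; returns 1 on allocation failure. */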
static int grow_buffers(struct stripe_head *sh)
{
        int i;
        int num = sh->raid_conf->pool_size;

        for (i = 0; i < num; i++) {
                struct page *page;

                if (!(page = alloc_page(GFP_KERNEL))) {
                        return 1;
                }
                sh->dev[i].page = page;
        }
        return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous,
                           struct stripe_head *sh);

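/*
 * Re-initialise an idle stripe_head for a new sector: record the generation
 * and disk count, recompute the parity/Q indices and hash the stripe back in.
 */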
static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
        struct r5conf *conf = sh->raid_conf;
        int i;

        BUG_ON(atomic_read(&sh->count) != 0);
        BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
        BUG_ON(stripe_operations_active(sh));

        pr_debug("init_stripe called, stripe %llu\n",
                 (unsigned long long)sh->sector);

        remove_hash(sh);

        sh->generation = conf->generation - previous;
        sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
        sh->sector = sector;
        stripe_set_idx(sector, conf, previous, sh);
        sh->state = 0;

        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                if (dev->toread || dev->read || dev->towrite || dev->written ||
                    test_bit(R5_LOCKED, &dev->flags)) {
                        printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
                               (unsigned long long)sh->sector, i, dev->toread,
                               dev->read, dev->towrite, dev->written,
                               test_bit(R5_LOCKED, &dev->flags));
                        WARN_ON(1);
                }
                dev->flags = 0;
                raid5_build_block(sh, i, previous);
        }
        insert_hash(conf, sh);
}

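/* Look up a stripe in the hash table by sector and generation. */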
static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
                                         short generation)
{
        struct stripe_head *sh;
        struct hlist_node *hn;

        pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
        hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
                if (sh->sector == sector && sh->generation == generation)
                        return sh;
        pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
        return NULL;
}

/*
 * Need to check if array has failed when deciding whether to:
 *  - start an array
 *  - remove non-faulty devices
 *  - add a spare
 *  - allow a reshape
 * This determination is simple when no reshape is happening.
 * However if there is a reshape, we need to carefully check
 * both the before and after sections.
 * This is because some failed devices may only affect one
 * of the two sections, and some non-in_sync devices may
 * be insync in the section most affected by failed devices.
 */
static int calc_degraded(struct r5conf *conf)
{
        int degraded, degraded2;
        int i;

        rcu_read_lock();
        degraded = 0;
        for (i = 0; i < conf->previous_raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded++;
                else if (test_bit(In_sync, &rdev->flags))
                        ;
                else
                        /* not in-sync or faulty.
                         * If the reshape increases the number of devices,
                         * this is being recovered by the reshape, so
                         * this 'previous' section is not in_sync.
                         * If the number of devices is being reduced however,
                         * the device can only be part of the array if
                         * we are reverting a reshape, so this section will
                         * be in-sync.
                         */
                        if (conf->raid_disks >= conf->previous_raid_disks)
                                degraded++;
        }
        rcu_read_unlock();
        if (conf->raid_disks == conf->previous_raid_disks)
                return degraded;
        rcu_read_lock();
        degraded2 = 0;
        for (i = 0; i < conf->raid_disks; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
                if (!rdev || test_bit(Faulty, &rdev->flags))
                        degraded2++;
                else if (test_bit(In_sync, &rdev->flags))
                        ;
                else
                        /* not in-sync or faulty.
                         * If reshape increases the number of devices, this
                         * section has already been recovered, else it
                         * almost certainly hasn't.
                         */
                        if (conf->raid_disks <= conf->previous_raid_disks)
                                degraded2++;
        }
        rcu_read_unlock();
        if (degraded2 > degraded)
                return degraded2;
        return degraded;
}

static int has_failed(struct r5conf *conf)
{
        int degraded;

        if (conf->mddev->reshape_position == MaxSector)
                return conf->mddev->degraded > conf->max_degraded;

        degraded = calc_degraded(conf);
        if (degraded > conf->max_degraded)
                return 1;
        return 0;
}

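/*
 * Find the stripe for 'sector' in the cache, or claim an idle stripe and
 * initialise it.  May sleep waiting for a free stripe unless 'noblock' is
 * set; returns with sh->count incremented, or NULL if 'noblock' and no
 * stripe is available.
 */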
static struct stripe_head *
get_active_stripe(struct r5conf *conf, sector_t sector,
                  int previous, int noblock, int noquiesce)
{
        struct stripe_head *sh;

        pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

        spin_lock_irq(&conf->device_lock);

        do {
                wait_event_lock_irq(conf->wait_for_stripe,
                                    conf->quiesce == 0 || noquiesce,
                                    conf->device_lock, /* nothing */);
                sh = __find_stripe(conf, sector, conf->generation - previous);
                if (!sh) {
                        if (!conf->inactive_blocked)
                                sh = get_free_stripe(conf);
                        if (noblock && sh == NULL)
                                break;
                        if (!sh) {
                                conf->inactive_blocked = 1;
                                wait_event_lock_irq(conf->wait_for_stripe,
                                                    !list_empty(&conf->inactive_list) &&
                                                    (atomic_read(&conf->active_stripes)
                                                     < (conf->max_nr_stripes * 3/4)
                                                     || !conf->inactive_blocked),
                                                    conf->device_lock,
                                                    );
                                conf->inactive_blocked = 0;
                        } else
                                init_stripe(sh, sector, previous);
                } else {
                        if (atomic_read(&sh->count)) {
                                BUG_ON(!list_empty(&sh->lru)
                                       && !test_bit(STRIPE_EXPANDING, &sh->state));
                        } else {
                                if (!test_bit(STRIPE_HANDLE, &sh->state))
                                        atomic_inc(&conf->active_stripes);
                                if (list_empty(&sh->lru) &&
                                    !test_bit(STRIPE_EXPANDING, &sh->state))
                                        BUG();
                                list_del_init(&sh->lru);
                        }
                }
        } while (sh == NULL);

        if (sh)
                atomic_inc(&sh->count);

        spin_unlock_irq(&conf->device_lock);
        return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

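/*
 * Issue the reads and writes that have been scheduled on this stripe
 * (R5_Wantread, R5_Wantwrite, R5_WantReplace) to the underlying member
 * devices, mirroring writes to any replacement device.
 */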
static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
        struct r5conf *conf = sh->raid_conf;
        int i, disks = sh->disks;

        might_sleep();

        for (i = disks; i--; ) {
                int rw;
                int replace_only = 0;
                struct bio *bi, *rbi;
                struct md_rdev *rdev, *rrdev = NULL;
                if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
                        if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
                                rw = WRITE_FUA;
                        else
                                rw = WRITE;
                } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
                        rw = READ;
                else if (test_and_clear_bit(R5_WantReplace,
                                            &sh->dev[i].flags)) {
                        rw = WRITE;
                        replace_only = 1;
                } else
                        continue;

                bi = &sh->dev[i].req;
                rbi = &sh->dev[i].rreq; /* For writing to replacement */

                bi->bi_rw = rw;
                rbi->bi_rw = rw;
                if (rw & WRITE) {
                        bi->bi_end_io = raid5_end_write_request;
                        rbi->bi_end_io = raid5_end_write_request;
                } else
                        bi->bi_end_io = raid5_end_read_request;

                rcu_read_lock();
                rrdev = rcu_dereference(conf->disks[i].replacement);
                smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
                rdev = rcu_dereference(conf->disks[i].rdev);
                if (!rdev) {
                        rdev = rrdev;
                        rrdev = NULL;
                }
                if (rw & WRITE) {
                        if (replace_only)
                                rdev = NULL;
                        if (rdev == rrdev)
                                /* We raced and saw duplicates */
                                rrdev = NULL;
                } else {
                        if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
                                rdev = rrdev;
                        rrdev = NULL;
                }

                if (rdev && test_bit(Faulty, &rdev->flags))
                        rdev = NULL;
                if (rdev)
                        atomic_inc(&rdev->nr_pending);
                if (rrdev && test_bit(Faulty, &rrdev->flags))
                        rrdev = NULL;
                if (rrdev)
                        atomic_inc(&rrdev->nr_pending);
                rcu_read_unlock();

                /* We have already checked bad blocks for reads.  Now
                 * need to check for writes.  We never accept write errors
                 * on the replacement, so we don't need to check rrdev.
                 */
                while ((rw & WRITE) && rdev &&
                       test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        int bad_sectors;
                        int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
                                              &first_bad, &bad_sectors);
                        if (!bad)
                                break;

                        if (bad < 0) {
                                set_bit(BlockedBadBlocks, &rdev->flags);
                                if (!conf->mddev->external &&
                                    conf->mddev->flags) {
                                        /* It is very unlikely, but we might
                                         * still need to write out the
                                         * bad block log - better give it
                                         * a chance */
                                        md_check_recovery(conf->mddev);
                                }
                                md_wait_for_blocked_rdev(rdev, conf->mddev);
                        } else {
                                /* Acknowledged bad block - skip the write */
                                rdev_dec_pending(rdev, conf->mddev);
                                rdev = NULL;
                        }
                }

                if (rdev) {
                        if (s->syncing || s->expanding || s->expanded
                            || s->replacing)
                                md_sync_acct(rdev->bdev, STRIPE_SECTORS);

                        set_bit(STRIPE_IO_STARTED, &sh->state);

                        bi->bi_bdev = rdev->bdev;
                        pr_debug("%s: for %llu schedule op %ld on disc %d\n",
                                 __func__, (unsigned long long)sh->sector,
                                 bi->bi_rw, i);
                        atomic_inc(&sh->count);
                        bi->bi_sector = sh->sector + rdev->data_offset;
                        bi->bi_flags = 1 << BIO_UPTODATE;
                        bi->bi_idx = 0;
                        bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        bi->bi_io_vec[0].bv_offset = 0;
                        bi->bi_size = STRIPE_SIZE;
                        bi->bi_next = NULL;
                        if (rrdev)
                                set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
                        generic_make_request(bi);
                }
                if (rrdev) {
                        if (s->syncing || s->expanding || s->expanded
                            || s->replacing)
                                md_sync_acct(rrdev->bdev, STRIPE_SECTORS);

                        set_bit(STRIPE_IO_STARTED, &sh->state);

                        rbi->bi_bdev = rrdev->bdev;
                        pr_debug("%s: for %llu schedule op %ld on "
                                 "replacement disc %d\n",
                                 __func__, (unsigned long long)sh->sector,
                                 rbi->bi_rw, i);
                        atomic_inc(&sh->count);
                        rbi->bi_sector = sh->sector + rrdev->data_offset;
                        rbi->bi_flags = 1 << BIO_UPTODATE;
                        rbi->bi_idx = 0;
                        rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
                        rbi->bi_io_vec[0].bv_offset = 0;
                        rbi->bi_size = STRIPE_SIZE;
                        rbi->bi_next = NULL;
                        generic_make_request(rbi);
                }
                if (!rdev && !rrdev) {
                        if (rw & WRITE)
                                set_bit(STRIPE_DEGRADED, &sh->state);
                        pr_debug("skip op %ld on disc %d for sector %llu\n",
                                 bi->bi_rw, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
                        set_bit(STRIPE_HANDLE, &sh->state);
                }
        }
}

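/*
 * Copy data between a bio and a stripe page using the async_tx API,
 * clipping the copy to the STRIPE_SIZE window that starts at 'sector'.
 * 'frombio' selects the direction (bio -> page when set).
 */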
static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
        sector_t sector, struct dma_async_tx_descriptor *tx)
{
        struct bio_vec *bvl;
        struct page *bio_page;
        int i;
        int page_offset;
        struct async_submit_ctl submit;
        enum async_tx_flags flags = 0;

        if (bio->bi_sector >= sector)
                page_offset = (signed)(bio->bi_sector - sector) * 512;
        else
                page_offset = (signed)(sector - bio->bi_sector) * -512;

        if (frombio)
                flags |= ASYNC_TX_FENCE;
        init_async_submit(&submit, flags, tx, NULL, NULL, NULL);

        bio_for_each_segment(bvl, bio, i) {
                int len = bvl->bv_len;
                int clen;
                int b_offset = 0;

                if (page_offset < 0) {
                        b_offset = -page_offset;
                        page_offset += b_offset;
                        len -= b_offset;
                }

                if (len > 0 && page_offset + len > STRIPE_SIZE)
                        clen = STRIPE_SIZE - page_offset;
                else
                        clen = len;

                if (clen > 0) {
                        b_offset += bvl->bv_offset;
                        bio_page = bvl->bv_page;
                        if (frombio)
                                tx = async_memcpy(page, bio_page, page_offset,
                                                  b_offset, clen, &submit);
                        else
                                tx = async_memcpy(bio_page, page, b_offset,
                                                  page_offset, clen, &submit);
                }
                /* chain the operations */
                submit.depend_tx = tx;

                if (clen < len) /* hit end of page */
                        break;
                page_offset += len;
        }

        return tx;
}

static void ops_complete_biofill(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;
        struct bio *return_bi = NULL;
        struct r5conf *conf = sh->raid_conf;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        /* clear completed biofills */
        spin_lock_irq(&conf->device_lock);
        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];

                /* acknowledge completion of a biofill operation */
                /* and check if we need to reply to a read request,
                 * new R5_Wantfill requests are held off until
                 * !STRIPE_BIOFILL_RUN
                 */
                if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi, *rbi2;

                        BUG_ON(!dev->read);
                        rbi = dev->read;
                        dev->read = NULL;
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                rbi2 = r5_next_bio(rbi, dev->sector);
                                if (!raid5_dec_bi_phys_segments(rbi)) {
                                        rbi->bi_next = return_bi;
                                        return_bi = rbi;
                                }
                                rbi = rbi2;
                        }
                }
        }
        spin_unlock_irq(&conf->device_lock);
        clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

        return_io(return_bi);

        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct r5conf *conf = sh->raid_conf;
        struct async_submit_ctl submit;
        int i;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        for (i = sh->disks; i--; ) {
                struct r5dev *dev = &sh->dev[i];
                if (test_bit(R5_Wantfill, &dev->flags)) {
                        struct bio *rbi;
                        spin_lock_irq(&conf->device_lock);
                        dev->read = rbi = dev->toread;
                        dev->toread = NULL;
                        spin_unlock_irq(&conf->device_lock);
                        while (rbi && rbi->bi_sector <
                                dev->sector + STRIPE_SECTORS) {
                                tx = async_copy_data(0, rbi, dev->page,
                                                     dev->sector, tx);
                                rbi = r5_next_bio(rbi, dev->sector);
                        }
                }
        }

        atomic_inc(&sh->count);
        init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL);
        async_trigger_callback(&submit);
}

static void mark_target_uptodate(struct stripe_head *sh, int target)
{
        struct r5dev *tgt;

        if (target < 0)
                return;

        tgt = &sh->dev[target];
        set_bit(R5_UPTODATE, &tgt->flags);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        clear_bit(R5_Wantcompute, &tgt->flags);
}

static void ops_complete_compute(void *stripe_head_ref)
{
        struct stripe_head *sh = stripe_head_ref;

        pr_debug("%s: stripe %llu\n", __func__,
                 (unsigned long long)sh->sector);

        /* mark the computed target(s) as uptodate */
        mark_target_uptodate(sh, sh->ops.target);
        mark_target_uptodate(sh, sh->ops.target2);

        clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
        if (sh->check_state == check_state_compute_run)
                sh->check_state = check_state_compute_result;
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
}

/* return a pointer to the address conversion region of the scribble buffer */
static addr_conv_t *to_addr_conv(struct stripe_head *sh,
                                 struct raid5_percpu *percpu)
{
        return percpu->scribble + sizeof(struct page *) * (sh->disks + 2);
}

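/*
 * Asynchronously recompute a single missing block of a RAID4/5 stripe by
 * XOR-ing together all of the other blocks.
 */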
static struct dma_async_tx_descriptor *
ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        struct page **xor_srcs = percpu->scribble;
        int target = sh->ops.target;
        struct r5dev *tgt = &sh->dev[target];
        struct page *xor_dest = tgt->page;
        int count = 0;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        int i;

        pr_debug("%s: stripe %llu block: %d\n",
                 __func__, (unsigned long long)sh->sector, target);
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

        for (i = disks; i--; )
                if (i != target)
                        xor_srcs[count++] = sh->dev[i].page;

        atomic_inc(&sh->count);

        init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL,
                          ops_complete_compute, sh, to_addr_conv(sh, percpu));
        if (unlikely(count == 1))
                tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit);
        else
                tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit);

        return tx;
}

/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
        int disks = sh->disks;
        int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
        int d0_idx = raid6_d0(sh);
        int count;
        int i;

        for (i = 0; i < disks; i++)
                srcs[i] = NULL;

        count = 0;
        i = d0_idx;
        do {
                int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

                srcs[slot] = sh->dev[i].page;
                i = raid6_next_disk(i, disks);
        } while (i != d0_idx);

        return syndrome_disks;
}

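/*
 * Recompute one missing block of a RAID6 stripe: if the target is the Q
 * drive, regenerate it with the syndrome engine; otherwise rebuild the
 * data or P block by XOR-ing the surviving blocks.
 */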
static struct dma_async_tx_descriptor *
ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu)
{
        int disks = sh->disks;
        struct page **blocks = percpu->scribble;
        int target;
        int qd_idx = sh->qd_idx;
        struct dma_async_tx_descriptor *tx;
        struct async_submit_ctl submit;
        struct r5dev *tgt;
        struct page *dest;
        int i;
        int count;

        if (sh->ops.target < 0)
                target = sh->ops.target2;
        else if (sh->ops.target2 < 0)
                target = sh->ops.target;
        else
                /* we should only have one valid target */
                BUG();
        BUG_ON(target < 0);
        pr_debug("%s: stripe %llu block: %d\n",
                 __func__, (unsigned long long)sh->sector, target);

        tgt = &sh->dev[target];
        BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
        dest = tgt->page;

        atomic_inc(&sh->count);

        if (target == qd_idx) {
                count = set_syndrome_sources(blocks, sh);
                blocks[count] = NULL; /* regenerating p is not necessary */
                BUG_ON(blocks[count+1] != dest); /* q should already be set */
                init_async_submit(&submit, ASYNC_TX_FENCE, NULL,
                                  ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu));
                tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
        } else {
                /* Compute any data- or p-drive using XOR */
                count = 0;
                for (i = disks; i--; ) {
                        if (i == target || i == qd_idx)
                                continue;
                        blocks[count++] = sh->dev[i].page;
                }

                init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST,
                                  NULL, ops_complete_compute, sh,
                                  to_addr_conv(sh, percpu));
                tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit);
        }

        return tx;
}

| 945 | static struct dma_async_tx_descriptor * |
| 946 | ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) |
| 947 | { |
| 948 | int i, count, disks = sh->disks; |
| 949 | int syndrome_disks = sh->ddf_layout ? disks : disks-2; |
| 950 | int d0_idx = raid6_d0(sh); |
| 951 | int faila = -1, failb = -1; |
| 952 | int target = sh->ops.target; |
| 953 | int target2 = sh->ops.target2; |
| 954 | struct r5dev *tgt = &sh->dev[target]; |
| 955 | struct r5dev *tgt2 = &sh->dev[target2]; |
| 956 | struct dma_async_tx_descriptor *tx; |
| 957 | struct page **blocks = percpu->scribble; |
| 958 | struct async_submit_ctl submit; |
| 959 | |
| 960 | pr_debug("%s: stripe %llu block1: %d block2: %d\n", |
| 961 | __func__, (unsigned long long)sh->sector, target, target2); |
| 962 | BUG_ON(target < 0 || target2 < 0); |
| 963 | BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); |
| 964 | BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); |
| 965 | |
Dan Williams | 6c910a7 | 2009-09-16 12:24:54 -0700 | [diff] [blame] | 966 | /* we need to open-code set_syndrome_sources to handle the |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 967 | * slot number conversion for 'faila' and 'failb' |
| 968 | */ |
| 969 | for (i = 0; i < disks ; i++) |
NeilBrown | 5dd33c9 | 2009-10-16 16:40:25 +1100 | [diff] [blame] | 970 | blocks[i] = NULL; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 971 | count = 0; |
| 972 | i = d0_idx; |
| 973 | do { |
| 974 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); |
| 975 | |
| 976 | blocks[slot] = sh->dev[i].page; |
| 977 | |
| 978 | if (i == target) |
| 979 | faila = slot; |
| 980 | if (i == target2) |
| 981 | failb = slot; |
| 982 | i = raid6_next_disk(i, disks); |
| 983 | } while (i != d0_idx); |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 984 | |
| 985 | BUG_ON(faila == failb); |
| 986 | if (failb < faila) |
| 987 | swap(faila, failb); |
| 988 | pr_debug("%s: stripe: %llu faila: %d failb: %d\n", |
| 989 | __func__, (unsigned long long)sh->sector, faila, failb); |
| 990 | |
| 991 | atomic_inc(&sh->count); |
| 992 | |
| 993 | if (failb == syndrome_disks+1) { |
| 994 | /* Q disk is one of the missing disks */ |
| 995 | if (faila == syndrome_disks) { |
| 996 | /* Missing P+Q, just recompute */ |
Dan Williams | 0403e38 | 2009-09-08 17:42:50 -0700 | [diff] [blame] | 997 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, |
| 998 | ops_complete_compute, sh, |
| 999 | to_addr_conv(sh, percpu)); |
NeilBrown | e4424fe | 2009-10-16 16:27:34 +1100 | [diff] [blame] | 1000 | return async_gen_syndrome(blocks, 0, syndrome_disks+2, |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1001 | STRIPE_SIZE, &submit); |
| 1002 | } else { |
| 1003 | struct page *dest; |
| 1004 | int data_target; |
| 1005 | int qd_idx = sh->qd_idx; |
| 1006 | |
| 1007 | /* Missing D+Q: recompute D from P, then recompute Q */ |
| 1008 | if (target == qd_idx) |
| 1009 | data_target = target2; |
| 1010 | else |
| 1011 | data_target = target; |
| 1012 | |
| 1013 | count = 0; |
| 1014 | for (i = disks; i-- ; ) { |
| 1015 | if (i == data_target || i == qd_idx) |
| 1016 | continue; |
| 1017 | blocks[count++] = sh->dev[i].page; |
| 1018 | } |
| 1019 | dest = sh->dev[data_target].page; |
Dan Williams | 0403e38 | 2009-09-08 17:42:50 -0700 | [diff] [blame] | 1020 | init_async_submit(&submit, |
| 1021 | ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, |
| 1022 | NULL, NULL, NULL, |
| 1023 | to_addr_conv(sh, percpu)); |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1024 | tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, |
| 1025 | &submit); |
| 1026 | |
| 1027 | count = set_syndrome_sources(blocks, sh); |
Dan Williams | 0403e38 | 2009-09-08 17:42:50 -0700 | [diff] [blame] | 1028 | init_async_submit(&submit, ASYNC_TX_FENCE, tx, |
| 1029 | ops_complete_compute, sh, |
| 1030 | to_addr_conv(sh, percpu)); |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1031 | return async_gen_syndrome(blocks, 0, count+2, |
| 1032 | STRIPE_SIZE, &submit); |
| 1033 | } |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1034 | } else { |
Dan Williams | 6c910a7 | 2009-09-16 12:24:54 -0700 | [diff] [blame] | 1035 | init_async_submit(&submit, ASYNC_TX_FENCE, NULL, |
| 1036 | ops_complete_compute, sh, |
| 1037 | to_addr_conv(sh, percpu)); |
| 1038 | if (failb == syndrome_disks) { |
| 1039 | /* We're missing D+P. */ |
| 1040 | return async_raid6_datap_recov(syndrome_disks+2, |
| 1041 | STRIPE_SIZE, faila, |
| 1042 | blocks, &submit); |
| 1043 | } else { |
| 1044 | /* We're missing D+D. */ |
| 1045 | return async_raid6_2data_recov(syndrome_disks+2, |
| 1046 | STRIPE_SIZE, faila, failb, |
| 1047 | blocks, &submit); |
| 1048 | } |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1049 | } |
| 1050 | } |
| 1051 | |
| 1052 | |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1053 | static void ops_complete_prexor(void *stripe_head_ref) |
| 1054 | { |
| 1055 | struct stripe_head *sh = stripe_head_ref; |
| 1056 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1057 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1058 | (unsigned long long)sh->sector); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1059 | } |
| 1060 | |
| 1061 | static struct dma_async_tx_descriptor * |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1062 | ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1063 | struct dma_async_tx_descriptor *tx) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1064 | { |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1065 | int disks = sh->disks; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1066 | struct page **xor_srcs = percpu->scribble; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1067 | int count = 0, pd_idx = sh->pd_idx, i; |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1068 | struct async_submit_ctl submit; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1069 | |
| 1070 | /* existing parity data subtracted */ |
| 1071 | struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; |
| 1072 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1073 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1074 | (unsigned long long)sh->sector); |
| 1075 | |
| 1076 | for (i = disks; i--; ) { |
| 1077 | struct r5dev *dev = &sh->dev[i]; |
| 1078 | /* Only process blocks that are known to be uptodate */ |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1079 | if (test_bit(R5_Wantdrain, &dev->flags)) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1080 | xor_srcs[count++] = dev->page; |
| 1081 | } |
| 1082 | |
Dan Williams | 0403e38 | 2009-09-08 17:42:50 -0700 | [diff] [blame] | 1083 | init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1084 | ops_complete_prexor, sh, to_addr_conv(sh, percpu)); |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1085 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1086 | |
| 1087 | return tx; |
| 1088 | } |
| 1089 | |
| 1090 | static struct dma_async_tx_descriptor * |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1091 | ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1092 | { |
| 1093 | int disks = sh->disks; |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1094 | int i; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1095 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1096 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1097 | (unsigned long long)sh->sector); |
| 1098 | |
| 1099 | for (i = disks; i--; ) { |
| 1100 | struct r5dev *dev = &sh->dev[i]; |
| 1101 | struct bio *chosen; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1102 | |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1103 | if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1104 | struct bio *wbi; |
| 1105 | |
NeilBrown | cbe47ec | 2011-07-26 11:20:35 +1000 | [diff] [blame] | 1106 | spin_lock_irq(&sh->raid_conf->device_lock); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1107 | chosen = dev->towrite; |
| 1108 | dev->towrite = NULL; |
| 1109 | BUG_ON(dev->written); |
| 1110 | wbi = dev->written = chosen; |
NeilBrown | cbe47ec | 2011-07-26 11:20:35 +1000 | [diff] [blame] | 1111 | spin_unlock_irq(&sh->raid_conf->device_lock); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1112 | |
| 1113 | while (wbi && wbi->bi_sector < |
| 1114 | dev->sector + STRIPE_SECTORS) { |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 1115 | if (wbi->bi_rw & REQ_FUA) |
| 1116 | set_bit(R5_WantFUA, &dev->flags); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1117 | tx = async_copy_data(1, wbi, dev->page, |
| 1118 | dev->sector, tx); |
| 1119 | wbi = r5_next_bio(wbi, dev->sector); |
| 1120 | } |
| 1121 | } |
| 1122 | } |
| 1123 | |
| 1124 | return tx; |
| 1125 | } |
| 1126 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1127 | static void ops_complete_reconstruct(void *stripe_head_ref) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1128 | { |
| 1129 | struct stripe_head *sh = stripe_head_ref; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1130 | int disks = sh->disks; |
| 1131 | int pd_idx = sh->pd_idx; |
| 1132 | int qd_idx = sh->qd_idx; |
| 1133 | int i; |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 1134 | bool fua = false; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1135 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1136 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1137 | (unsigned long long)sh->sector); |
| 1138 | |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 1139 | for (i = disks; i--; ) |
| 1140 | fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); |
| 1141 | |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1142 | for (i = disks; i--; ) { |
| 1143 | struct r5dev *dev = &sh->dev[i]; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1144 | |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 1145 | if (dev->written || i == pd_idx || i == qd_idx) { |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1146 | set_bit(R5_UPTODATE, &dev->flags); |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 1147 | if (fua) |
| 1148 | set_bit(R5_WantFUA, &dev->flags); |
| 1149 | } |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1150 | } |
| 1151 | |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1152 | if (sh->reconstruct_state == reconstruct_state_drain_run) |
| 1153 | sh->reconstruct_state = reconstruct_state_drain_result; |
| 1154 | else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) |
| 1155 | sh->reconstruct_state = reconstruct_state_prexor_drain_result; |
| 1156 | else { |
| 1157 | BUG_ON(sh->reconstruct_state != reconstruct_state_run); |
| 1158 | sh->reconstruct_state = reconstruct_state_result; |
| 1159 | } |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1160 | |
| 1161 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1162 | release_stripe(sh); |
| 1163 | } |
| 1164 | |
| 1165 | static void |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1166 | ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1167 | struct dma_async_tx_descriptor *tx) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1168 | { |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1169 | int disks = sh->disks; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1170 | struct page **xor_srcs = percpu->scribble; |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1171 | struct async_submit_ctl submit; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1172 | int count = 0, pd_idx = sh->pd_idx, i; |
| 1173 | struct page *xor_dest; |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1174 | int prexor = 0; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1175 | unsigned long flags; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1176 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1177 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1178 | (unsigned long long)sh->sector); |
| 1179 | |
| 1180 | 	/* check if prexor is active, which means we only process blocks |
| 1181 | 	 * that are part of a read-modify-write (those with 'written' set) |
| 1182 | */ |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1183 | if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { |
| 1184 | prexor = 1; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1185 | xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; |
| 1186 | for (i = disks; i--; ) { |
| 1187 | struct r5dev *dev = &sh->dev[i]; |
| 1188 | if (dev->written) |
| 1189 | xor_srcs[count++] = dev->page; |
| 1190 | } |
| 1191 | } else { |
| 1192 | xor_dest = sh->dev[pd_idx].page; |
| 1193 | for (i = disks; i--; ) { |
| 1194 | struct r5dev *dev = &sh->dev[i]; |
| 1195 | if (i != pd_idx) |
| 1196 | xor_srcs[count++] = dev->page; |
| 1197 | } |
| 1198 | } |
| 1199 | |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1200 | /* 1/ if we prexor'd then the dest is reused as a source |
| 1201 | * 2/ if we did not prexor then we are redoing the parity |
| 1202 | * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST |
| 1203 | * for the synchronous xor case |
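|      | 	 * (so the prexor path selects ASYNC_TX_XOR_DROP_DST and the full |
|      | 	 * rewrite path selects ASYNC_TX_XOR_ZERO_DST, as done just below) |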
| 1204 | */ |
Dan Williams | 88ba2aa | 2009-04-09 16:16:18 -0700 | [diff] [blame] | 1205 | flags = ASYNC_TX_ACK | |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1206 | (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); |
| 1207 | |
| 1208 | atomic_inc(&sh->count); |
| 1209 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1210 | init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1211 | to_addr_conv(sh, percpu)); |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1212 | if (unlikely(count == 1)) |
| 1213 | tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); |
| 1214 | else |
| 1215 | tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1216 | } |
| 1217 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1218 | static void |
| 1219 | ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, |
| 1220 | struct dma_async_tx_descriptor *tx) |
| 1221 | { |
| 1222 | struct async_submit_ctl submit; |
| 1223 | struct page **blocks = percpu->scribble; |
| 1224 | int count; |
| 1225 | |
| 1226 | pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); |
| 1227 | |
| 1228 | count = set_syndrome_sources(blocks, sh); |
| 1229 | |
| 1230 | atomic_inc(&sh->count); |
| 1231 | |
| 1232 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, |
| 1233 | sh, to_addr_conv(sh, percpu)); |
| 1234 | async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1235 | } |
| 1236 | |
| 1237 | static void ops_complete_check(void *stripe_head_ref) |
| 1238 | { |
| 1239 | struct stripe_head *sh = stripe_head_ref; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1240 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1241 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1242 | (unsigned long long)sh->sector); |
| 1243 | |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 1244 | sh->check_state = check_state_check_result; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1245 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1246 | release_stripe(sh); |
| 1247 | } |
| 1248 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1249 | static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1250 | { |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1251 | int disks = sh->disks; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1252 | int pd_idx = sh->pd_idx; |
| 1253 | int qd_idx = sh->qd_idx; |
| 1254 | struct page *xor_dest; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1255 | struct page **xor_srcs = percpu->scribble; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1256 | struct dma_async_tx_descriptor *tx; |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1257 | struct async_submit_ctl submit; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1258 | int count; |
| 1259 | int i; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1260 | |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1261 | pr_debug("%s: stripe %llu\n", __func__, |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1262 | (unsigned long long)sh->sector); |
| 1263 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1264 | count = 0; |
| 1265 | xor_dest = sh->dev[pd_idx].page; |
| 1266 | xor_srcs[count++] = xor_dest; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1267 | for (i = disks; i--; ) { |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1268 | if (i == pd_idx || i == qd_idx) |
| 1269 | continue; |
| 1270 | xor_srcs[count++] = sh->dev[i].page; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1271 | } |
| 1272 | |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1273 | init_async_submit(&submit, 0, NULL, NULL, NULL, |
| 1274 | to_addr_conv(sh, percpu)); |
Dan Williams | 099f53c | 2009-04-08 14:28:37 -0700 | [diff] [blame] | 1275 | tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1276 | &sh->ops.zero_sum_result, &submit); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1277 | |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1278 | atomic_inc(&sh->count); |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 1279 | init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); |
| 1280 | tx = async_trigger_callback(&submit); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1281 | } |
| 1282 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1283 | static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) |
| 1284 | { |
| 1285 | struct page **srcs = percpu->scribble; |
| 1286 | struct async_submit_ctl submit; |
| 1287 | int count; |
| 1288 | |
| 1289 | pr_debug("%s: stripe %llu checkp: %d\n", __func__, |
| 1290 | (unsigned long long)sh->sector, checkp); |
| 1291 | |
| 1292 | count = set_syndrome_sources(srcs, sh); |
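|      | 	/* when only the Q syndrome is being checked (checkp == 0), drop the |
|      | 	 * P block so async_syndrome_val validates Q alone |
|      | 	 */ |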
| 1293 | if (!checkp) |
| 1294 | srcs[count] = NULL; |
| 1295 | |
| 1296 | atomic_inc(&sh->count); |
| 1297 | init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, |
| 1298 | sh, to_addr_conv(sh, percpu)); |
| 1299 | async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, |
| 1300 | &sh->ops.zero_sum_result, percpu->spare_page, &submit); |
| 1301 | } |
| 1302 | |
Dan Williams | 417b8d4 | 2009-10-16 16:25:22 +1100 | [diff] [blame] | 1303 | static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1304 | { |
| 1305 | int overlap_clear = 0, i, disks = sh->disks; |
| 1306 | struct dma_async_tx_descriptor *tx = NULL; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1307 | struct r5conf *conf = sh->raid_conf; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1308 | int level = conf->level; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1309 | struct raid5_percpu *percpu; |
| 1310 | unsigned long cpu; |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1311 | |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1312 | cpu = get_cpu(); |
| 1313 | percpu = per_cpu_ptr(conf->percpu, cpu); |
Dan Williams | 83de75c | 2008-06-28 08:31:58 +1000 | [diff] [blame] | 1314 | if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1315 | ops_run_biofill(sh); |
| 1316 | overlap_clear++; |
| 1317 | } |
| 1318 | |
Dan Williams | 7b3a871 | 2008-06-28 08:32:09 +1000 | [diff] [blame] | 1319 | if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1320 | if (level < 6) |
| 1321 | tx = ops_run_compute5(sh, percpu); |
| 1322 | else { |
| 1323 | if (sh->ops.target2 < 0 || sh->ops.target < 0) |
| 1324 | tx = ops_run_compute6_1(sh, percpu); |
| 1325 | else |
| 1326 | tx = ops_run_compute6_2(sh, percpu); |
| 1327 | } |
| 1328 | /* terminate the chain if reconstruct is not set to be run */ |
| 1329 | if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) |
Dan Williams | 7b3a871 | 2008-06-28 08:32:09 +1000 | [diff] [blame] | 1330 | async_tx_ack(tx); |
| 1331 | } |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1332 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1333 | if (test_bit(STRIPE_OP_PREXOR, &ops_request)) |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1334 | tx = ops_run_prexor(sh, percpu, tx); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1335 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1336 | if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1337 | tx = ops_run_biodrain(sh, tx); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1338 | overlap_clear++; |
| 1339 | } |
| 1340 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1341 | if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { |
| 1342 | if (level < 6) |
| 1343 | ops_run_reconstruct5(sh, percpu, tx); |
| 1344 | else |
| 1345 | ops_run_reconstruct6(sh, percpu, tx); |
| 1346 | } |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1347 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 1348 | if (test_bit(STRIPE_OP_CHECK, &ops_request)) { |
| 1349 | if (sh->check_state == check_state_run) |
| 1350 | ops_run_check_p(sh, percpu); |
| 1351 | else if (sh->check_state == check_state_run_q) |
| 1352 | ops_run_check_pq(sh, percpu, 0); |
| 1353 | else if (sh->check_state == check_state_run_pq) |
| 1354 | ops_run_check_pq(sh, percpu, 1); |
| 1355 | else |
| 1356 | BUG(); |
| 1357 | } |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1358 | |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1359 | if (overlap_clear) |
| 1360 | for (i = disks; i--; ) { |
| 1361 | struct r5dev *dev = &sh->dev[i]; |
| 1362 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) |
| 1363 | wake_up(&sh->raid_conf->wait_for_overlap); |
| 1364 | } |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1365 | put_cpu(); |
Dan Williams | 91c0092 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1366 | } |
| 1367 | |
Dan Williams | 417b8d4 | 2009-10-16 16:25:22 +1100 | [diff] [blame] | 1368 | #ifdef CONFIG_MULTICORE_RAID456 |
| 1369 | static void async_run_ops(void *param, async_cookie_t cookie) |
| 1370 | { |
| 1371 | struct stripe_head *sh = param; |
| 1372 | unsigned long ops_request = sh->ops.request; |
| 1373 | |
| 1374 | clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); |
| 1375 | wake_up(&sh->ops.wait_for_ops); |
| 1376 | |
| 1377 | __raid_run_ops(sh, ops_request); |
| 1378 | release_stripe(sh); |
| 1379 | } |
| 1380 | |
| 1381 | static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) |
| 1382 | { |
| 1383 | /* since handle_stripe can be called outside of raid5d context |
| 1384 | * we need to ensure sh->ops.request is de-staged before another |
| 1385 | * request arrives |
| 1386 | */ |
| 1387 | wait_event(sh->ops.wait_for_ops, |
| 1388 | !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); |
| 1389 | sh->ops.request = ops_request; |
| 1390 | |
| 1391 | atomic_inc(&sh->count); |
| 1392 | async_schedule(async_run_ops, sh); |
| 1393 | } |
| 1394 | #else |
| 1395 | #define raid_run_ops __raid_run_ops |
| 1396 | #endif |
| 1397 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1398 | static int grow_one_stripe(struct r5conf *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1399 | { |
| 1400 | struct stripe_head *sh; |
Namhyung Kim | 6ce3284 | 2011-07-18 17:38:50 +1000 | [diff] [blame] | 1401 | sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1402 | if (!sh) |
| 1403 | return 0; |
Namhyung Kim | 6ce3284 | 2011-07-18 17:38:50 +1000 | [diff] [blame] | 1404 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1405 | sh->raid_conf = conf; |
Dan Williams | 417b8d4 | 2009-10-16 16:25:22 +1100 | [diff] [blame] | 1406 | #ifdef CONFIG_MULTICORE_RAID456 |
| 1407 | init_waitqueue_head(&sh->ops.wait_for_ops); |
| 1408 | #endif |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1409 | |
NeilBrown | e4e11e3 | 2010-06-16 16:45:16 +1000 | [diff] [blame] | 1410 | if (grow_buffers(sh)) { |
| 1411 | shrink_buffers(sh); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1412 | kmem_cache_free(conf->slab_cache, sh); |
| 1413 | return 0; |
| 1414 | } |
| 1415 | /* we just created an active stripe so... */ |
| 1416 | atomic_set(&sh->count, 1); |
| 1417 | atomic_inc(&conf->active_stripes); |
| 1418 | INIT_LIST_HEAD(&sh->lru); |
| 1419 | release_stripe(sh); |
| 1420 | return 1; |
| 1421 | } |
| 1422 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1423 | static int grow_stripes(struct r5conf *conf, int num) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1424 | { |
Christoph Lameter | e18b890 | 2006-12-06 20:33:20 -0800 | [diff] [blame] | 1425 | struct kmem_cache *sc; |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 1426 | int devs = max(conf->raid_disks, conf->previous_raid_disks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1427 | |
NeilBrown | f4be6b4 | 2010-06-01 19:37:25 +1000 | [diff] [blame] | 1428 | if (conf->mddev->gendisk) |
| 1429 | sprintf(conf->cache_name[0], |
| 1430 | "raid%d-%s", conf->level, mdname(conf->mddev)); |
| 1431 | else |
| 1432 | sprintf(conf->cache_name[0], |
| 1433 | "raid%d-%p", conf->level, conf->mddev); |
| 1434 | sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); |
| 1435 | |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1436 | conf->active_name = 0; |
| 1437 | sc = kmem_cache_create(conf->cache_name[conf->active_name], |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1438 | sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1439 | 0, 0, NULL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1440 | if (!sc) |
| 1441 | return 1; |
| 1442 | conf->slab_cache = sc; |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1443 | conf->pool_size = devs; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1444 | while (num--) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1445 | if (!grow_one_stripe(conf)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1446 | return 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1447 | return 0; |
| 1448 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 1449 | |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1450 | /** |
| 1451 | * scribble_len - return the required size of the scribble region |
| 1452 | * @num - total number of disks in the array |
| 1453 | * |
| 1454 | * The size must be enough to contain: |
| 1455 | * 1/ a struct page pointer for each device in the array +2 |
| 1456 | * 2/ room to convert each entry in (1) to its corresponding dma |
| 1457 | * (dma_map_page()) or page (page_address()) address. |
| 1458 | * |
| 1459 | * Note: the +2 is for the destination buffers of the ddf/raid6 case where we |
| 1460 | * calculate over all devices (not just the data blocks), using zeros in place |
| 1461 | * of the P and Q blocks. |
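|      |  * |
|      |  * Rough worked example (illustrative): with num == 6 devices the region |
|      |  * holds 8 struct page pointers followed by 8 addr_conv_t entries, i.e. |
|      |  * 2 * (num + 2) slots in total. |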
| 1462 | */ |
| 1463 | static size_t scribble_len(int num) |
| 1464 | { |
| 1465 | size_t len; |
| 1466 | |
| 1467 | len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); |
| 1468 | |
| 1469 | return len; |
| 1470 | } |
| 1471 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1472 | static int resize_stripes(struct r5conf *conf, int newsize) |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1473 | { |
| 1474 | /* Make all the stripes able to hold 'newsize' devices. |
| 1475 | * New slots in each stripe get 'page' set to a new page. |
| 1476 | * |
| 1477 | * This happens in stages: |
| 1478 | * 1/ create a new kmem_cache and allocate the required number of |
| 1479 | * stripe_heads. |
| 1480 | 	 * 2/ gather all the old stripe_heads and transfer the pages across |
| 1481 | * to the new stripe_heads. This will have the side effect of |
| 1482 | * freezing the array as once all stripe_heads have been collected, |
| 1483 | * no IO will be possible. Old stripe heads are freed once their |
| 1484 | * pages have been transferred over, and the old kmem_cache is |
| 1485 | * freed when all stripes are done. |
| 1486 | 	 * 3/ reallocate conf->disks to be suitably bigger. If this fails, |
| 1487 | 	 *    we simply return a failure status - no need to clean anything up. |
| 1488 | * 4/ allocate new pages for the new slots in the new stripe_heads. |
| 1489 | 	 *    If this fails, we don't bother trying to shrink the |
| 1490 | * stripe_heads down again, we just leave them as they are. |
| 1491 | * As each stripe_head is processed the new one is released into |
| 1492 | * active service. |
| 1493 | * |
| 1494 | * Once step2 is started, we cannot afford to wait for a write, |
| 1495 | * so we use GFP_NOIO allocations. |
| 1496 | */ |
| 1497 | struct stripe_head *osh, *nsh; |
| 1498 | LIST_HEAD(newstripes); |
| 1499 | struct disk_info *ndisks; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1500 | unsigned long cpu; |
Dan Williams | b5470dc | 2008-06-27 21:44:04 -0700 | [diff] [blame] | 1501 | int err; |
Christoph Lameter | e18b890 | 2006-12-06 20:33:20 -0800 | [diff] [blame] | 1502 | struct kmem_cache *sc; |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1503 | int i; |
| 1504 | |
| 1505 | if (newsize <= conf->pool_size) |
| 1506 | return 0; /* never bother to shrink */ |
| 1507 | |
Dan Williams | b5470dc | 2008-06-27 21:44:04 -0700 | [diff] [blame] | 1508 | err = md_allow_write(conf->mddev); |
| 1509 | if (err) |
| 1510 | return err; |
NeilBrown | 2a2275d | 2007-01-26 00:57:11 -0800 | [diff] [blame] | 1511 | |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1512 | /* Step 1 */ |
| 1513 | sc = kmem_cache_create(conf->cache_name[1-conf->active_name], |
| 1514 | sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), |
Paul Mundt | 20c2df8 | 2007-07-20 10:11:58 +0900 | [diff] [blame] | 1515 | 0, 0, NULL); |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1516 | if (!sc) |
| 1517 | return -ENOMEM; |
| 1518 | |
| 1519 | for (i = conf->max_nr_stripes; i; i--) { |
Namhyung Kim | 6ce3284 | 2011-07-18 17:38:50 +1000 | [diff] [blame] | 1520 | nsh = kmem_cache_zalloc(sc, GFP_KERNEL); |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1521 | if (!nsh) |
| 1522 | break; |
| 1523 | |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1524 | nsh->raid_conf = conf; |
Dan Williams | 417b8d4 | 2009-10-16 16:25:22 +1100 | [diff] [blame] | 1525 | #ifdef CONFIG_MULTICORE_RAID456 |
| 1526 | init_waitqueue_head(&nsh->ops.wait_for_ops); |
| 1527 | #endif |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1528 | |
| 1529 | list_add(&nsh->lru, &newstripes); |
| 1530 | } |
| 1531 | if (i) { |
| 1532 | /* didn't get enough, give up */ |
| 1533 | while (!list_empty(&newstripes)) { |
| 1534 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
| 1535 | list_del(&nsh->lru); |
| 1536 | kmem_cache_free(sc, nsh); |
| 1537 | } |
| 1538 | kmem_cache_destroy(sc); |
| 1539 | return -ENOMEM; |
| 1540 | } |
| 1541 | /* Step 2 - Must use GFP_NOIO now. |
| 1542 | * OK, we have enough stripes, start collecting inactive |
| 1543 | * stripes and copying them over |
| 1544 | */ |
| 1545 | list_for_each_entry(nsh, &newstripes, lru) { |
| 1546 | spin_lock_irq(&conf->device_lock); |
| 1547 | wait_event_lock_irq(conf->wait_for_stripe, |
| 1548 | !list_empty(&conf->inactive_list), |
| 1549 | conf->device_lock, |
NeilBrown | 482c083 | 2011-04-18 18:25:42 +1000 | [diff] [blame] | 1550 | ); |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1551 | osh = get_free_stripe(conf); |
| 1552 | spin_unlock_irq(&conf->device_lock); |
| 1553 | atomic_set(&nsh->count, 1); |
| 1554 | for(i=0; i<conf->pool_size; i++) |
| 1555 | nsh->dev[i].page = osh->dev[i].page; |
| 1556 | for( ; i<newsize; i++) |
| 1557 | nsh->dev[i].page = NULL; |
| 1558 | kmem_cache_free(conf->slab_cache, osh); |
| 1559 | } |
| 1560 | kmem_cache_destroy(conf->slab_cache); |
| 1561 | |
| 1562 | /* Step 3. |
| 1563 | * At this point, we are holding all the stripes so the array |
| 1564 | * is completely stalled, so now is a good time to resize |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1565 | * conf->disks and the scribble region |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1566 | */ |
| 1567 | ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); |
| 1568 | if (ndisks) { |
| 1569 | for (i=0; i<conf->raid_disks; i++) |
| 1570 | ndisks[i] = conf->disks[i]; |
| 1571 | kfree(conf->disks); |
| 1572 | conf->disks = ndisks; |
| 1573 | } else |
| 1574 | err = -ENOMEM; |
| 1575 | |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1576 | get_online_cpus(); |
| 1577 | conf->scribble_len = scribble_len(newsize); |
| 1578 | for_each_present_cpu(cpu) { |
| 1579 | struct raid5_percpu *percpu; |
| 1580 | void *scribble; |
| 1581 | |
| 1582 | percpu = per_cpu_ptr(conf->percpu, cpu); |
| 1583 | scribble = kmalloc(conf->scribble_len, GFP_NOIO); |
| 1584 | |
| 1585 | if (scribble) { |
| 1586 | kfree(percpu->scribble); |
| 1587 | percpu->scribble = scribble; |
| 1588 | } else { |
| 1589 | err = -ENOMEM; |
| 1590 | break; |
| 1591 | } |
| 1592 | } |
| 1593 | put_online_cpus(); |
| 1594 | |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1595 | /* Step 4, return new stripes to service */ |
| 1596 | while(!list_empty(&newstripes)) { |
| 1597 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
| 1598 | list_del_init(&nsh->lru); |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 1599 | |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1600 | for (i=conf->raid_disks; i < newsize; i++) |
| 1601 | if (nsh->dev[i].page == NULL) { |
| 1602 | struct page *p = alloc_page(GFP_NOIO); |
| 1603 | nsh->dev[i].page = p; |
| 1604 | if (!p) |
| 1605 | err = -ENOMEM; |
| 1606 | } |
| 1607 | release_stripe(nsh); |
| 1608 | } |
| 1609 | 	/* critical section passed, GFP_NOIO no longer needed */ |
| 1610 | |
| 1611 | conf->slab_cache = sc; |
| 1612 | conf->active_name = 1-conf->active_name; |
| 1613 | conf->pool_size = newsize; |
| 1614 | return err; |
| 1615 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1616 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1617 | static int drop_one_stripe(struct r5conf *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1618 | { |
| 1619 | struct stripe_head *sh; |
| 1620 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1621 | spin_lock_irq(&conf->device_lock); |
| 1622 | sh = get_free_stripe(conf); |
| 1623 | spin_unlock_irq(&conf->device_lock); |
| 1624 | if (!sh) |
| 1625 | return 0; |
Eric Sesterhenn | 78bafeb | 2006-04-02 13:31:42 +0200 | [diff] [blame] | 1626 | BUG_ON(atomic_read(&sh->count)); |
NeilBrown | e4e11e3 | 2010-06-16 16:45:16 +1000 | [diff] [blame] | 1627 | shrink_buffers(sh); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1628 | kmem_cache_free(conf->slab_cache, sh); |
| 1629 | atomic_dec(&conf->active_stripes); |
| 1630 | return 1; |
| 1631 | } |
| 1632 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1633 | static void shrink_stripes(struct r5conf *conf) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1634 | { |
| 1635 | while (drop_one_stripe(conf)) |
| 1636 | ; |
| 1637 | |
NeilBrown | 29fc7e3 | 2006-02-03 03:03:41 -0800 | [diff] [blame] | 1638 | if (conf->slab_cache) |
| 1639 | kmem_cache_destroy(conf->slab_cache); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1640 | conf->slab_cache = NULL; |
| 1641 | } |
| 1642 | |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 1643 | static void raid5_end_read_request(struct bio * bi, int error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1644 | { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1645 | struct stripe_head *sh = bi->bi_private; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1646 | struct r5conf *conf = sh->raid_conf; |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 1647 | int disks = sh->disks, i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1648 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1649 | char b[BDEVNAME_SIZE]; |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1650 | struct md_rdev *rdev = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1651 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1652 | |
| 1653 | for (i=0 ; i<disks; i++) |
| 1654 | if (bi == &sh->dev[i].req) |
| 1655 | break; |
| 1656 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1657 | pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", |
| 1658 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1659 | uptodate); |
| 1660 | if (i == disks) { |
| 1661 | BUG(); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 1662 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1663 | } |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1664 | if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1665 | /* If replacement finished while this request was outstanding, |
| 1666 | * 'replacement' might be NULL already. |
| 1667 | * In that case it moved down to 'rdev'. |
| 1668 | * rdev is not removed until all requests are finished. |
| 1669 | */ |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1670 | rdev = conf->disks[i].replacement; |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1671 | if (!rdev) |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1672 | rdev = conf->disks[i].rdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 | |
| 1674 | if (uptodate) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1675 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1676 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1677 | /* Note that this cannot happen on a |
| 1678 | * replacement device. We just fail those on |
| 1679 | * any error |
| 1680 | */ |
Christian Dietrich | 8bda470 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 1681 | printk_ratelimited( |
| 1682 | KERN_INFO |
| 1683 | "md/raid:%s: read error corrected" |
| 1684 | " (%lu sectors at %llu on %s)\n", |
| 1685 | mdname(conf->mddev), STRIPE_SECTORS, |
| 1686 | (unsigned long long)(sh->sector |
| 1687 | + rdev->data_offset), |
| 1688 | bdevname(rdev->bdev, b)); |
Namhyung Kim | ddd5115 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 1689 | atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1690 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
| 1691 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
| 1692 | } |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1693 | if (atomic_read(&rdev->read_errors)) |
| 1694 | atomic_set(&rdev->read_errors, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1695 | } else { |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1696 | const char *bdn = bdevname(rdev->bdev, b); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1697 | int retry = 0; |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1698 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1700 | atomic_inc(&rdev->read_errors); |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1701 | if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) |
| 1702 | printk_ratelimited( |
| 1703 | KERN_WARNING |
| 1704 | "md/raid:%s: read error on replacement device " |
| 1705 | "(sector %llu on %s).\n", |
| 1706 | mdname(conf->mddev), |
| 1707 | (unsigned long long)(sh->sector |
| 1708 | + rdev->data_offset), |
| 1709 | bdn); |
| 1710 | else if (conf->mddev->degraded >= conf->max_degraded) |
Christian Dietrich | 8bda470 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 1711 | printk_ratelimited( |
| 1712 | KERN_WARNING |
| 1713 | "md/raid:%s: read error not correctable " |
| 1714 | "(sector %llu on %s).\n", |
| 1715 | mdname(conf->mddev), |
| 1716 | (unsigned long long)(sh->sector |
| 1717 | + rdev->data_offset), |
| 1718 | bdn); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1719 | else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1720 | /* Oh, no!!! */ |
Christian Dietrich | 8bda470 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 1721 | printk_ratelimited( |
| 1722 | KERN_WARNING |
| 1723 | "md/raid:%s: read error NOT corrected!! " |
| 1724 | "(sector %llu on %s).\n", |
| 1725 | mdname(conf->mddev), |
| 1726 | (unsigned long long)(sh->sector |
| 1727 | + rdev->data_offset), |
| 1728 | bdn); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1729 | else if (atomic_read(&rdev->read_errors) |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1730 | > conf->max_nr_stripes) |
NeilBrown | 14f8d26 | 2006-01-06 00:20:14 -0800 | [diff] [blame] | 1731 | printk(KERN_WARNING |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 1732 | "md/raid:%s: Too many read errors, failing device %s.\n", |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1733 | mdname(conf->mddev), bdn); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1734 | else |
| 1735 | retry = 1; |
| 1736 | if (retry) |
| 1737 | set_bit(R5_ReadError, &sh->dev[i].flags); |
| 1738 | else { |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1739 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
| 1740 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1741 | md_error(conf->mddev, rdev); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1742 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1743 | } |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1744 | rdev_dec_pending(rdev, conf->mddev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1745 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1746 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1747 | release_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1748 | } |
| 1749 | |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 1750 | static void raid5_end_write_request(struct bio *bi, int error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1751 | { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1752 | struct stripe_head *sh = bi->bi_private; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1753 | struct r5conf *conf = sh->raid_conf; |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 1754 | int disks = sh->disks, i; |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1755 | struct md_rdev *uninitialized_var(rdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1756 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
NeilBrown | b84db56 | 2011-07-28 11:39:23 +1000 | [diff] [blame] | 1757 | sector_t first_bad; |
| 1758 | int bad_sectors; |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1759 | int replacement = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1760 | |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1761 | for (i = 0 ; i < disks; i++) { |
| 1762 | if (bi == &sh->dev[i].req) { |
| 1763 | rdev = conf->disks[i].rdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1764 | break; |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1765 | } |
| 1766 | if (bi == &sh->dev[i].rreq) { |
| 1767 | rdev = conf->disks[i].replacement; |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1768 | if (rdev) |
| 1769 | replacement = 1; |
| 1770 | else |
| 1771 | /* rdev was removed and 'replacement' |
| 1772 | * replaced it. rdev is not removed |
| 1773 | * until all requests are finished. |
| 1774 | */ |
| 1775 | rdev = conf->disks[i].rdev; |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1776 | break; |
| 1777 | } |
| 1778 | } |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1779 | pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1780 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
| 1781 | uptodate); |
| 1782 | if (i == disks) { |
| 1783 | BUG(); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 1784 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1785 | } |
| 1786 | |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1787 | if (replacement) { |
| 1788 | if (!uptodate) |
| 1789 | md_error(conf->mddev, rdev); |
| 1790 | else if (is_badblock(rdev, sh->sector, |
| 1791 | STRIPE_SECTORS, |
| 1792 | &first_bad, &bad_sectors)) |
| 1793 | set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); |
| 1794 | } else { |
| 1795 | if (!uptodate) { |
| 1796 | set_bit(WriteErrorSeen, &rdev->flags); |
| 1797 | set_bit(R5_WriteError, &sh->dev[i].flags); |
NeilBrown | 3a6de29 | 2011-12-23 10:17:54 +1100 | [diff] [blame] | 1798 | if (!test_and_set_bit(WantReplacement, &rdev->flags)) |
| 1799 | set_bit(MD_RECOVERY_NEEDED, |
| 1800 | &rdev->mddev->recovery); |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1801 | } else if (is_badblock(rdev, sh->sector, |
| 1802 | STRIPE_SECTORS, |
| 1803 | &first_bad, &bad_sectors)) |
| 1804 | set_bit(R5_MadeGood, &sh->dev[i].flags); |
| 1805 | } |
| 1806 | rdev_dec_pending(rdev, conf->mddev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1807 | |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1808 | if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) |
| 1809 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1810 | set_bit(STRIPE_HANDLE, &sh->state); |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 1811 | release_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1812 | } |
| 1813 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 1814 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1815 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 1816 | static void raid5_build_block(struct stripe_head *sh, int i, int previous) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1817 | { |
| 1818 | struct r5dev *dev = &sh->dev[i]; |
| 1819 | |
| 1820 | bio_init(&dev->req); |
| 1821 | dev->req.bi_io_vec = &dev->vec; |
| 1822 | dev->req.bi_vcnt++; |
| 1823 | dev->req.bi_max_vecs++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1824 | dev->req.bi_private = sh; |
NeilBrown | 995c427 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 1825 | dev->vec.bv_page = dev->page; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1826 | |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 1827 | bio_init(&dev->rreq); |
| 1828 | dev->rreq.bi_io_vec = &dev->rvec; |
| 1829 | dev->rreq.bi_vcnt++; |
| 1830 | dev->rreq.bi_max_vecs++; |
| 1831 | dev->rreq.bi_private = sh; |
| 1832 | dev->rvec.bv_page = dev->page; |
| 1833 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | dev->flags = 0; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 1835 | dev->sector = compute_blocknr(sh, i, previous); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1836 | } |
| 1837 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 1838 | static void error(struct mddev *mddev, struct md_rdev *rdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1839 | { |
| 1840 | char b[BDEVNAME_SIZE]; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1841 | struct r5conf *conf = mddev->private; |
NeilBrown | 908f4fb | 2011-12-23 10:17:50 +1100 | [diff] [blame] | 1842 | unsigned long flags; |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 1843 | pr_debug("raid456: error called\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1844 | |
NeilBrown | 908f4fb | 2011-12-23 10:17:50 +1100 | [diff] [blame] | 1845 | spin_lock_irqsave(&conf->device_lock, flags); |
| 1846 | clear_bit(In_sync, &rdev->flags); |
| 1847 | mddev->degraded = calc_degraded(conf); |
| 1848 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 1849 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
| 1850 | |
NeilBrown | de393cd | 2011-07-28 11:31:48 +1000 | [diff] [blame] | 1851 | set_bit(Blocked, &rdev->flags); |
NeilBrown | 6f8d0c7 | 2011-05-11 14:38:44 +1000 | [diff] [blame] | 1852 | set_bit(Faulty, &rdev->flags); |
| 1853 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 1854 | printk(KERN_ALERT |
| 1855 | "md/raid:%s: Disk failure on %s, disabling device.\n" |
| 1856 | "md/raid:%s: Operation continuing on %d devices.\n", |
| 1857 | mdname(mddev), |
| 1858 | bdevname(rdev->bdev, b), |
| 1859 | mdname(mddev), |
| 1860 | conf->raid_disks - mddev->degraded); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1861 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1862 | |
| 1863 | /* |
| 1864 | * Input: a 'big' sector number, |
| 1865 | * Output: index of the data and parity disk, and the sector # in them. |
| 1866 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 1867 | static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1868 | int previous, int *dd_idx, |
| 1869 | struct stripe_head *sh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1870 | { |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1871 | sector_t stripe, stripe2; |
NeilBrown | 35f2a59 | 2010-04-20 14:13:34 +1000 | [diff] [blame] | 1872 | sector_t chunk_number; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | unsigned int chunk_offset; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1874 | int pd_idx, qd_idx; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1875 | int ddf_layout = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | sector_t new_sector; |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 1877 | int algorithm = previous ? conf->prev_algo |
| 1878 | : conf->algorithm; |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 1879 | int sectors_per_chunk = previous ? conf->prev_chunk_sectors |
| 1880 | : conf->chunk_sectors; |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1881 | int raid_disks = previous ? conf->previous_raid_disks |
| 1882 | : conf->raid_disks; |
| 1883 | int data_disks = raid_disks - conf->max_degraded; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1884 | |
| 1885 | /* First compute the information on this sector */ |
| 1886 | |
| 1887 | /* |
| 1888 | * Compute the chunk number and the sector offset inside the chunk |
| 1889 | */ |
| 1890 | chunk_offset = sector_div(r_sector, sectors_per_chunk); |
| 1891 | chunk_number = r_sector; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1892 | |
| 1893 | /* |
| 1894 | * Compute the stripe number |
| 1895 | */ |
NeilBrown | 35f2a59 | 2010-04-20 14:13:34 +1000 | [diff] [blame] | 1896 | stripe = chunk_number; |
| 1897 | *dd_idx = sector_div(stripe, data_disks); |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1898 | stripe2 = stripe; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1899 | /* |
| 1900 | * Select the parity disk based on the user selected algorithm. |
| 1901 | */ |
NeilBrown | 8478955 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 1902 | pd_idx = qd_idx = -1; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1903 | switch(conf->level) { |
| 1904 | case 4: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1905 | pd_idx = data_disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1906 | break; |
| 1907 | case 5: |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 1908 | switch (algorithm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1909 | case ALGORITHM_LEFT_ASYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1910 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1911 | if (*dd_idx >= pd_idx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1912 | (*dd_idx)++; |
| 1913 | break; |
| 1914 | case ALGORITHM_RIGHT_ASYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1915 | pd_idx = sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1916 | if (*dd_idx >= pd_idx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1917 | (*dd_idx)++; |
| 1918 | break; |
| 1919 | case ALGORITHM_LEFT_SYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1920 | pd_idx = data_disks - sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1921 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1922 | break; |
| 1923 | case ALGORITHM_RIGHT_SYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1924 | pd_idx = sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1925 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1926 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1927 | case ALGORITHM_PARITY_0: |
| 1928 | pd_idx = 0; |
| 1929 | (*dd_idx)++; |
| 1930 | break; |
| 1931 | case ALGORITHM_PARITY_N: |
| 1932 | pd_idx = data_disks; |
| 1933 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1934 | default: |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1935 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1936 | } |
| 1937 | break; |
| 1938 | case 6: |
| 1939 | |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 1940 | switch (algorithm) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1941 | case ALGORITHM_LEFT_ASYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1942 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1943 | qd_idx = pd_idx + 1; |
| 1944 | if (pd_idx == raid_disks-1) { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1945 | (*dd_idx)++; /* Q D D D P */ |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1946 | qd_idx = 0; |
| 1947 | } else if (*dd_idx >= pd_idx) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1948 | (*dd_idx) += 2; /* D D P Q D */ |
| 1949 | break; |
| 1950 | case ALGORITHM_RIGHT_ASYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1951 | pd_idx = sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1952 | qd_idx = pd_idx + 1; |
| 1953 | if (pd_idx == raid_disks-1) { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1954 | (*dd_idx)++; /* Q D D D P */ |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1955 | qd_idx = 0; |
| 1956 | } else if (*dd_idx >= pd_idx) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1957 | (*dd_idx) += 2; /* D D P Q D */ |
| 1958 | break; |
| 1959 | case ALGORITHM_LEFT_SYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1960 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1961 | qd_idx = (pd_idx + 1) % raid_disks; |
| 1962 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1963 | break; |
| 1964 | case ALGORITHM_RIGHT_SYMMETRIC: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1965 | pd_idx = sector_div(stripe2, raid_disks); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1966 | qd_idx = (pd_idx + 1) % raid_disks; |
| 1967 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1968 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1969 | |
| 1970 | case ALGORITHM_PARITY_0: |
| 1971 | pd_idx = 0; |
| 1972 | qd_idx = 1; |
| 1973 | (*dd_idx) += 2; |
| 1974 | break; |
| 1975 | case ALGORITHM_PARITY_N: |
| 1976 | pd_idx = data_disks; |
| 1977 | qd_idx = data_disks + 1; |
| 1978 | break; |
| 1979 | |
| 1980 | case ALGORITHM_ROTATING_ZERO_RESTART: |
| 1981 | 			/* Exactly the same as RIGHT_ASYMMETRIC, but the order |
| 1982 | * of blocks for computing Q is different. |
| 1983 | */ |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1984 | pd_idx = sector_div(stripe2, raid_disks); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1985 | qd_idx = pd_idx + 1; |
| 1986 | if (pd_idx == raid_disks-1) { |
| 1987 | (*dd_idx)++; /* Q D D D P */ |
| 1988 | qd_idx = 0; |
| 1989 | } else if (*dd_idx >= pd_idx) |
| 1990 | (*dd_idx) += 2; /* D D P Q D */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1991 | ddf_layout = 1; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1992 | break; |
| 1993 | |
| 1994 | case ALGORITHM_ROTATING_N_RESTART: |
| 1995 | 			/* Same as left_asymmetric, but first stripe is |
| 1996 | * D D D P Q rather than |
| 1997 | * Q D D D P |
| 1998 | */ |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 1999 | stripe2 += 1; |
| 2000 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2001 | qd_idx = pd_idx + 1; |
| 2002 | if (pd_idx == raid_disks-1) { |
| 2003 | (*dd_idx)++; /* Q D D D P */ |
| 2004 | qd_idx = 0; |
| 2005 | } else if (*dd_idx >= pd_idx) |
| 2006 | (*dd_idx) += 2; /* D D P Q D */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2007 | ddf_layout = 1; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2008 | break; |
| 2009 | |
| 2010 | case ALGORITHM_ROTATING_N_CONTINUE: |
| 2011 | /* Same as left_symmetric but Q is before P */ |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 2012 | pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2013 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; |
| 2014 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2015 | ddf_layout = 1; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2016 | break; |
| 2017 | |
| 2018 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 2019 | /* RAID5 left_asymmetric, with Q on last device */ |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 2020 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2021 | if (*dd_idx >= pd_idx) |
| 2022 | (*dd_idx)++; |
| 2023 | qd_idx = raid_disks - 1; |
| 2024 | break; |
| 2025 | |
| 2026 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 2027 | pd_idx = sector_div(stripe2, raid_disks-1); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2028 | if (*dd_idx >= pd_idx) |
| 2029 | (*dd_idx)++; |
| 2030 | qd_idx = raid_disks - 1; |
| 2031 | break; |
| 2032 | |
| 2033 | case ALGORITHM_LEFT_SYMMETRIC_6: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 2034 | pd_idx = data_disks - sector_div(stripe2, raid_disks-1); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2035 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 2036 | qd_idx = raid_disks - 1; |
| 2037 | break; |
| 2038 | |
| 2039 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
NeilBrown | 6e3b96e | 2010-04-23 07:08:28 +1000 | [diff] [blame] | 2040 | pd_idx = sector_div(stripe2, raid_disks-1); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2041 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 2042 | qd_idx = raid_disks - 1; |
| 2043 | break; |
| 2044 | |
| 2045 | case ALGORITHM_PARITY_0_6: |
| 2046 | pd_idx = 0; |
| 2047 | (*dd_idx)++; |
| 2048 | qd_idx = raid_disks - 1; |
| 2049 | break; |
| 2050 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2051 | default: |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2052 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2053 | } |
| 2054 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2055 | } |
| 2056 | |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2057 | if (sh) { |
| 2058 | sh->pd_idx = pd_idx; |
| 2059 | sh->qd_idx = qd_idx; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2060 | sh->ddf_layout = ddf_layout; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2061 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2062 | /* |
| 2063 | * Finally, compute the new sector number |
| 2064 | */ |
| 2065 | new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; |
| 2066 | return new_sector; |
| 2067 | } |
| 2068 | |
| 2069 | |
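/*
 * compute_blocknr is the inverse of raid5_compute_sector: given a
 * stripe_head and a device index it returns the array ('big') sector
 * stored there, or 0 if that slot holds parity.  The result is
 * cross-checked below by running the forward mapping again.
 */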
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 2070 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2071 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2072 | struct r5conf *conf = sh->raid_conf; |
NeilBrown | b875e53 | 2006-12-10 02:20:49 -0800 | [diff] [blame] | 2073 | int raid_disks = sh->disks; |
| 2074 | int data_disks = raid_disks - conf->max_degraded; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2075 | sector_t new_sector = sh->sector, check; |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 2076 | int sectors_per_chunk = previous ? conf->prev_chunk_sectors |
| 2077 | : conf->chunk_sectors; |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 2078 | int algorithm = previous ? conf->prev_algo |
| 2079 | : conf->algorithm; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2080 | sector_t stripe; |
| 2081 | int chunk_offset; |
NeilBrown | 35f2a59 | 2010-04-20 14:13:34 +1000 | [diff] [blame] | 2082 | sector_t chunk_number; |
| 2083 | int dummy1, dd_idx = i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2084 | sector_t r_sector; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2085 | struct stripe_head sh2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2086 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2087 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2088 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
| 2089 | stripe = new_sector; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2090 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2091 | if (i == sh->pd_idx) |
| 2092 | return 0; |
| 2093 | switch(conf->level) { |
| 2094 | case 4: break; |
| 2095 | case 5: |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 2096 | switch (algorithm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2097 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 2098 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 2099 | if (i > sh->pd_idx) |
| 2100 | i--; |
| 2101 | break; |
| 2102 | case ALGORITHM_LEFT_SYMMETRIC: |
| 2103 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 2104 | if (i < sh->pd_idx) |
| 2105 | i += raid_disks; |
| 2106 | i -= (sh->pd_idx + 1); |
| 2107 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2108 | case ALGORITHM_PARITY_0: |
| 2109 | i -= 1; |
| 2110 | break; |
| 2111 | case ALGORITHM_PARITY_N: |
| 2112 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2113 | default: |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2114 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2115 | } |
| 2116 | break; |
| 2117 | case 6: |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2118 | if (i == sh->qd_idx) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2119 | return 0; /* It is the Q disk */ |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 2120 | switch (algorithm) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2121 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 2122 | case ALGORITHM_RIGHT_ASYMMETRIC: |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2123 | case ALGORITHM_ROTATING_ZERO_RESTART: |
| 2124 | case ALGORITHM_ROTATING_N_RESTART: |
| 2125 | if (sh->pd_idx == raid_disks-1) |
| 2126 | i--; /* Q D D D P */ |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2127 | else if (i > sh->pd_idx) |
| 2128 | i -= 2; /* D D P Q D */ |
| 2129 | break; |
| 2130 | case ALGORITHM_LEFT_SYMMETRIC: |
| 2131 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 2132 | if (sh->pd_idx == raid_disks-1) |
| 2133 | i--; /* Q D D D P */ |
| 2134 | else { |
| 2135 | /* D D P Q D */ |
| 2136 | if (i < sh->pd_idx) |
| 2137 | i += raid_disks; |
| 2138 | i -= (sh->pd_idx + 2); |
| 2139 | } |
| 2140 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2141 | case ALGORITHM_PARITY_0: |
| 2142 | i -= 2; |
| 2143 | break; |
| 2144 | case ALGORITHM_PARITY_N: |
| 2145 | break; |
| 2146 | case ALGORITHM_ROTATING_N_CONTINUE: |
NeilBrown | e4424fe | 2009-10-16 16:27:34 +1100 | [diff] [blame] | 2147 | 			/* Like left_symmetric, but Q is before P */ |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2148 | if (sh->pd_idx == 0) |
| 2149 | i--; /* P D D D Q */ |
NeilBrown | e4424fe | 2009-10-16 16:27:34 +1100 | [diff] [blame] | 2150 | else { |
| 2151 | /* D D Q P D */ |
| 2152 | if (i < sh->pd_idx) |
| 2153 | i += raid_disks; |
| 2154 | i -= (sh->pd_idx + 1); |
| 2155 | } |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2156 | break; |
| 2157 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 2158 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 2159 | if (i > sh->pd_idx) |
| 2160 | i--; |
| 2161 | break; |
| 2162 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 2163 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 2164 | if (i < sh->pd_idx) |
| 2165 | i += data_disks + 1; |
| 2166 | i -= (sh->pd_idx + 1); |
| 2167 | break; |
| 2168 | case ALGORITHM_PARITY_0_6: |
| 2169 | i -= 1; |
| 2170 | break; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2171 | default: |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2172 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2173 | } |
| 2174 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2175 | } |
| 2176 | |
| 2177 | chunk_number = stripe * data_disks + i; |
NeilBrown | 35f2a59 | 2010-04-20 14:13:34 +1000 | [diff] [blame] | 2178 | r_sector = chunk_number * sectors_per_chunk + chunk_offset; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2179 | |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2180 | check = raid5_compute_sector(conf, r_sector, |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 2181 | previous, &dummy1, &sh2); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2182 | if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx |
| 2183 | || sh2.qd_idx != sh->qd_idx) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 2184 | printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", |
| 2185 | mdname(conf->mddev)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2186 | return 0; |
| 2187 | } |
| 2188 | return r_sector; |
| 2189 | } |
| 2190 | |
| 2191 | |
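/*
 * schedule_reconstruction - queue the async operations that bring the
 * parity block(s) up to date for a write.  'rcw' selects a
 * reconstruct-write (recompute parity from all the data blocks);
 * otherwise a read-modify-write is used (prexor the old data and
 * parity out, then xor the new data in).  RAID-6 only supports the
 * rcw path here, hence the BUG_ON in the else branch.
 */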
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2192 | static void |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2193 | schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2194 | int rcw, int expand) |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2195 | { |
| 2196 | int i, pd_idx = sh->pd_idx, disks = sh->disks; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2197 | struct r5conf *conf = sh->raid_conf; |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2198 | int level = conf->level; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2199 | |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2200 | if (rcw) { |
| 2201 | /* if we are not expanding this is a proper write request, and |
| 2202 | * there will be bios with new data to be drained into the |
| 2203 | * stripe cache |
| 2204 | */ |
| 2205 | if (!expand) { |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2206 | sh->reconstruct_state = reconstruct_state_drain_run; |
| 2207 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
| 2208 | } else |
| 2209 | sh->reconstruct_state = reconstruct_state_run; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2210 | |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 2211 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2212 | |
| 2213 | for (i = disks; i--; ) { |
| 2214 | struct r5dev *dev = &sh->dev[i]; |
| 2215 | |
| 2216 | if (dev->towrite) { |
| 2217 | set_bit(R5_LOCKED, &dev->flags); |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 2218 | set_bit(R5_Wantdrain, &dev->flags); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2219 | if (!expand) |
| 2220 | clear_bit(R5_UPTODATE, &dev->flags); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2221 | s->locked++; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2222 | } |
| 2223 | } |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2224 | if (s->locked + conf->max_degraded == disks) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 2225 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2226 | atomic_inc(&conf->pending_full_writes); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2227 | } else { |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2228 | BUG_ON(level == 6); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2229 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || |
| 2230 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); |
| 2231 | |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 2232 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2233 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); |
| 2234 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 2235 | set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2236 | |
| 2237 | for (i = disks; i--; ) { |
| 2238 | struct r5dev *dev = &sh->dev[i]; |
| 2239 | if (i == pd_idx) |
| 2240 | continue; |
| 2241 | |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2242 | if (dev->towrite && |
| 2243 | (test_bit(R5_UPTODATE, &dev->flags) || |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 2244 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 2245 | set_bit(R5_Wantdrain, &dev->flags); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2246 | set_bit(R5_LOCKED, &dev->flags); |
| 2247 | clear_bit(R5_UPTODATE, &dev->flags); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2248 | s->locked++; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2249 | } |
| 2250 | } |
| 2251 | } |
| 2252 | |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2253 | /* keep the parity disk(s) locked while asynchronous operations |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2254 | * are in flight |
| 2255 | */ |
| 2256 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); |
| 2257 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2258 | s->locked++; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2259 | |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2260 | if (level == 6) { |
| 2261 | int qd_idx = sh->qd_idx; |
| 2262 | struct r5dev *dev = &sh->dev[qd_idx]; |
| 2263 | |
| 2264 | set_bit(R5_LOCKED, &dev->flags); |
| 2265 | clear_bit(R5_UPTODATE, &dev->flags); |
| 2266 | s->locked++; |
| 2267 | } |
| 2268 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2269 | pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 2270 | __func__, (unsigned long long)sh->sector, |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2271 | s->locked, s->ops_request); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2272 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2273 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2274 | /* |
| 2275 | * Each stripe/dev can have one or more bion attached. |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2276 | * toread/towrite point to the first in a chain. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2277 | * The bi_next chain must be in order. |
| 2278 | */ |
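/*
 * A bio that overlaps one already queued on the same device is
 * rejected: R5_Overlap is set and 0 is returned, so the caller can
 * wait on wait_for_overlap and retry.
 */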
| 2279 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) |
| 2280 | { |
| 2281 | struct bio **bip; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2282 | struct r5conf *conf = sh->raid_conf; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 2283 | int firstwrite=0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2284 | |
NeilBrown | cbe47ec | 2011-07-26 11:20:35 +1000 | [diff] [blame] | 2285 | pr_debug("adding bi b#%llu to stripe s#%llu\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2286 | (unsigned long long)bi->bi_sector, |
| 2287 | (unsigned long long)sh->sector); |
| 2288 | |
| 2289 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2290 | spin_lock_irq(&conf->device_lock); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 2291 | if (forwrite) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2292 | bip = &sh->dev[dd_idx].towrite; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 2293 | if (*bip == NULL && sh->dev[dd_idx].written == NULL) |
| 2294 | firstwrite = 1; |
| 2295 | } else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2296 | bip = &sh->dev[dd_idx].toread; |
| 2297 | while (*bip && (*bip)->bi_sector < bi->bi_sector) { |
| 2298 | if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) |
| 2299 | goto overlap; |
| 2300 | bip = & (*bip)->bi_next; |
| 2301 | } |
| 2302 | if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) |
| 2303 | goto overlap; |
| 2304 | |
Eric Sesterhenn | 78bafeb | 2006-04-02 13:31:42 +0200 | [diff] [blame] | 2305 | BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2306 | if (*bip) |
| 2307 | bi->bi_next = *bip; |
| 2308 | *bip = bi; |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2309 | bi->bi_phys_segments++; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 2310 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2311 | if (forwrite) { |
| 2312 | /* check if page is covered */ |
| 2313 | sector_t sector = sh->dev[dd_idx].sector; |
| 2314 | for (bi=sh->dev[dd_idx].towrite; |
| 2315 | sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && |
| 2316 | bi && bi->bi_sector <= sector; |
| 2317 | bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { |
| 2318 | if (bi->bi_sector + (bi->bi_size>>9) >= sector) |
| 2319 | sector = bi->bi_sector + (bi->bi_size>>9); |
| 2320 | } |
| 2321 | if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) |
| 2322 | set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); |
| 2323 | } |
NeilBrown | cbe47ec | 2011-07-26 11:20:35 +1000 | [diff] [blame] | 2324 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | cbe47ec | 2011-07-26 11:20:35 +1000 | [diff] [blame] | 2325 | |
| 2326 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", |
| 2327 | (unsigned long long)(*bip)->bi_sector, |
| 2328 | (unsigned long long)sh->sector, dd_idx); |
| 2329 | |
| 2330 | if (conf->mddev->bitmap && firstwrite) { |
| 2331 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, |
| 2332 | STRIPE_SECTORS, 0); |
| 2333 | sh->bm_seq = conf->seq_flush+1; |
| 2334 | set_bit(STRIPE_BIT_DELAY, &sh->state); |
| 2335 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2336 | return 1; |
| 2337 | |
| 2338 | overlap: |
| 2339 | set_bit(R5_Overlap, &sh->dev[dd_idx].flags); |
| 2340 | spin_unlock_irq(&conf->device_lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2341 | return 0; |
| 2342 | } |
| 2343 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2344 | static void end_reshape(struct r5conf *conf); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 2345 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2346 | static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2347 | struct stripe_head *sh) |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2348 | { |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 2349 | int sectors_per_chunk = |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 2350 | previous ? conf->prev_chunk_sectors : conf->chunk_sectors; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2351 | int dd_idx; |
Coywolf Qi Hunt | 2d2063c | 2006-10-03 01:15:50 -0700 | [diff] [blame] | 2352 | int chunk_offset = sector_div(stripe, sectors_per_chunk); |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2353 | int disks = previous ? conf->previous_raid_disks : conf->raid_disks; |
Coywolf Qi Hunt | 2d2063c | 2006-10-03 01:15:50 -0700 | [diff] [blame] | 2354 | |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2355 | raid5_compute_sector(conf, |
| 2356 | stripe * (disks - conf->max_degraded) |
NeilBrown | b875e53 | 2006-12-10 02:20:49 -0800 | [diff] [blame] | 2357 | *sectors_per_chunk + chunk_offset, |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2358 | previous, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2359 | &dd_idx, sh); |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2360 | } |
| 2361 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2362 | static void |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2363 | handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2364 | struct stripe_head_state *s, int disks, |
| 2365 | struct bio **return_bi) |
| 2366 | { |
| 2367 | int i; |
| 2368 | for (i = disks; i--; ) { |
| 2369 | struct bio *bi; |
| 2370 | int bitmap_end = 0; |
| 2371 | |
| 2372 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 2373 | struct md_rdev *rdev; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2374 | rcu_read_lock(); |
| 2375 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 2376 | if (rdev && test_bit(In_sync, &rdev->flags)) |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2377 | atomic_inc(&rdev->nr_pending); |
| 2378 | else |
| 2379 | rdev = NULL; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2380 | rcu_read_unlock(); |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2381 | if (rdev) { |
| 2382 | if (!rdev_set_badblocks( |
| 2383 | rdev, |
| 2384 | sh->sector, |
| 2385 | STRIPE_SECTORS, 0)) |
| 2386 | md_error(conf->mddev, rdev); |
| 2387 | rdev_dec_pending(rdev, conf->mddev); |
| 2388 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2389 | } |
| 2390 | spin_lock_irq(&conf->device_lock); |
| 2391 | /* fail all writes first */ |
| 2392 | bi = sh->dev[i].towrite; |
| 2393 | sh->dev[i].towrite = NULL; |
| 2394 | if (bi) { |
| 2395 | s->to_write--; |
| 2396 | bitmap_end = 1; |
| 2397 | } |
| 2398 | |
| 2399 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2400 | wake_up(&conf->wait_for_overlap); |
| 2401 | |
| 2402 | while (bi && bi->bi_sector < |
| 2403 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2404 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
| 2405 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2406 | if (!raid5_dec_bi_phys_segments(bi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2407 | md_write_end(conf->mddev); |
| 2408 | bi->bi_next = *return_bi; |
| 2409 | *return_bi = bi; |
| 2410 | } |
| 2411 | bi = nextbi; |
| 2412 | } |
| 2413 | /* and fail all 'written' */ |
| 2414 | bi = sh->dev[i].written; |
| 2415 | sh->dev[i].written = NULL; |
| 2416 | if (bi) bitmap_end = 1; |
| 2417 | while (bi && bi->bi_sector < |
| 2418 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2419 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
| 2420 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2421 | if (!raid5_dec_bi_phys_segments(bi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2422 | md_write_end(conf->mddev); |
| 2423 | bi->bi_next = *return_bi; |
| 2424 | *return_bi = bi; |
| 2425 | } |
| 2426 | bi = bi2; |
| 2427 | } |
| 2428 | |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2429 | /* fail any reads if this device is non-operational and |
| 2430 | * the data has not reached the cache yet. |
| 2431 | */ |
| 2432 | if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && |
| 2433 | (!test_bit(R5_Insync, &sh->dev[i].flags) || |
| 2434 | test_bit(R5_ReadError, &sh->dev[i].flags))) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2435 | bi = sh->dev[i].toread; |
| 2436 | sh->dev[i].toread = NULL; |
| 2437 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2438 | wake_up(&conf->wait_for_overlap); |
| 2439 | if (bi) s->to_read--; |
| 2440 | while (bi && bi->bi_sector < |
| 2441 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2442 | struct bio *nextbi = |
| 2443 | r5_next_bio(bi, sh->dev[i].sector); |
| 2444 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2445 | if (!raid5_dec_bi_phys_segments(bi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2446 | bi->bi_next = *return_bi; |
| 2447 | *return_bi = bi; |
| 2448 | } |
| 2449 | bi = nextbi; |
| 2450 | } |
| 2451 | } |
| 2452 | spin_unlock_irq(&conf->device_lock); |
| 2453 | if (bitmap_end) |
| 2454 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, |
| 2455 | STRIPE_SECTORS, 0, 0); |
NeilBrown | 8cfa7b0 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2456 | /* If we were in the middle of a write the parity block might |
| 2457 | * still be locked - so just clear all R5_LOCKED flags |
| 2458 | */ |
| 2459 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2460 | } |
| 2461 | |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 2462 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 2463 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 2464 | md_wakeup_thread(conf->mddev->thread); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2465 | } |
| 2466 | |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2467 | static void |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2468 | handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2469 | struct stripe_head_state *s) |
| 2470 | { |
| 2471 | int abort = 0; |
| 2472 | int i; |
| 2473 | |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2474 | clear_bit(STRIPE_SYNCING, &sh->state); |
| 2475 | s->syncing = 0; |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 2476 | s->replacing = 0; |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2477 | /* There is nothing more to do for sync/check/repair. |
NeilBrown | 18b9837 | 2012-04-01 23:48:38 +1000 | [diff] [blame] | 2478 | * Don't even need to abort as that is handled elsewhere |
| 2479 | * if needed, and not always wanted e.g. if there is a known |
| 2480 | * bad block here. |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 2481 | * For recover/replace we need to record a bad block on all |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2482 | * non-sync devices, or abort the recovery |
| 2483 | */ |
NeilBrown | 18b9837 | 2012-04-01 23:48:38 +1000 | [diff] [blame] | 2484 | if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { |
| 2485 | /* During recovery devices cannot be removed, so |
| 2486 | * locking and refcounting of rdevs is not needed |
| 2487 | */ |
| 2488 | for (i = 0; i < conf->raid_disks; i++) { |
| 2489 | struct md_rdev *rdev = conf->disks[i].rdev; |
| 2490 | if (rdev |
| 2491 | && !test_bit(Faulty, &rdev->flags) |
| 2492 | && !test_bit(In_sync, &rdev->flags) |
| 2493 | && !rdev_set_badblocks(rdev, sh->sector, |
| 2494 | STRIPE_SECTORS, 0)) |
| 2495 | abort = 1; |
| 2496 | rdev = conf->disks[i].replacement; |
| 2497 | if (rdev |
| 2498 | && !test_bit(Faulty, &rdev->flags) |
| 2499 | && !test_bit(In_sync, &rdev->flags) |
| 2500 | && !rdev_set_badblocks(rdev, sh->sector, |
| 2501 | STRIPE_SECTORS, 0)) |
| 2502 | abort = 1; |
| 2503 | } |
| 2504 | if (abort) |
| 2505 | conf->recovery_disabled = |
| 2506 | conf->mddev->recovery_disabled; |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2507 | } |
NeilBrown | 18b9837 | 2012-04-01 23:48:38 +1000 | [diff] [blame] | 2508 | md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 2509 | } |
| 2510 | |
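/*
 * want_replace - true if the replacement device in this slot is
 * present, working, and still needs the data at sh->sector copied
 * onto it.
 */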
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 2511 | static int want_replace(struct stripe_head *sh, int disk_idx) |
| 2512 | { |
| 2513 | struct md_rdev *rdev; |
| 2514 | int rv = 0; |
| 2515 | /* Doing recovery so rcu locking not required */ |
| 2516 | rdev = sh->raid_conf->disks[disk_idx].replacement; |
| 2517 | if (rdev |
| 2518 | && !test_bit(Faulty, &rdev->flags) |
| 2519 | && !test_bit(In_sync, &rdev->flags) |
| 2520 | && (rdev->recovery_offset <= sh->sector |
| 2521 | || rdev->mddev->recovery_cp <= sh->sector)) |
| 2522 | rv = 1; |
| 2523 | |
| 2524 | return rv; |
| 2525 | } |
| 2526 | |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2527 | /* fetch_block - checks the given member device to see if its data needs |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2528 | * to be read or computed to satisfy a request. |
| 2529 | * |
| 2530 | * Returns 1 when no more member devices need to be checked, otherwise returns |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2531 | * 0 to tell the loop in handle_stripe_fill to continue |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2532 | */ |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2533 | static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, |
| 2534 | int disk_idx, int disks) |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2535 | { |
| 2536 | struct r5dev *dev = &sh->dev[disk_idx]; |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2537 | struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], |
| 2538 | &sh->dev[s->failed_num[1]] }; |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2539 | |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2540 | /* is the data in this block needed, and can we get it? */ |
| 2541 | if (!test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2542 | !test_bit(R5_UPTODATE, &dev->flags) && |
| 2543 | (dev->toread || |
| 2544 | (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || |
| 2545 | s->syncing || s->expanding || |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 2546 | (s->replacing && want_replace(sh, disk_idx)) || |
NeilBrown | 5d35e09 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2547 | (s->failed >= 1 && fdev[0]->toread) || |
| 2548 | (s->failed >= 2 && fdev[1]->toread) || |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2549 | (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && |
| 2550 | !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || |
| 2551 | (sh->raid_conf->level == 6 && s->failed && s->to_write))) { |
Yuri Tikhonov | 5599bec | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2552 | /* we would like to get this block, possibly by computing it, |
| 2553 | * otherwise read it if the backing disk is insync |
| 2554 | */ |
| 2555 | BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); |
| 2556 | BUG_ON(test_bit(R5_Wantread, &dev->flags)); |
| 2557 | if ((s->uptodate == disks - 1) && |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2558 | (s->failed && (disk_idx == s->failed_num[0] || |
| 2559 | disk_idx == s->failed_num[1]))) { |
Yuri Tikhonov | 5599bec | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2560 | /* have disk failed, and we're requested to fetch it; |
| 2561 | * do compute it |
| 2562 | */ |
| 2563 | pr_debug("Computing stripe %llu block %d\n", |
| 2564 | (unsigned long long)sh->sector, disk_idx); |
| 2565 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 2566 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 2567 | set_bit(R5_Wantcompute, &dev->flags); |
| 2568 | sh->ops.target = disk_idx; |
| 2569 | sh->ops.target2 = -1; /* no 2nd target */ |
| 2570 | s->req_compute = 1; |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2571 | /* Careful: from this point on 'uptodate' is in the eye |
| 2572 | * of raid_run_ops which services 'compute' operations |
| 2573 | * before writes. R5_Wantcompute flags a block that will |
| 2574 | * be R5_UPTODATE by the time it is needed for a |
| 2575 | * subsequent operation. |
| 2576 | */ |
Yuri Tikhonov | 5599bec | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2577 | s->uptodate++; |
| 2578 | return 1; |
| 2579 | } else if (s->uptodate == disks-2 && s->failed >= 2) { |
| 2580 | /* Computing 2-failure is *very* expensive; only |
| 2581 | * do it if failed >= 2 |
| 2582 | */ |
| 2583 | int other; |
| 2584 | for (other = disks; other--; ) { |
| 2585 | if (other == disk_idx) |
| 2586 | continue; |
| 2587 | if (!test_bit(R5_UPTODATE, |
| 2588 | &sh->dev[other].flags)) |
| 2589 | break; |
| 2590 | } |
| 2591 | BUG_ON(other < 0); |
| 2592 | pr_debug("Computing stripe %llu blocks %d,%d\n", |
| 2593 | (unsigned long long)sh->sector, |
| 2594 | disk_idx, other); |
| 2595 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 2596 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 2597 | set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); |
| 2598 | set_bit(R5_Wantcompute, &sh->dev[other].flags); |
| 2599 | sh->ops.target = disk_idx; |
| 2600 | sh->ops.target2 = other; |
| 2601 | s->uptodate += 2; |
| 2602 | s->req_compute = 1; |
| 2603 | return 1; |
| 2604 | } else if (test_bit(R5_Insync, &dev->flags)) { |
| 2605 | set_bit(R5_LOCKED, &dev->flags); |
| 2606 | set_bit(R5_Wantread, &dev->flags); |
| 2607 | s->locked++; |
| 2608 | pr_debug("Reading block %d (sync=%d)\n", |
| 2609 | disk_idx, s->syncing); |
| 2610 | } |
| 2611 | } |
| 2612 | |
| 2613 | return 0; |
| 2614 | } |
| 2615 | |
| 2616 | /** |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2617 | * handle_stripe_fill - read or compute data to satisfy pending requests. |
Yuri Tikhonov | 5599bec | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2618 | */ |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2619 | static void handle_stripe_fill(struct stripe_head *sh, |
| 2620 | struct stripe_head_state *s, |
| 2621 | int disks) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2622 | { |
| 2623 | int i; |
Yuri Tikhonov | 5599bec | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2624 | |
| 2625 | /* look for blocks to read/compute, skip this if a compute |
| 2626 | * is already in flight, or if the stripe contents are in the |
| 2627 | * midst of changing due to a write |
| 2628 | */ |
| 2629 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && |
| 2630 | !sh->reconstruct_state) |
| 2631 | for (i = disks; i--; ) |
NeilBrown | 93b3dbc | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2632 | if (fetch_block(sh, s, i, disks)) |
Yuri Tikhonov | 5599bec | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2633 | break; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2634 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2635 | } |
| 2636 | |
| 2637 | |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2638 | /* handle_stripe_clean_event |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2639 | * any written block on an uptodate or failed drive can be returned. |
| 2640 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but |
| 2641 | * never LOCKED, so we don't need to test 'failed' directly. |
| 2642 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2643 | static void handle_stripe_clean_event(struct r5conf *conf, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2644 | struct stripe_head *sh, int disks, struct bio **return_bi) |
| 2645 | { |
| 2646 | int i; |
| 2647 | struct r5dev *dev; |
| 2648 | |
| 2649 | for (i = disks; i--; ) |
| 2650 | if (sh->dev[i].written) { |
| 2651 | dev = &sh->dev[i]; |
| 2652 | if (!test_bit(R5_LOCKED, &dev->flags) && |
| 2653 | test_bit(R5_UPTODATE, &dev->flags)) { |
| 2654 | /* We can return any write requests */ |
| 2655 | struct bio *wbi, *wbi2; |
| 2656 | int bitmap_end = 0; |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2657 | pr_debug("Return write for disc %d\n", i); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2658 | spin_lock_irq(&conf->device_lock); |
| 2659 | wbi = dev->written; |
| 2660 | dev->written = NULL; |
| 2661 | while (wbi && wbi->bi_sector < |
| 2662 | dev->sector + STRIPE_SECTORS) { |
| 2663 | wbi2 = r5_next_bio(wbi, dev->sector); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2664 | if (!raid5_dec_bi_phys_segments(wbi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2665 | md_write_end(conf->mddev); |
| 2666 | wbi->bi_next = *return_bi; |
| 2667 | *return_bi = wbi; |
| 2668 | } |
| 2669 | wbi = wbi2; |
| 2670 | } |
| 2671 | if (dev->towrite == NULL) |
| 2672 | bitmap_end = 1; |
| 2673 | spin_unlock_irq(&conf->device_lock); |
| 2674 | if (bitmap_end) |
| 2675 | bitmap_endwrite(conf->mddev->bitmap, |
| 2676 | sh->sector, |
| 2677 | STRIPE_SECTORS, |
| 2678 | !test_bit(STRIPE_DEGRADED, &sh->state), |
| 2679 | 0); |
| 2680 | } |
| 2681 | } |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 2682 | |
| 2683 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 2684 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 2685 | md_wakeup_thread(conf->mddev->thread); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2686 | } |
| 2687 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2688 | static void handle_stripe_dirtying(struct r5conf *conf, |
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2689 | struct stripe_head *sh, |
| 2690 | struct stripe_head_state *s, |
| 2691 | int disks) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2692 | { |
| 2693 | int rmw = 0, rcw = 0, i; |
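	/*
	 * rmw counts the reads needed for a read-modify-write (the old
	 * copy of each block being written, plus the old parity); rcw
	 * counts the reads needed for a reconstruct-write (every data
	 * block that is not being completely overwritten).  The cheaper
	 * scheme is scheduled below.
	 */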
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2694 | if (conf->max_degraded == 2) { |
| 2695 | /* RAID6 requires 'rcw' in current implementation |
| 2696 | 		 * Calculate the real rcw later - for now make it |
| 2697 | * look like rcw is cheaper |
| 2698 | */ |
| 2699 | rcw = 1; rmw = 2; |
| 2700 | } else for (i = disks; i--; ) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2701 | /* would I have to read this buffer for read_modify_write */ |
| 2702 | struct r5dev *dev = &sh->dev[i]; |
| 2703 | if ((dev->towrite || i == sh->pd_idx) && |
| 2704 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2705 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2706 | test_bit(R5_Wantcompute, &dev->flags))) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2707 | if (test_bit(R5_Insync, &dev->flags)) |
| 2708 | rmw++; |
| 2709 | else |
| 2710 | rmw += 2*disks; /* cannot read it */ |
| 2711 | } |
| 2712 | /* Would I have to read this buffer for reconstruct_write */ |
| 2713 | if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && |
| 2714 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2715 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2716 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 2717 | if (test_bit(R5_Insync, &dev->flags)) rcw++; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2718 | else |
| 2719 | rcw += 2*disks; |
| 2720 | } |
| 2721 | } |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2722 | pr_debug("for sector %llu, rmw=%d rcw=%d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2723 | (unsigned long long)sh->sector, rmw, rcw); |
| 2724 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2725 | if (rmw < rcw && rmw > 0) |
| 2726 | /* prefer read-modify-write, but need to get some data */ |
| 2727 | for (i = disks; i--; ) { |
| 2728 | struct r5dev *dev = &sh->dev[i]; |
| 2729 | if ((dev->towrite || i == sh->pd_idx) && |
| 2730 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2731 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2732 | test_bit(R5_Wantcompute, &dev->flags)) && |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2733 | test_bit(R5_Insync, &dev->flags)) { |
| 2734 | if ( |
| 2735 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2736 | pr_debug("Read_old block " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2737 | "%d for r-m-w\n", i); |
| 2738 | set_bit(R5_LOCKED, &dev->flags); |
| 2739 | set_bit(R5_Wantread, &dev->flags); |
| 2740 | s->locked++; |
| 2741 | } else { |
| 2742 | set_bit(STRIPE_DELAYED, &sh->state); |
| 2743 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2744 | } |
| 2745 | } |
| 2746 | } |
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2747 | if (rcw <= rmw && rcw > 0) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2748 | /* want reconstruct write, but need to get some data */ |
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2749 | rcw = 0; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2750 | for (i = disks; i--; ) { |
| 2751 | struct r5dev *dev = &sh->dev[i]; |
| 2752 | if (!test_bit(R5_OVERWRITE, &dev->flags) && |
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2753 | i != sh->pd_idx && i != sh->qd_idx && |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2754 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2755 | !(test_bit(R5_UPTODATE, &dev->flags) || |
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2756 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 2757 | rcw++; |
| 2758 | if (!test_bit(R5_Insync, &dev->flags)) |
| 2759 | continue; /* it's a failed drive */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2760 | if ( |
| 2761 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2762 | pr_debug("Read_old block " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2763 | "%d for Reconstruct\n", i); |
| 2764 | set_bit(R5_LOCKED, &dev->flags); |
| 2765 | set_bit(R5_Wantread, &dev->flags); |
| 2766 | s->locked++; |
| 2767 | } else { |
| 2768 | set_bit(STRIPE_DELAYED, &sh->state); |
| 2769 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2770 | } |
| 2771 | } |
| 2772 | } |
NeilBrown | c8ac180 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 2773 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2774 | /* now if nothing is locked, and if we have enough data, |
| 2775 | * we can start a write request |
| 2776 | */ |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2777 | /* since handle_stripe can be called at any time we need to handle the |
| 2778 | * case where a compute block operation has been submitted and then a |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 2779 | * subsequent call wants to start a write request. raid_run_ops only |
| 2780 | * handles the case where compute block and reconstruct are requested |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2781 | * simultaneously. If this is not the case then new writes need to be |
| 2782 | * held off until the compute completes. |
| 2783 | */ |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2784 | if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && |
| 2785 | (s->locked == 0 && (rcw == 0 || rmw == 0) && |
| 2786 | !test_bit(STRIPE_BIT_DELAY, &sh->state))) |
Yuri Tikhonov | c0f7bdd | 2009-08-29 19:13:12 -0700 | [diff] [blame] | 2787 | schedule_reconstruction(sh, s, rcw == 0, 0); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2788 | } |
| 2789 | |
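/*
 * handle_parity_checks5 - RAID-5 check/repair state machine for one
 * stripe: schedule a parity check and, on a mismatch, either just
 * bump resync_mismatches (MD_RECOVERY_CHECK) or recompute and rewrite
 * the parity block.
 */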
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2790 | static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2791 | struct stripe_head_state *s, int disks) |
| 2792 | { |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2793 | struct r5dev *dev = NULL; |
Dan Williams | e89f896 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2794 | |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2795 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2796 | |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2797 | switch (sh->check_state) { |
| 2798 | case check_state_idle: |
| 2799 | /* start a new check operation if there are no failures */ |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2800 | if (s->failed == 0) { |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2801 | BUG_ON(s->uptodate != disks); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2802 | sh->check_state = check_state_run; |
| 2803 | set_bit(STRIPE_OP_CHECK, &s->ops_request); |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2804 | clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2805 | s->uptodate--; |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2806 | break; |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2807 | } |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2808 | dev = &sh->dev[s->failed_num[0]]; |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2809 | /* fall through */ |
| 2810 | case check_state_compute_result: |
| 2811 | sh->check_state = check_state_idle; |
| 2812 | if (!dev) |
| 2813 | dev = &sh->dev[sh->pd_idx]; |
| 2814 | |
| 2815 | /* check that a write has not made the stripe insync */ |
| 2816 | if (test_bit(STRIPE_INSYNC, &sh->state)) |
| 2817 | break; |
| 2818 | |
| 2819 | /* either failed parity check, or recovery is happening */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2820 | BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); |
| 2821 | BUG_ON(s->uptodate != disks); |
| 2822 | |
| 2823 | set_bit(R5_LOCKED, &dev->flags); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2824 | s->locked++; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2825 | set_bit(R5_Wantwrite, &dev->flags); |
Dan Williams | 830ea01 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2826 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2827 | clear_bit(STRIPE_DEGRADED, &sh->state); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2828 | set_bit(STRIPE_INSYNC, &sh->state); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2829 | break; |
| 2830 | case check_state_run: |
| 2831 | break; /* we will be called again upon completion */ |
| 2832 | case check_state_check_result: |
| 2833 | sh->check_state = check_state_idle; |
| 2834 | |
| 2835 | /* if a failure occurred during the check operation, leave |
| 2836 | * STRIPE_INSYNC not set and let the stripe be handled again |
| 2837 | */ |
| 2838 | if (s->failed) |
| 2839 | break; |
| 2840 | |
| 2841 | /* handle a successful check operation, if parity is correct |
| 2842 | * we are done. Otherwise update the mismatch count and repair |
| 2843 | * parity if !MD_RECOVERY_CHECK |
| 2844 | */ |
Dan Williams | ad283ea | 2009-08-29 19:09:26 -0700 | [diff] [blame] | 2845 | if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2846 | /* parity is correct (on disc, |
| 2847 | * not in buffer any more) |
| 2848 | */ |
| 2849 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2850 | else { |
| 2851 | conf->mddev->resync_mismatches += STRIPE_SECTORS; |
| 2852 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 2853 | /* don't try to repair!! */ |
| 2854 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2855 | else { |
| 2856 | sh->check_state = check_state_compute_run; |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2857 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2858 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 2859 | set_bit(R5_Wantcompute, |
| 2860 | &sh->dev[sh->pd_idx].flags); |
| 2861 | sh->ops.target = sh->pd_idx; |
Dan Williams | ac6b53b | 2009-07-14 13:40:19 -0700 | [diff] [blame] | 2862 | sh->ops.target2 = -1; |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2863 | s->uptodate++; |
| 2864 | } |
| 2865 | } |
| 2866 | break; |
| 2867 | case check_state_compute_run: |
| 2868 | break; |
| 2869 | default: |
| 2870 | printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", |
| 2871 | __func__, sh->check_state, |
| 2872 | (unsigned long long) sh->sector); |
| 2873 | BUG(); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2874 | } |
| 2875 | } |
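| | /* |
| | * Illustrative note (a sketch of the invariant, not driver code): for |
| | * RAID-4/5 the check scheduled above verifies that the parity block is |
| | * the XOR of all data blocks in the stripe, i.e. |
| | * |
| | *	P[b] == d0[b] ^ d1[b] ^ ... ^ dn[b]	for every byte b |
| | * |
| | * A non-zero XOR is reported by setting SUM_CHECK_P_RESULT in |
| | * sh->ops.zero_sum_result; handle_parity_checks5() then either marks |
| | * the stripe in-sync (when only checking) or schedules a recompute and |
| | * write-back of the parity block. |
| | */ |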
| 2876 | |
| 2877 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 2878 | static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 2879 | struct stripe_head_state *s, |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2880 | int disks) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2881 | { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2882 | int pd_idx = sh->pd_idx; |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 2883 | int qd_idx = sh->qd_idx; |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2884 | struct r5dev *dev; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2885 | |
| 2886 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2887 | |
| 2888 | BUG_ON(s->failed > 2); |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2889 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2890 | /* Want to check and possibly repair P and Q. |
| 2891 | * However there could be one 'failed' device, in which |
| 2892 | * case we can only check one of them, possibly using the |
| 2893 | * other to generate missing data |
| 2894 | */ |
| 2895 | |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2896 | switch (sh->check_state) { |
| 2897 | case check_state_idle: |
| 2898 | /* start a new check operation if there are < 2 failures */ |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2899 | if (s->failed == s->q_failed) { |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2900 | /* The only possible failed device holds Q, so it |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2901 | * makes sense to check P (if anything else had failed, |
| 2902 | * we would have used P to recreate it). |
| 2903 | */ |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2904 | sh->check_state = check_state_run; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2905 | } |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2906 | if (!s->q_failed && s->failed < 2) { |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2907 | /* Q is not failed, and we didn't use it to generate |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2908 | * anything, so it makes sense to check it |
| 2909 | */ |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2910 | if (sh->check_state == check_state_run) |
| 2911 | sh->check_state = check_state_run_pq; |
| 2912 | else |
| 2913 | sh->check_state = check_state_run_q; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2914 | } |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 2915 | |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2916 | /* discard potentially stale zero_sum_result */ |
| 2917 | sh->ops.zero_sum_result = 0; |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 2918 | |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2919 | if (sh->check_state == check_state_run) { |
| 2920 | /* async_xor_zero_sum destroys the contents of P */ |
| 2921 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
| 2922 | s->uptodate--; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2923 | } |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2924 | if (sh->check_state >= check_state_run && |
| 2925 | sh->check_state <= check_state_run_pq) { |
| 2926 | /* async_syndrome_zero_sum preserves P and Q, so |
| 2927 | * no need to mark them !uptodate here |
| 2928 | */ |
| 2929 | set_bit(STRIPE_OP_CHECK, &s->ops_request); |
| 2930 | break; |
| 2931 | } |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 2932 | |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2933 | /* we have 2-disk failure */ |
| 2934 | BUG_ON(s->failed != 2); |
| 2935 | /* fall through */ |
| 2936 | case check_state_compute_result: |
| 2937 | sh->check_state = check_state_idle; |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 2938 | |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2939 | /* check that a write has not made the stripe insync */ |
| 2940 | if (test_bit(STRIPE_INSYNC, &sh->state)) |
| 2941 | break; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2942 | |
| 2943 | /* now write out any block on a failed drive, |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2944 | * or P or Q if they were recomputed |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2945 | */ |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2946 | BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2947 | if (s->failed == 2) { |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2948 | dev = &sh->dev[s->failed_num[1]]; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2949 | s->locked++; |
| 2950 | set_bit(R5_LOCKED, &dev->flags); |
| 2951 | set_bit(R5_Wantwrite, &dev->flags); |
| 2952 | } |
| 2953 | if (s->failed >= 1) { |
NeilBrown | f2b3b44 | 2011-07-26 11:35:19 +1000 | [diff] [blame] | 2954 | dev = &sh->dev[s->failed_num[0]]; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2955 | s->locked++; |
| 2956 | set_bit(R5_LOCKED, &dev->flags); |
| 2957 | set_bit(R5_Wantwrite, &dev->flags); |
| 2958 | } |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2959 | if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2960 | dev = &sh->dev[pd_idx]; |
| 2961 | s->locked++; |
| 2962 | set_bit(R5_LOCKED, &dev->flags); |
| 2963 | set_bit(R5_Wantwrite, &dev->flags); |
| 2964 | } |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2965 | if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2966 | dev = &sh->dev[qd_idx]; |
| 2967 | s->locked++; |
| 2968 | set_bit(R5_LOCKED, &dev->flags); |
| 2969 | set_bit(R5_Wantwrite, &dev->flags); |
| 2970 | } |
| 2971 | clear_bit(STRIPE_DEGRADED, &sh->state); |
| 2972 | |
| 2973 | set_bit(STRIPE_INSYNC, &sh->state); |
Dan Williams | d82dfee | 2009-07-14 13:40:57 -0700 | [diff] [blame] | 2974 | break; |
| 2975 | case check_state_run: |
| 2976 | case check_state_run_q: |
| 2977 | case check_state_run_pq: |
| 2978 | break; /* we will be called again upon completion */ |
| 2979 | case check_state_check_result: |
| 2980 | sh->check_state = check_state_idle; |
| 2981 | |
| 2982 | /* handle a successful check operation, if parity is correct |
| 2983 | * we are done. Otherwise update the mismatch count and repair |
| 2984 | * parity if !MD_RECOVERY_CHECK |
| 2985 | */ |
| 2986 | if (sh->ops.zero_sum_result == 0) { |
| 2987 | /* both parities are correct */ |
| 2988 | if (!s->failed) |
| 2989 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2990 | else { |
| 2991 | /* in contrast to the raid5 case we can validate |
| 2992 | * parity, but still have a failure to write |
| 2993 | * back |
| 2994 | */ |
| 2995 | sh->check_state = check_state_compute_result; |
| 2996 | /* Returning at this point means that we may go |
| 2997 | * off and bring p and/or q uptodate again, so |
| 2998 | * we make sure to check zero_sum_result again |
| 2999 | * to verify whether p or q need writeback |
| 3000 | */ |
| 3001 | } |
| 3002 | } else { |
| 3003 | conf->mddev->resync_mismatches += STRIPE_SECTORS; |
| 3004 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 3005 | /* don't try to repair!! */ |
| 3006 | set_bit(STRIPE_INSYNC, &sh->state); |
| 3007 | else { |
| 3008 | int *target = &sh->ops.target; |
| 3009 | |
| 3010 | sh->ops.target = -1; |
| 3011 | sh->ops.target2 = -1; |
| 3012 | sh->check_state = check_state_compute_run; |
| 3013 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 3014 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 3015 | if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { |
| 3016 | set_bit(R5_Wantcompute, |
| 3017 | &sh->dev[pd_idx].flags); |
| 3018 | *target = pd_idx; |
| 3019 | target = &sh->ops.target2; |
| 3020 | s->uptodate++; |
| 3021 | } |
| 3022 | if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { |
| 3023 | set_bit(R5_Wantcompute, |
| 3024 | &sh->dev[qd_idx].flags); |
| 3025 | *target = qd_idx; |
| 3026 | s->uptodate++; |
| 3027 | } |
| 3028 | } |
| 3029 | } |
| 3030 | break; |
| 3031 | case check_state_compute_run: |
| 3032 | break; |
| 3033 | default: |
| 3034 | printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", |
| 3035 | __func__, sh->check_state, |
| 3036 | (unsigned long long) sh->sector); |
| 3037 | BUG(); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3038 | } |
| 3039 | } |
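| | /* |
| | * Worked example for the repair path above (illustrative only): after |
| | * a failed check, sh->ops.zero_sum_result records which syndrome block |
| | * mismatched and the compute targets are filled in accordingly: |
| | * |
| | *	P only  -> ops.target = pd_idx, ops.target2 = -1 |
| | *	Q only  -> ops.target = qd_idx, ops.target2 = -1 |
| | *	P and Q -> ops.target = pd_idx, ops.target2 = qd_idx |
| | * |
| | * s->uptodate is incremented once per block being recomputed so the |
| | * accounting of in-memory blocks stays consistent. |
| | */ |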
| 3040 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3041 | static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3042 | { |
| 3043 | int i; |
| 3044 | |
| 3045 | /* We have read all the blocks in this stripe and now we need to |
| 3046 | * copy some of them into a target stripe for expand. |
| 3047 | */ |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 3048 | struct dma_async_tx_descriptor *tx = NULL; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3049 | clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 3050 | for (i = 0; i < sh->disks; i++) |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 3051 | if (i != sh->pd_idx && i != sh->qd_idx) { |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3052 | int dd_idx, j; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3053 | struct stripe_head *sh2; |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 3054 | struct async_submit_ctl submit; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3055 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 3056 | sector_t bn = compute_blocknr(sh, i, 1); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3057 | sector_t s = raid5_compute_sector(conf, bn, 0, |
| 3058 | &dd_idx, NULL); |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 3059 | sh2 = get_active_stripe(conf, s, 0, 1, 1); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3060 | if (sh2 == NULL) |
| 3061 | /* so far only the early blocks of this stripe |
| 3062 | * have been requested. When later blocks |
| 3063 | * get requested, we will try again |
| 3064 | */ |
| 3065 | continue; |
| 3066 | if (!test_bit(STRIPE_EXPANDING, &sh2->state) || |
| 3067 | test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { |
| 3068 | /* must have already done this block */ |
| 3069 | release_stripe(sh2); |
| 3070 | continue; |
| 3071 | } |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 3072 | |
| 3073 | /* place all the copies on one channel */ |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 3074 | init_async_submit(&submit, 0, tx, NULL, NULL, NULL); |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 3075 | tx = async_memcpy(sh2->dev[dd_idx].page, |
Dan Williams | 88ba2aa | 2009-04-09 16:16:18 -0700 | [diff] [blame] | 3076 | sh->dev[i].page, 0, 0, STRIPE_SIZE, |
Dan Williams | a08abd8 | 2009-06-03 11:43:59 -0700 | [diff] [blame] | 3077 | &submit); |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 3078 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3079 | set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); |
| 3080 | set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); |
| 3081 | for (j = 0; j < conf->raid_disks; j++) |
| 3082 | if (j != sh2->pd_idx && |
NeilBrown | 86c374b | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3083 | j != sh2->qd_idx && |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3084 | !test_bit(R5_Expanded, &sh2->dev[j].flags)) |
| 3085 | break; |
| 3086 | if (j == conf->raid_disks) { |
| 3087 | set_bit(STRIPE_EXPAND_READY, &sh2->state); |
| 3088 | set_bit(STRIPE_HANDLE, &sh2->state); |
| 3089 | } |
| 3090 | release_stripe(sh2); |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 3091 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3092 | } |
NeilBrown | a2e0855 | 2007-09-11 15:23:36 -0700 | [diff] [blame] | 3093 | /* done submitting copies, wait for them to complete */ |
| 3094 | if (tx) { |
| 3095 | async_tx_ack(tx); |
| 3096 | dma_wait_for_async_tx(tx); |
| 3097 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3098 | } |
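| | /* |
| | * Note on the chaining above: each async_memcpy() is submitted with the |
| | * previous descriptor as its dependency via init_async_submit(), which |
| | * keeps all of the expansion copies on one channel and in order.  Only |
| | * the final descriptor is acked and waited for, so the function does |
| | * not return until every copy has completed. |
| | */ |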
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3099 | |
| 3100 | /* |
| 3101 | * handle_stripe - do things to a stripe. |
| 3102 | * |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3103 | * We lock the stripe by setting STRIPE_ACTIVE and then examine the |
| 3104 | * state of various bits to see what needs to be done. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3105 | * Possible results: |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3106 | * return some read requests which now have data |
| 3107 | * return some write requests which are safely on storage |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3108 | * schedule a read on some buffers |
| 3109 | * schedule a write of some buffers |
| 3110 | * return confirmation of parity correctness |
| 3111 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3112 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3113 | |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3114 | static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3115 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3116 | struct r5conf *conf = sh->raid_conf; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3117 | int disks = sh->disks; |
NeilBrown | 474af965fe | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3118 | struct r5dev *dev; |
| 3119 | int i; |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3120 | int do_recovery = 0; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3121 | |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3122 | memset(s, 0, sizeof(*s)); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3123 | |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3124 | s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 3125 | s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); |
| 3126 | s->failed_num[0] = -1; |
| 3127 | s->failed_num[1] = -1; |
| 3128 | |
| 3129 | /* Now to look around and see what can be done */ |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3130 | rcu_read_lock(); |
NeilBrown | c4c1663 | 2011-07-26 11:34:20 +1000 | [diff] [blame] | 3131 | spin_lock_irq(&conf->device_lock); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3132 | for (i=disks; i--; ) { |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 3133 | struct md_rdev *rdev; |
NeilBrown | 31c176e | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3134 | sector_t first_bad; |
| 3135 | int bad_sectors; |
| 3136 | int is_bad = 0; |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3137 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3138 | dev = &sh->dev[i]; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3139 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3140 | pr_debug("check %d: state 0x%lx read %p write %p written %p\n", |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3141 | i, dev->flags, |
| 3142 | dev->toread, dev->towrite, dev->written); |
Yuri Tikhonov | 6c0069c | 2009-08-29 19:13:13 -0700 | [diff] [blame] | 3143 | /* maybe we can reply to a read |
| 3144 | * |
| 3145 | * new wantfill requests are only permitted while |
| 3146 | * ops_complete_biofill is guaranteed to be inactive |
| 3147 | */ |
| 3148 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && |
| 3149 | !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) |
| 3150 | set_bit(R5_Wantfill, &dev->flags); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3151 | |
| 3152 | /* now count some things */ |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3153 | if (test_bit(R5_LOCKED, &dev->flags)) |
| 3154 | s->locked++; |
| 3155 | if (test_bit(R5_UPTODATE, &dev->flags)) |
| 3156 | s->uptodate++; |
Dan Williams | 2d6e4ec | 2009-09-16 12:11:54 -0700 | [diff] [blame] | 3157 | if (test_bit(R5_Wantcompute, &dev->flags)) { |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3158 | s->compute++; |
| 3159 | BUG_ON(s->compute > 2); |
Dan Williams | 2d6e4ec | 2009-09-16 12:11:54 -0700 | [diff] [blame] | 3160 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3161 | |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3162 | if (test_bit(R5_Wantfill, &dev->flags)) |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3163 | s->to_fill++; |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3164 | else if (dev->toread) |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3165 | s->to_read++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3166 | if (dev->towrite) { |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3167 | s->to_write++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3168 | if (!test_bit(R5_OVERWRITE, &dev->flags)) |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3169 | s->non_overwrite++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3170 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3171 | if (dev->written) |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3172 | s->written++; |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3173 | /* Prefer to use the replacement for reads, but only |
| 3174 | * if it is recovered enough and has no bad blocks. |
| 3175 | */ |
| 3176 | rdev = rcu_dereference(conf->disks[i].replacement); |
| 3177 | if (rdev && !test_bit(Faulty, &rdev->flags) && |
| 3178 | rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && |
| 3179 | !is_badblock(rdev, sh->sector, STRIPE_SECTORS, |
| 3180 | &first_bad, &bad_sectors)) |
| 3181 | set_bit(R5_ReadRepl, &dev->flags); |
| 3182 | else { |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3183 | if (rdev) |
| 3184 | set_bit(R5_NeedReplace, &dev->flags); |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3185 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 3186 | clear_bit(R5_ReadRepl, &dev->flags); |
| 3187 | } |
NeilBrown | 9283d8c | 2011-12-08 16:27:57 +1100 | [diff] [blame] | 3188 | if (rdev && test_bit(Faulty, &rdev->flags)) |
| 3189 | rdev = NULL; |
NeilBrown | 31c176e | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3190 | if (rdev) { |
| 3191 | is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, |
| 3192 | &first_bad, &bad_sectors); |
| 3193 | if (s->blocked_rdev == NULL |
| 3194 | && (test_bit(Blocked, &rdev->flags) |
| 3195 | || is_bad < 0)) { |
| 3196 | if (is_bad < 0) |
| 3197 | set_bit(BlockedBadBlocks, |
| 3198 | &rdev->flags); |
| 3199 | s->blocked_rdev = rdev; |
| 3200 | atomic_inc(&rdev->nr_pending); |
| 3201 | } |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3202 | } |
NeilBrown | 415e72d | 2010-06-17 17:25:21 +1000 | [diff] [blame] | 3203 | clear_bit(R5_Insync, &dev->flags); |
| 3204 | if (!rdev) |
| 3205 | /* Not in-sync */; |
NeilBrown | 31c176e | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3206 | else if (is_bad) { |
| 3207 | /* also not in-sync */ |
NeilBrown | 18b9837 | 2012-04-01 23:48:38 +1000 | [diff] [blame] | 3208 | if (!test_bit(WriteErrorSeen, &rdev->flags) && |
| 3209 | test_bit(R5_UPTODATE, &dev->flags)) { |
NeilBrown | 31c176e | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3210 | /* treat as in-sync, but with a read error |
| 3211 | * which we can now try to correct |
| 3212 | */ |
| 3213 | set_bit(R5_Insync, &dev->flags); |
| 3214 | set_bit(R5_ReadError, &dev->flags); |
| 3215 | } |
| 3216 | } else if (test_bit(In_sync, &rdev->flags)) |
NeilBrown | 415e72d | 2010-06-17 17:25:21 +1000 | [diff] [blame] | 3217 | set_bit(R5_Insync, &dev->flags); |
NeilBrown | 30d7a48 | 2011-12-23 09:57:00 +1100 | [diff] [blame] | 3218 | else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) |
NeilBrown | 415e72d | 2010-06-17 17:25:21 +1000 | [diff] [blame] | 3219 | /* in sync if before recovery_offset */ |
NeilBrown | 30d7a48 | 2011-12-23 09:57:00 +1100 | [diff] [blame] | 3220 | set_bit(R5_Insync, &dev->flags); |
| 3221 | else if (test_bit(R5_UPTODATE, &dev->flags) && |
| 3222 | test_bit(R5_Expanded, &dev->flags)) |
| 3223 | /* If we've reshaped into here, we assume it is Insync. |
| 3224 | * We will shortly update recovery_offset to make |
| 3225 | * it official. |
| 3226 | */ |
| 3227 | set_bit(R5_Insync, &dev->flags); |
| 3228 | |
Adam Kwolek | 5d8c71f | 2011-12-09 14:26:11 +1100 | [diff] [blame] | 3229 | if (rdev && test_bit(R5_WriteError, &dev->flags)) { |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3230 | /* This flag does not apply to '.replacement' |
| 3231 | * only to '.rdev', so make sure to check that */ |
| 3232 | struct md_rdev *rdev2 = rcu_dereference( |
| 3233 | conf->disks[i].rdev); |
| 3234 | if (rdev2 == rdev) |
| 3235 | clear_bit(R5_Insync, &dev->flags); |
| 3236 | if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { |
NeilBrown | bc2607f | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3237 | s->handle_bad_blocks = 1; |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3238 | atomic_inc(&rdev2->nr_pending); |
NeilBrown | bc2607f | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3239 | } else |
| 3240 | clear_bit(R5_WriteError, &dev->flags); |
| 3241 | } |
Adam Kwolek | 5d8c71f | 2011-12-09 14:26:11 +1100 | [diff] [blame] | 3242 | if (rdev && test_bit(R5_MadeGood, &dev->flags)) { |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3243 | /* This flag does not apply to '.replacement' |
| 3244 | * only to '.rdev', so make sure to check that */ |
| 3245 | struct md_rdev *rdev2 = rcu_dereference( |
| 3246 | conf->disks[i].rdev); |
| 3247 | if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { |
NeilBrown | b84db56 | 2011-07-28 11:39:23 +1000 | [diff] [blame] | 3248 | s->handle_bad_blocks = 1; |
NeilBrown | 14a75d3 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3249 | atomic_inc(&rdev2->nr_pending); |
NeilBrown | b84db56 | 2011-07-28 11:39:23 +1000 | [diff] [blame] | 3250 | } else |
| 3251 | clear_bit(R5_MadeGood, &dev->flags); |
| 3252 | } |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3253 | if (test_bit(R5_MadeGoodRepl, &dev->flags)) { |
| 3254 | struct md_rdev *rdev2 = rcu_dereference( |
| 3255 | conf->disks[i].replacement); |
| 3256 | if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { |
| 3257 | s->handle_bad_blocks = 1; |
| 3258 | atomic_inc(&rdev2->nr_pending); |
| 3259 | } else |
| 3260 | clear_bit(R5_MadeGoodRepl, &dev->flags); |
| 3261 | } |
NeilBrown | 415e72d | 2010-06-17 17:25:21 +1000 | [diff] [blame] | 3262 | if (!test_bit(R5_Insync, &dev->flags)) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3263 | /* The ReadError flag will just be confusing now */ |
| 3264 | clear_bit(R5_ReadError, &dev->flags); |
| 3265 | clear_bit(R5_ReWrite, &dev->flags); |
| 3266 | } |
NeilBrown | 415e72d | 2010-06-17 17:25:21 +1000 | [diff] [blame] | 3267 | if (test_bit(R5_ReadError, &dev->flags)) |
| 3268 | clear_bit(R5_Insync, &dev->flags); |
| 3269 | if (!test_bit(R5_Insync, &dev->flags)) { |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3270 | if (s->failed < 2) |
| 3271 | s->failed_num[s->failed] = i; |
| 3272 | s->failed++; |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3273 | if (rdev && !test_bit(Faulty, &rdev->flags)) |
| 3274 | do_recovery = 1; |
NeilBrown | 415e72d | 2010-06-17 17:25:21 +1000 | [diff] [blame] | 3275 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3276 | } |
NeilBrown | c4c1663 | 2011-07-26 11:34:20 +1000 | [diff] [blame] | 3277 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3278 | if (test_bit(STRIPE_SYNCING, &sh->state)) { |
| 3279 | /* If there is a failed device being replaced, |
| 3280 | * we must be recovering; |
| 3281 | * else if we are after recovery_cp, we must be syncing; |
majianpeng | c6d2e08 | 2012-04-02 01:16:59 +1000 | [diff] [blame] | 3282 | * else if MD_RECOVERY_REQUESTED is set, we also are syncing; |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3283 | * else we can only be replacing. |
| 3284 | * Sync and recovery both need to read all devices, and so |
| 3285 | * use the same flag. |
| 3286 | */ |
| 3287 | if (do_recovery || |
majianpeng | c6d2e08 | 2012-04-02 01:16:59 +1000 | [diff] [blame] | 3288 | sh->sector >= conf->mddev->recovery_cp || |
| 3289 | test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3290 | s->syncing = 1; |
| 3291 | else |
| 3292 | s->replacing = 1; |
| 3293 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3294 | rcu_read_unlock(); |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3295 | } |
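| | /* |
| | * For example (illustrative): on an array with one Faulty member, the |
| | * loop above leaves R5_Insync clear on that slot, records its index in |
| | * s->failed_num[0] and sets s->failed to 1, while a slot whose |
| | * replacement is fully recovered gets R5_ReadRepl set so reads are |
| | * directed at the replacement.  handle_stripe() uses these counters to |
| | * choose between normal, degraded and failed-stripe handling. |
| | */ |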
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3296 | |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3297 | static void handle_stripe(struct stripe_head *sh) |
| 3298 | { |
| 3299 | struct stripe_head_state s; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3300 | struct r5conf *conf = sh->raid_conf; |
NeilBrown | 3687c06 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3301 | int i; |
NeilBrown | 8478955 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3302 | int prexor; |
| 3303 | int disks = sh->disks; |
NeilBrown | 474af965fe | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3304 | struct r5dev *pdev, *qdev; |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3305 | |
| 3306 | clear_bit(STRIPE_HANDLE, &sh->state); |
Dan Williams | 257a4b4 | 2011-11-08 16:22:06 +1100 | [diff] [blame] | 3307 | if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3308 | /* already being handled, ensure it gets handled |
| 3309 | * again when current action finishes */ |
| 3310 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3311 | return; |
| 3312 | } |
| 3313 | |
| 3314 | if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { |
| 3315 | set_bit(STRIPE_SYNCING, &sh->state); |
| 3316 | clear_bit(STRIPE_INSYNC, &sh->state); |
| 3317 | } |
| 3318 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 3319 | |
| 3320 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " |
| 3321 | "pd_idx=%d, qd_idx=%d, check:%d, reconstruct:%d\n", |
| 3322 | (unsigned long long)sh->sector, sh->state, |
| 3323 | atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, |
| 3324 | sh->check_state, sh->reconstruct_state); |
NeilBrown | cc94015 | 2011-07-26 11:35:35 +1000 | [diff] [blame] | 3325 | |
NeilBrown | acfe726 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3326 | analyse_stripe(sh, &s); |
NeilBrown | c5a3100 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3327 | |
NeilBrown | bc2607f | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3328 | if (s.handle_bad_blocks) { |
| 3329 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3330 | goto finish; |
| 3331 | } |
| 3332 | |
NeilBrown | 474af965fe | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3333 | if (unlikely(s.blocked_rdev)) { |
| 3334 | if (s.syncing || s.expanding || s.expanded || |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3335 | s.replacing || s.to_write || s.written) { |
NeilBrown | 474af965fe | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3336 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3337 | goto finish; |
| 3338 | } |
| 3339 | /* There is nothing for the blocked_rdev to block */ |
| 3340 | rdev_dec_pending(s.blocked_rdev, conf->mddev); |
| 3341 | s.blocked_rdev = NULL; |
| 3342 | } |
| 3343 | |
| 3344 | if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { |
| 3345 | set_bit(STRIPE_OP_BIOFILL, &s.ops_request); |
| 3346 | set_bit(STRIPE_BIOFILL_RUN, &sh->state); |
| 3347 | } |
| 3348 | |
| 3349 | pr_debug("locked=%d uptodate=%d to_read=%d" |
| 3350 | " to_write=%d failed=%d failed_num=%d,%d\n", |
| 3351 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
| 3352 | s.failed_num[0], s.failed_num[1]); |
| 3353 | /* check if the array has lost more than max_degraded devices and, |
| 3354 | * if so, some requests might need to be failed. |
| 3355 | */ |
NeilBrown | 9a3f530 | 2011-11-08 16:22:01 +1100 | [diff] [blame] | 3356 | if (s.failed > conf->max_degraded) { |
| 3357 | sh->check_state = 0; |
| 3358 | sh->reconstruct_state = 0; |
| 3359 | if (s.to_read+s.to_write+s.written) |
| 3360 | handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3361 | if (s.syncing + s.replacing) |
NeilBrown | 9a3f530 | 2011-11-08 16:22:01 +1100 | [diff] [blame] | 3362 | handle_failed_sync(conf, sh, &s); |
| 3363 | } |
NeilBrown | 474af965fe | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3364 | |
| 3365 | /* |
| 3366 | * might be able to return some write requests if the parity blocks |
| 3367 | * are safe, or on a failed drive |
| 3368 | */ |
| 3369 | pdev = &sh->dev[sh->pd_idx]; |
| 3370 | s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) |
| 3371 | || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); |
| 3372 | qdev = &sh->dev[sh->qd_idx]; |
| 3373 | s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) |
| 3374 | || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) |
| 3375 | || conf->level < 6; |
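| | /* On RAID-4/5 (conf->level < 6) there is no Q block, so q_failed is |
| | * forced true and the qdev term of the test below is short-circuited; |
| | * only the P-block check gates handle_stripe_clean_event(). |
| | */ |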
| 3376 | |
| 3377 | if (s.written && |
| 3378 | (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) |
| 3379 | && !test_bit(R5_LOCKED, &pdev->flags) |
| 3380 | && test_bit(R5_UPTODATE, &pdev->flags)))) && |
| 3381 | (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) |
| 3382 | && !test_bit(R5_LOCKED, &qdev->flags) |
| 3383 | && test_bit(R5_UPTODATE, &qdev->flags))))) |
| 3384 | handle_stripe_clean_event(conf, sh, disks, &s.return_bi); |
| 3385 | |
| 3386 | /* Now we might consider reading some blocks, either to check/generate |
| 3387 | * parity, or to satisfy requests |
| 3388 | * or to load a block that is being partially written. |
| 3389 | */ |
| 3390 | if (s.to_read || s.non_overwrite |
| 3391 | || (conf->level == 6 && s.to_write && s.failed) |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3392 | || (s.syncing && (s.uptodate + s.compute < disks)) |
| 3393 | || s.replacing |
| 3394 | || s.expanding) |
NeilBrown | 474af965fe | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3395 | handle_stripe_fill(sh, &s, disks); |
| 3396 | |
NeilBrown | 8478955 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3397 | /* Now we check to see if any write operations have recently |
| 3398 | * completed |
| 3399 | */ |
| 3400 | prexor = 0; |
| 3401 | if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) |
| 3402 | prexor = 1; |
| 3403 | if (sh->reconstruct_state == reconstruct_state_drain_result || |
| 3404 | sh->reconstruct_state == reconstruct_state_prexor_drain_result) { |
| 3405 | sh->reconstruct_state = reconstruct_state_idle; |
| 3406 | |
| 3407 | /* All the 'written' buffers and the parity block are ready to |
| 3408 | * be written back to disk |
| 3409 | */ |
| 3410 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); |
| 3411 | BUG_ON(sh->qd_idx >= 0 && |
| 3412 | !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); |
| 3413 | for (i = disks; i--; ) { |
| 3414 | struct r5dev *dev = &sh->dev[i]; |
| 3415 | if (test_bit(R5_LOCKED, &dev->flags) && |
| 3416 | (i == sh->pd_idx || i == sh->qd_idx || |
| 3417 | dev->written)) { |
| 3418 | pr_debug("Writing block %d\n", i); |
| 3419 | set_bit(R5_Wantwrite, &dev->flags); |
| 3420 | if (prexor) |
| 3421 | continue; |
| 3422 | if (!test_bit(R5_Insync, &dev->flags) || |
| 3423 | ((i == sh->pd_idx || i == sh->qd_idx) && |
| 3424 | s.failed == 0)) |
| 3425 | set_bit(STRIPE_INSYNC, &sh->state); |
| 3426 | } |
| 3427 | } |
| 3428 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 3429 | s.dec_preread_active = 1; |
| 3430 | } |
| 3431 | |
| 3432 | /* Now to consider new write requests and what else, if anything |
| 3433 | * should be read. We do not handle new writes when: |
| 3434 | * 1/ A 'write' operation (copy+xor) is already in flight. |
| 3435 | * 2/ A 'check' operation is in flight, as it may clobber the parity |
| 3436 | * block. |
| 3437 | */ |
| 3438 | if (s.to_write && !sh->reconstruct_state && !sh->check_state) |
| 3439 | handle_stripe_dirtying(conf, sh, &s, disks); |
| 3440 | |
| 3441 | /* maybe we need to check and possibly fix the parity for this stripe |
| 3442 | * Any reads will already have been scheduled, so we just see if enough |
| 3443 | * data is available. The parity check is held off while parity |
| 3444 | * dependent operations are in flight. |
| 3445 | */ |
| 3446 | if (sh->check_state || |
| 3447 | (s.syncing && s.locked == 0 && |
| 3448 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && |
| 3449 | !test_bit(STRIPE_INSYNC, &sh->state))) { |
| 3450 | if (conf->level == 6) |
| 3451 | handle_parity_checks6(conf, sh, &s, disks); |
| 3452 | else |
| 3453 | handle_parity_checks5(conf, sh, &s, disks); |
| 3454 | } |
NeilBrown | c5a3100 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3455 | |
NeilBrown | 9a3e110 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3456 | if (s.replacing && s.locked == 0 |
| 3457 | && !test_bit(STRIPE_INSYNC, &sh->state)) { |
| 3458 | /* Write out to replacement devices where possible */ |
| 3459 | for (i = 0; i < conf->raid_disks; i++) |
| 3460 | if (test_bit(R5_UPTODATE, &sh->dev[i].flags) && |
| 3461 | test_bit(R5_NeedReplace, &sh->dev[i].flags)) { |
| 3462 | set_bit(R5_WantReplace, &sh->dev[i].flags); |
| 3463 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 3464 | s.locked++; |
| 3465 | } |
| 3466 | set_bit(STRIPE_INSYNC, &sh->state); |
| 3467 | } |
| 3468 | if ((s.syncing || s.replacing) && s.locked == 0 && |
| 3469 | test_bit(STRIPE_INSYNC, &sh->state)) { |
NeilBrown | c5a3100 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3470 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 3471 | clear_bit(STRIPE_SYNCING, &sh->state); |
| 3472 | } |
| 3473 | |
| 3474 | /* If the failed drives are just a ReadError, then we might need |
| 3475 | * to progress the repair/check process |
| 3476 | */ |
| 3477 | if (s.failed <= conf->max_degraded && !conf->mddev->ro) |
| 3478 | for (i = 0; i < s.failed; i++) { |
| 3479 | struct r5dev *dev = &sh->dev[s.failed_num[i]]; |
| 3480 | if (test_bit(R5_ReadError, &dev->flags) |
| 3481 | && !test_bit(R5_LOCKED, &dev->flags) |
| 3482 | && test_bit(R5_UPTODATE, &dev->flags) |
| 3483 | ) { |
| 3484 | if (!test_bit(R5_ReWrite, &dev->flags)) { |
| 3485 | set_bit(R5_Wantwrite, &dev->flags); |
| 3486 | set_bit(R5_ReWrite, &dev->flags); |
| 3487 | set_bit(R5_LOCKED, &dev->flags); |
| 3488 | s.locked++; |
| 3489 | } else { |
| 3490 | /* let's read it back */ |
| 3491 | set_bit(R5_Wantread, &dev->flags); |
| 3492 | set_bit(R5_LOCKED, &dev->flags); |
| 3493 | s.locked++; |
| 3494 | } |
| 3495 | } |
| 3496 | } |
| 3497 | |
| 3498 | |
NeilBrown | 3687c06 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3499 | /* Finish reconstruct operations initiated by the expansion process */ |
| 3500 | if (sh->reconstruct_state == reconstruct_state_result) { |
| 3501 | struct stripe_head *sh_src |
| 3502 | = get_active_stripe(conf, sh->sector, 1, 1, 1); |
| 3503 | if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { |
| 3504 | /* sh cannot be written until sh_src has been read. |
| 3505 | * so arrange for sh to be delayed a little |
| 3506 | */ |
| 3507 | set_bit(STRIPE_DELAYED, &sh->state); |
| 3508 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3509 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, |
| 3510 | &sh_src->state)) |
| 3511 | atomic_inc(&conf->preread_active_stripes); |
| 3512 | release_stripe(sh_src); |
| 3513 | goto finish; |
| 3514 | } |
| 3515 | if (sh_src) |
| 3516 | release_stripe(sh_src); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3517 | |
NeilBrown | 3687c06 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 3518 | sh->reconstruct_state = reconstruct_state_idle; |
| 3519 | clear_bit(STRIPE_EXPANDING, &sh->state); |
| 3520 | for (i = conf->raid_disks; i--; ) { |
| 3521 | set_bit(R5_Wantwrite, &sh->dev[i].flags); |
| 3522 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 3523 | s.locked++; |
| 3524 | } |
| 3525 | } |
| 3526 | |
| 3527 | if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && |
| 3528 | !sh->reconstruct_state) { |
| 3529 | /* Need to write out all blocks after computing parity */ |
| 3530 | sh->disks = conf->raid_disks; |
| 3531 | stripe_set_idx(sh->sector, conf, 0, sh); |
| 3532 | schedule_reconstruction(sh, &s, 1, 1); |
| 3533 | } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { |
| 3534 | clear_bit(STRIPE_EXPAND_READY, &sh->state); |
| 3535 | atomic_dec(&conf->reshape_stripes); |
| 3536 | wake_up(&conf->wait_for_overlap); |
| 3537 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 3538 | } |
| 3539 | |
| 3540 | if (s.expanding && s.locked == 0 && |
| 3541 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) |
| 3542 | handle_stripe_expansion(conf, sh); |
| 3543 | |
| 3544 | finish: |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3545 | /* wait for this device to become unblocked */ |
NeilBrown | 43220aa | 2011-08-31 12:49:14 +1000 | [diff] [blame] | 3546 | if (conf->mddev->external && unlikely(s.blocked_rdev)) |
NeilBrown | c5709ef | 2011-07-26 11:35:20 +1000 | [diff] [blame] | 3547 | md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3548 | |
NeilBrown | bc2607f | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3549 | if (s.handle_bad_blocks) |
| 3550 | for (i = disks; i--; ) { |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 3551 | struct md_rdev *rdev; |
NeilBrown | bc2607f | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3552 | struct r5dev *dev = &sh->dev[i]; |
| 3553 | if (test_and_clear_bit(R5_WriteError, &dev->flags)) { |
| 3554 | /* We own a safe reference to the rdev */ |
| 3555 | rdev = conf->disks[i].rdev; |
| 3556 | if (!rdev_set_badblocks(rdev, sh->sector, |
| 3557 | STRIPE_SECTORS, 0)) |
| 3558 | md_error(conf->mddev, rdev); |
| 3559 | rdev_dec_pending(rdev, conf->mddev); |
| 3560 | } |
NeilBrown | b84db56 | 2011-07-28 11:39:23 +1000 | [diff] [blame] | 3561 | if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { |
| 3562 | rdev = conf->disks[i].rdev; |
| 3563 | rdev_clear_badblocks(rdev, sh->sector, |
| 3564 | STRIPE_SECTORS); |
| 3565 | rdev_dec_pending(rdev, conf->mddev); |
| 3566 | } |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3567 | if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { |
| 3568 | rdev = conf->disks[i].replacement; |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3569 | if (!rdev) |
| 3570 | /* rdev have been moved down */ |
| 3571 | rdev = conf->disks[i].rdev; |
NeilBrown | 977df36 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 3572 | rdev_clear_badblocks(rdev, sh->sector, |
| 3573 | STRIPE_SECTORS); |
| 3574 | rdev_dec_pending(rdev, conf->mddev); |
| 3575 | } |
NeilBrown | bc2607f | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3576 | } |
| 3577 | |
Yuri Tikhonov | 6c0069c | 2009-08-29 19:13:13 -0700 | [diff] [blame] | 3578 | if (s.ops_request) |
| 3579 | raid_run_ops(sh, s.ops_request); |
| 3580 | |
Dan Williams | f0e43bc | 2008-06-28 08:31:55 +1000 | [diff] [blame] | 3581 | ops_run_io(sh, &s); |
| 3582 | |
NeilBrown | c5709ef | 2011-07-26 11:35:20 +1000 | [diff] [blame] | 3583 | if (s.dec_preread_active) { |
NeilBrown | 729a186 | 2009-12-14 12:49:50 +1100 | [diff] [blame] | 3584 | /* We delay this until after ops_run_io so that if make_request |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 3585 | * is waiting on a flush, it won't continue until the writes |
NeilBrown | 729a186 | 2009-12-14 12:49:50 +1100 | [diff] [blame] | 3586 | * have actually been submitted. |
| 3587 | */ |
| 3588 | atomic_dec(&conf->preread_active_stripes); |
| 3589 | if (atomic_read(&conf->preread_active_stripes) < |
| 3590 | IO_THRESHOLD) |
| 3591 | md_wakeup_thread(conf->mddev->thread); |
| 3592 | } |
| 3593 | |
NeilBrown | c5709ef | 2011-07-26 11:35:20 +1000 | [diff] [blame] | 3594 | return_io(s.return_bi); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3595 | |
Dan Williams | 257a4b4 | 2011-11-08 16:22:06 +1100 | [diff] [blame] | 3596 | clear_bit_unlock(STRIPE_ACTIVE, &sh->state); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3597 | } |
| 3598 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3599 | static void raid5_activate_delayed(struct r5conf *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3600 | { |
| 3601 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { |
| 3602 | while (!list_empty(&conf->delayed_list)) { |
| 3603 | struct list_head *l = conf->delayed_list.next; |
| 3604 | struct stripe_head *sh; |
| 3605 | sh = list_entry(l, struct stripe_head, lru); |
| 3606 | list_del_init(l); |
| 3607 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 3608 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 3609 | atomic_inc(&conf->preread_active_stripes); |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 3610 | list_add_tail(&sh->lru, &conf->hold_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3611 | } |
NeilBrown | 482c083 | 2011-04-18 18:25:42 +1000 | [diff] [blame] | 3612 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3613 | } |
| 3614 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3615 | static void activate_bit_delay(struct r5conf *conf) |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3616 | { |
| 3617 | /* device_lock is held */ |
| 3618 | struct list_head head; |
| 3619 | list_add(&head, &conf->bitmap_list); |
| 3620 | list_del_init(&conf->bitmap_list); |
| 3621 | while (!list_empty(&head)) { |
| 3622 | struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); |
| 3623 | list_del_init(&sh->lru); |
| 3624 | atomic_inc(&sh->count); |
| 3625 | __release_stripe(conf, sh); |
| 3626 | } |
| 3627 | } |
| 3628 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 3629 | int md_raid5_congested(struct mddev *mddev, int bits) |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 3630 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3631 | struct r5conf *conf = mddev->private; |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 3632 | |
| 3633 | /* No difference between reads and writes. Just check |
| 3634 | * how busy the stripe_cache is |
| 3635 | */ |
NeilBrown | 3fa841d | 2009-09-23 18:10:29 +1000 | [diff] [blame] | 3636 | |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 3637 | if (conf->inactive_blocked) |
| 3638 | return 1; |
| 3639 | if (conf->quiesce) |
| 3640 | return 1; |
| 3641 | if (list_empty_careful(&conf->inactive_list)) |
| 3642 | return 1; |
| 3643 | |
| 3644 | return 0; |
| 3645 | } |
NeilBrown | 11d8a6e | 2010-07-26 11:57:07 +1000 | [diff] [blame] | 3646 | EXPORT_SYMBOL_GPL(md_raid5_congested); |
| 3647 | |
| 3648 | static int raid5_congested(void *data, int bits) |
| 3649 | { |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 3650 | struct mddev *mddev = data; |
NeilBrown | 11d8a6e | 2010-07-26 11:57:07 +1000 | [diff] [blame] | 3651 | |
| 3652 | return mddev_congested(mddev, bits) || |
| 3653 | md_raid5_congested(mddev, bits); |
| 3654 | } |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 3655 | |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3656 | /* We want read requests to align with chunks where possible, |
| 3657 | * but write requests don't need to. |
| 3658 | */ |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3659 | static int raid5_mergeable_bvec(struct request_queue *q, |
| 3660 | struct bvec_merge_data *bvm, |
| 3661 | struct bio_vec *biovec) |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3662 | { |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 3663 | struct mddev *mddev = q->queuedata; |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3664 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3665 | int max; |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 3666 | unsigned int chunk_sectors = mddev->chunk_sectors; |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3667 | unsigned int bio_sectors = bvm->bi_size >> 9; |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3668 | |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3669 | if ((bvm->bi_rw & 1) == WRITE) |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3670 | return biovec->bv_len; /* always allow writes to be mergeable */ |
| 3671 | |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 3672 | if (mddev->new_chunk_sectors < mddev->chunk_sectors) |
| 3673 | chunk_sectors = mddev->new_chunk_sectors; |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3674 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; |
| 3675 | if (max < 0) max = 0; |
| 3676 | if (max <= biovec->bv_len && bio_sectors == 0) |
| 3677 | return biovec->bv_len; |
| 3678 | else |
| 3679 | return max; |
| 3680 | } |
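| | /* |
| | * Worked example for the merge limit above (numbers are illustrative): |
| | * with chunk_sectors == 128, a read bio starting 100 sectors into a |
| | * chunk and currently 20 sectors long gives |
| | * |
| | *	max = (128 - (100 + 20)) << 9 = 4096 bytes, |
| | * |
| | * so at most 4096 further bytes may be merged before the chunk |
| | * boundary.  The bio_sectors == 0 special case lets an empty bio accept |
| | * at least one bvec even if that bvec alone crosses the boundary. |
| | */ |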
| 3681 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3682 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 3683 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3684 | { |
| 3685 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 3686 | unsigned int chunk_sectors = mddev->chunk_sectors; |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3687 | unsigned int bio_sectors = bio->bi_size >> 9; |
| 3688 | |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 3689 | if (mddev->new_chunk_sectors < mddev->chunk_sectors) |
| 3690 | chunk_sectors = mddev->new_chunk_sectors; |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3691 | return chunk_sectors >= |
| 3692 | ((sector & (chunk_sectors - 1)) + bio_sectors); |
| 3693 | } |
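| | /* |
| | * Illustrative check: with chunk_sectors == 128, a 16-sector read |
| | * starting 120 sectors into its chunk gives (120 & 127) + 16 == 136, |
| | * which exceeds 128, so in_chunk_boundary() returns 0 and the read is |
| | * handled through the normal stripe-cache path instead of the aligned |
| | * fast path below. |
| | */ |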
| 3694 | |
| 3695 | /* |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3696 | * add a bio to the retry LIFO in O(1) (we are in interrupt context); |
| 3697 | * it is sampled later by raid5d. |
| 3698 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3699 | static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3700 | { |
| 3701 | unsigned long flags; |
| 3702 | |
| 3703 | spin_lock_irqsave(&conf->device_lock, flags); |
| 3704 | |
| 3705 | bi->bi_next = conf->retry_read_aligned_list; |
| 3706 | conf->retry_read_aligned_list = bi; |
| 3707 | |
| 3708 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 3709 | md_wakeup_thread(conf->mddev->thread); |
| 3710 | } |
| 3711 | |
| 3712 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3713 | static struct bio *remove_bio_from_retry(struct r5conf *conf) |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3714 | { |
| 3715 | struct bio *bi; |
| 3716 | |
| 3717 | bi = conf->retry_read_aligned; |
| 3718 | if (bi) { |
| 3719 | conf->retry_read_aligned = NULL; |
| 3720 | return bi; |
| 3721 | } |
| 3722 | bi = conf->retry_read_aligned_list; |
| 3723 | if(bi) { |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3724 | conf->retry_read_aligned_list = bi->bi_next; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3725 | bi->bi_next = NULL; |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 3726 | /* |
| 3727 | * this sets the active stripe count to 1 and the processed |
| 3728 | * stripe count to zero (upper 8 bits) |
| 3729 | */ |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3730 | bi->bi_phys_segments = 1; /* biased count of active stripes */ |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3731 | } |
| 3732 | |
| 3733 | return bi; |
| 3734 | } |
| 3735 | |
| 3736 | |
| 3737 | /* |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3738 | * The "raid5_align_endio" should check if the read succeeded and if it |
| 3739 | * did, call bio_endio on the original bio (having bio_put the new bio |
| 3740 | * first). |
| 3741 | * If the read failed, hand the original bio to add_bio_to_retry(). |
| 3742 | */ |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3743 | static void raid5_align_endio(struct bio *bi, int error) |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3744 | { |
| 3745 | struct bio* raid_bi = bi->bi_private; |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 3746 | struct mddev *mddev; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3747 | struct r5conf *conf; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3748 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 3749 | struct md_rdev *rdev; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3750 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3751 | bio_put(bi); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3752 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3753 | rdev = (void*)raid_bi->bi_next; |
| 3754 | raid_bi->bi_next = NULL; |
NeilBrown | 2b7f222 | 2010-03-25 16:06:03 +1100 | [diff] [blame] | 3755 | mddev = rdev->mddev; |
| 3756 | conf = mddev->private; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3757 | |
| 3758 | rdev_dec_pending(rdev, conf->mddev); |
| 3759 | |
| 3760 | if (!error && uptodate) { |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3761 | bio_endio(raid_bi, 0); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3762 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
| 3763 | wake_up(&conf->wait_for_stripe); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3764 | return; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3765 | } |
| 3766 | |
| 3767 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3768 | pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3769 | |
| 3770 | add_bio_to_retry(raid_bi, conf); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3771 | } |
| 3772 | |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3773 | static int bio_fits_rdev(struct bio *bi) |
| 3774 | { |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3775 | struct request_queue *q = bdev_get_queue(bi->bi_bdev); |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3776 | |
Martin K. Petersen | ae03bf6 | 2009-05-22 17:17:50 -0400 | [diff] [blame] | 3777 | if ((bi->bi_size>>9) > queue_max_sectors(q)) |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3778 | return 0; |
| 3779 | blk_recount_segments(q, bi); |
Martin K. Petersen | 8a78362 | 2010-02-26 00:20:39 -0500 | [diff] [blame] | 3780 | if (bi->bi_phys_segments > queue_max_segments(q)) |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3781 | return 0; |
| 3782 | |
| 3783 | if (q->merge_bvec_fn) |
| 3784 | /* it's too hard to apply the merge_bvec_fn at this stage, |
| 3785 | * so just give up |
| 3786 | */ |
| 3787 | return 0; |
| 3788 | |
| 3789 | return 1; |
| 3790 | } |
| 3791 | |
| 3792 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 3793 | static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3794 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3795 | struct r5conf *conf = mddev->private; |
NeilBrown | 8553fe7ec | 2009-12-14 12:49:47 +1100 | [diff] [blame] | 3796 | int dd_idx; |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3797 | struct bio* align_bi; |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 3798 | struct md_rdev *rdev; |
NeilBrown | 671488c | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3799 | sector_t end_sector; |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3800 | |
| 3801 | if (!in_chunk_boundary(mddev, raid_bio)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3802 | pr_debug("chunk_aligned_read : non aligned\n"); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3803 | return 0; |
| 3804 | } |
| 3805 | /* |
NeilBrown | a167f66 | 2010-10-26 18:31:13 +1100 | [diff] [blame] | 3806 | * use bio_clone_mddev to make a copy of the bio |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3807 | */ |
NeilBrown | a167f66 | 2010-10-26 18:31:13 +1100 | [diff] [blame] | 3808 | align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3809 | if (!align_bi) |
| 3810 | return 0; |
| 3811 | /* |
| 3812 | * set bi_end_io to a new function, and set bi_private to the |
| 3813 | * original bio. |
| 3814 | */ |
| 3815 | align_bi->bi_end_io = raid5_align_endio; |
| 3816 | align_bi->bi_private = raid_bio; |
| 3817 | /* |
| 3818 | * compute position |
| 3819 | */ |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3820 | align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, |
| 3821 | 0, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3822 | &dd_idx, NULL); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3823 | |
NeilBrown | 671488c | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3824 | end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3825 | rcu_read_lock(); |
NeilBrown | 671488c | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 3826 | rdev = rcu_dereference(conf->disks[dd_idx].replacement); |
| 3827 | if (!rdev || test_bit(Faulty, &rdev->flags) || |
| 3828 | rdev->recovery_offset < end_sector) { |
| 3829 | rdev = rcu_dereference(conf->disks[dd_idx].rdev); |
| 3830 | if (rdev && |
| 3831 | (test_bit(Faulty, &rdev->flags) || |
| 3832 | !(test_bit(In_sync, &rdev->flags) || |
| 3833 | rdev->recovery_offset >= end_sector))) |
| 3834 | rdev = NULL; |
| 3835 | } |
| 3836 | if (rdev) { |
NeilBrown | 31c176e | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3837 | sector_t first_bad; |
| 3838 | int bad_sectors; |
| 3839 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3840 | atomic_inc(&rdev->nr_pending); |
| 3841 | rcu_read_unlock(); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3842 | raid_bio->bi_next = (void*)rdev; |
| 3843 | align_bi->bi_bdev = rdev->bdev; |
| 3844 | align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); |
| 3845 | align_bi->bi_sector += rdev->data_offset; |
| 3846 | |
NeilBrown | 31c176e | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 3847 | if (!bio_fits_rdev(align_bi) || |
| 3848 | is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9, |
| 3849 | &first_bad, &bad_sectors)) { |
| 3850 | /* too big in some way, or has a known bad block */ |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3851 | bio_put(align_bi); |
| 3852 | rdev_dec_pending(rdev, mddev); |
| 3853 | return 0; |
| 3854 | } |
| 3855 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3856 | spin_lock_irq(&conf->device_lock); |
| 3857 | wait_event_lock_irq(conf->wait_for_stripe, |
| 3858 | conf->quiesce == 0, |
| 3859 | conf->device_lock, /* nothing */); |
| 3860 | atomic_inc(&conf->active_aligned_reads); |
| 3861 | spin_unlock_irq(&conf->device_lock); |
| 3862 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3863 | generic_make_request(align_bi); |
| 3864 | return 1; |
| 3865 | } else { |
| 3866 | rcu_read_unlock(); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3867 | bio_put(align_bi); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3868 | return 0; |
| 3869 | } |
| 3870 | } |
| 3871 | |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 3872 | /* __get_priority_stripe - get the next stripe to process |
| 3873 | * |
| 3874 | * Full stripe writes are allowed to pass preread active stripes up until |
| 3875 | * the bypass_threshold is exceeded. In general the bypass_count |
| 3876 | * increments when the handle_list is handled before the hold_list; however, it |
| 3877 |  * will not be incremented if STRIPE_IO_STARTED is found to be set, signifying a
| 3878 |  * stripe with in-flight I/O. The bypass_count will be reset when the
| 3879 | * head of the hold_list has changed, i.e. the head was promoted to the |
| 3880 | * handle_list. |
| 3881 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3882 | static struct stripe_head *__get_priority_stripe(struct r5conf *conf) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 3883 | { |
| 3884 | struct stripe_head *sh; |
| 3885 | |
| 3886 | pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", |
| 3887 | __func__, |
| 3888 | list_empty(&conf->handle_list) ? "empty" : "busy", |
| 3889 | list_empty(&conf->hold_list) ? "empty" : "busy", |
| 3890 | atomic_read(&conf->pending_full_writes), conf->bypass_count); |
| 3891 | |
| 3892 | if (!list_empty(&conf->handle_list)) { |
| 3893 | sh = list_entry(conf->handle_list.next, typeof(*sh), lru); |
| 3894 | |
| 3895 | if (list_empty(&conf->hold_list)) |
| 3896 | conf->bypass_count = 0; |
| 3897 | else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { |
| 3898 | if (conf->hold_list.next == conf->last_hold) |
| 3899 | conf->bypass_count++; |
| 3900 | else { |
| 3901 | conf->last_hold = conf->hold_list.next; |
| 3902 | conf->bypass_count -= conf->bypass_threshold; |
| 3903 | if (conf->bypass_count < 0) |
| 3904 | conf->bypass_count = 0; |
| 3905 | } |
| 3906 | } |
| 3907 | } else if (!list_empty(&conf->hold_list) && |
| 3908 | ((conf->bypass_threshold && |
| 3909 | conf->bypass_count > conf->bypass_threshold) || |
| 3910 | atomic_read(&conf->pending_full_writes) == 0)) { |
| 3911 | sh = list_entry(conf->hold_list.next, |
| 3912 | typeof(*sh), lru); |
| 3913 | conf->bypass_count -= conf->bypass_threshold; |
| 3914 | if (conf->bypass_count < 0) |
| 3915 | conf->bypass_count = 0; |
| 3916 | } else |
| 3917 | return NULL; |
| 3918 | |
| 3919 | list_del_init(&sh->lru); |
| 3920 | atomic_inc(&sh->count); |
| 3921 | BUG_ON(atomic_read(&sh->count) != 1); |
| 3922 | return sh; |
| 3923 | } |
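| | /*
| |  * Worked example (illustrative; assumes the default bypass_threshold of 1):
| |  * while fresh stripes keep arriving on handle_list and the same preread
| |  * stripe waits at the head of hold_list, each call above services the
| |  * handle_list head and, provided that stripe has not already started I/O,
| |  * increments bypass_count.  Once handle_list drains and bypass_count has
| |  * exceeded the threshold -- or there are no pending full-stripe writes at
| |  * all -- the held stripe is promoted and bypass_count is pulled back down
| |  * by bypass_threshold.
| |  */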
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3924 | |
Linus Torvalds | b4fdcb0 | 2011-11-04 17:06:58 -0700 | [diff] [blame] | 3925 | static void make_request(struct mddev *mddev, struct bio * bi) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3926 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 3927 | struct r5conf *conf = mddev->private; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3928 | int dd_idx; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3929 | sector_t new_sector; |
| 3930 | sector_t logical_sector, last_sector; |
| 3931 | struct stripe_head *sh; |
Jens Axboe | a362357 | 2005-11-01 09:26:16 +0100 | [diff] [blame] | 3932 | const int rw = bio_data_dir(bi); |
NeilBrown | 4907732 | 2010-03-25 16:20:56 +1100 | [diff] [blame] | 3933 | int remaining; |
NeilBrown | 7c13edc | 2011-04-18 18:25:43 +1000 | [diff] [blame] | 3934 | int plugged; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3935 | |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 3936 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { |
| 3937 | md_flush_request(mddev, bi); |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 3938 | return; |
NeilBrown | e5dcdd8 | 2005-09-09 16:23:41 -0700 | [diff] [blame] | 3939 | } |
| 3940 | |
NeilBrown | 3d310eb | 2005-06-21 17:17:26 -0700 | [diff] [blame] | 3941 | md_write_start(mddev, bi); |
NeilBrown | 06d91a5 | 2005-06-21 17:17:12 -0700 | [diff] [blame] | 3942 | |
NeilBrown | 802ba06 | 2006-12-13 00:34:13 -0800 | [diff] [blame] | 3943 | if (rw == READ && |
Raz Ben-Jehuda(caro) | 5248861 | 2006-12-10 02:20:48 -0800 | [diff] [blame] | 3944 | mddev->reshape_position == MaxSector && |
NeilBrown | 21a52c6 | 2010-04-01 15:02:13 +1100 | [diff] [blame] | 3945 | chunk_aligned_read(mddev,bi)) |
Christoph Hellwig | 5a7bbad | 2011-09-12 12:12:01 +0200 | [diff] [blame] | 3946 | return; |
Raz Ben-Jehuda(caro) | 5248861 | 2006-12-10 02:20:48 -0800 | [diff] [blame] | 3947 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3948 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
| 3949 | last_sector = bi->bi_sector + (bi->bi_size>>9); |
| 3950 | bi->bi_next = NULL; |
| 3951 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ |
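| | /*
| |  * Note (editorial sketch): the loop below walks the request one
| |  * STRIPE_SECTORS-sized piece at a time.  logical_sector is the start of the
| |  * request rounded down to a stripe boundary -- with 4KiB pages STRIPE_SECTORS
| |  * is 8, so a bio starting at sector 1234 begins at logical sector 1232.
| |  * bi_phys_segments acts as a reference count: it starts at 1, each successful
| |  * add_stripe_bio() takes another reference, and the
| |  * raid5_dec_bi_phys_segments() after the loop drops the initial one, so
| |  * bio_endio() runs only after every stripe has finished with the bio.
| |  */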
NeilBrown | 06d91a5 | 2005-06-21 17:17:12 -0700 | [diff] [blame] | 3952 | |
NeilBrown | 7c13edc | 2011-04-18 18:25:43 +1000 | [diff] [blame] | 3953 | plugged = mddev_check_plugged(mddev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3954 | for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { |
| 3955 | DEFINE_WAIT(w); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3956 | int disks, data_disks; |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3957 | int previous; |
NeilBrown | b578d55 | 2006-03-27 01:18:12 -0800 | [diff] [blame] | 3958 | |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3959 | retry: |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3960 | previous = 0; |
NeilBrown | b0f9ec0 | 2009-03-31 15:27:18 +1100 | [diff] [blame] | 3961 | disks = conf->raid_disks; |
NeilBrown | b578d55 | 2006-03-27 01:18:12 -0800 | [diff] [blame] | 3962 | prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); |
NeilBrown | b0f9ec0 | 2009-03-31 15:27:18 +1100 | [diff] [blame] | 3963 | if (unlikely(conf->reshape_progress != MaxSector)) { |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3964 | /* spinlock is needed as reshape_progress may be |
NeilBrown | df8e7f7 | 2006-03-27 01:18:15 -0800 | [diff] [blame] | 3965 | * 64bit on a 32bit platform, and so it might be |
| 3966 | 			 * possible to see a half-updated value.
Jesper Juhl | aeb878b | 2011-04-10 18:06:17 +0200 | [diff] [blame] | 3967 | * Of course reshape_progress could change after |
NeilBrown | df8e7f7 | 2006-03-27 01:18:15 -0800 | [diff] [blame] | 3968 | * the lock is dropped, so once we get a reference |
| 3969 | 			 * to the stripe that we think we want, we will have
| 3970 | * to check again. |
| 3971 | */ |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3972 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3973 | if (mddev->delta_disks < 0 |
| 3974 | ? logical_sector < conf->reshape_progress |
| 3975 | : logical_sector >= conf->reshape_progress) { |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3976 | disks = conf->previous_raid_disks; |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3977 | previous = 1; |
| 3978 | } else { |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3979 | if (mddev->delta_disks < 0 |
| 3980 | ? logical_sector < conf->reshape_safe |
| 3981 | : logical_sector >= conf->reshape_safe) { |
NeilBrown | b578d55 | 2006-03-27 01:18:12 -0800 | [diff] [blame] | 3982 | spin_unlock_irq(&conf->device_lock); |
| 3983 | schedule(); |
| 3984 | goto retry; |
| 3985 | } |
| 3986 | } |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3987 | spin_unlock_irq(&conf->device_lock); |
| 3988 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3989 | data_disks = disks - conf->max_degraded; |
| 3990 | |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3991 | new_sector = raid5_compute_sector(conf, logical_sector, |
| 3992 | previous, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3993 | &dd_idx, NULL); |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 3994 | pr_debug("raid456: make_request, sector %llu logical %llu\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3995 | (unsigned long long)new_sector, |
| 3996 | (unsigned long long)logical_sector); |
| 3997 | |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3998 | sh = get_active_stripe(conf, new_sector, previous, |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 3999 | (bi->bi_rw&RWA_MASK), 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4000 | if (sh) { |
NeilBrown | b0f9ec0 | 2009-03-31 15:27:18 +1100 | [diff] [blame] | 4001 | if (unlikely(previous)) { |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4002 | /* expansion might have moved on while waiting for a |
NeilBrown | df8e7f7 | 2006-03-27 01:18:15 -0800 | [diff] [blame] | 4003 | * stripe, so we must do the range check again. |
| 4004 | * Expansion could still move past after this |
| 4005 | * test, but as we are holding a reference to |
| 4006 | * 'sh', we know that if that happens, |
| 4007 | * STRIPE_EXPANDING will get set and the expansion |
| 4008 | * won't proceed until we finish with the stripe. |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4009 | */ |
| 4010 | int must_retry = 0; |
| 4011 | spin_lock_irq(&conf->device_lock); |
NeilBrown | b0f9ec0 | 2009-03-31 15:27:18 +1100 | [diff] [blame] | 4012 | if (mddev->delta_disks < 0 |
| 4013 | ? logical_sector >= conf->reshape_progress |
| 4014 | : logical_sector < conf->reshape_progress) |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4015 | /* mismatch, need to try again */ |
| 4016 | must_retry = 1; |
| 4017 | spin_unlock_irq(&conf->device_lock); |
| 4018 | if (must_retry) { |
| 4019 | release_stripe(sh); |
Dan Williams | 7a3ab90 | 2009-06-16 16:00:33 -0700 | [diff] [blame] | 4020 | schedule(); |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4021 | goto retry; |
| 4022 | } |
| 4023 | } |
NeilBrown | e62e58a | 2009-07-01 13:15:35 +1000 | [diff] [blame] | 4024 | |
Namhyung Kim | ffd96e3 | 2011-07-18 17:38:51 +1000 | [diff] [blame] | 4025 | if (rw == WRITE && |
NeilBrown | a5c308d | 2009-07-01 13:15:35 +1000 | [diff] [blame] | 4026 | logical_sector >= mddev->suspend_lo && |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 4027 | logical_sector < mddev->suspend_hi) { |
| 4028 | release_stripe(sh); |
NeilBrown | e62e58a | 2009-07-01 13:15:35 +1000 | [diff] [blame] | 4029 | /* As the suspend_* range is controlled by |
| 4030 | * userspace, we want an interruptible |
| 4031 | * wait. |
| 4032 | */ |
| 4033 | flush_signals(current); |
| 4034 | prepare_to_wait(&conf->wait_for_overlap, |
| 4035 | &w, TASK_INTERRUPTIBLE); |
| 4036 | if (logical_sector >= mddev->suspend_lo && |
| 4037 | logical_sector < mddev->suspend_hi) |
| 4038 | schedule(); |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 4039 | goto retry; |
| 4040 | } |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4041 | |
| 4042 | if (test_bit(STRIPE_EXPANDING, &sh->state) || |
Namhyung Kim | ffd96e3 | 2011-07-18 17:38:51 +1000 | [diff] [blame] | 4043 | !add_stripe_bio(sh, bi, dd_idx, rw)) { |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4044 | /* Stripe is busy expanding or |
| 4045 | * add failed due to overlap. Flush everything |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4046 | * and wait a while |
| 4047 | */ |
NeilBrown | 482c083 | 2011-04-18 18:25:42 +1000 | [diff] [blame] | 4048 | md_wakeup_thread(mddev->thread); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4049 | release_stripe(sh); |
| 4050 | schedule(); |
| 4051 | goto retry; |
| 4052 | } |
| 4053 | finish_wait(&conf->wait_for_overlap, &w); |
NeilBrown | 6ed3003 | 2008-02-06 01:40:00 -0800 | [diff] [blame] | 4054 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4055 | clear_bit(STRIPE_DELAYED, &sh->state); |
Tejun Heo | e9c7469 | 2010-09-03 11:56:18 +0200 | [diff] [blame] | 4056 | if ((bi->bi_rw & REQ_SYNC) && |
NeilBrown | 729a186 | 2009-12-14 12:49:50 +1100 | [diff] [blame] | 4057 | !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 4058 | atomic_inc(&conf->preread_active_stripes); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4059 | release_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4060 | } else { |
| 4061 | 			/* cannot get stripe for read-ahead, just give up */
| 4062 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
| 4063 | finish_wait(&conf->wait_for_overlap, &w); |
| 4064 | break; |
| 4065 | } |
| 4066 | |
| 4067 | } |
NeilBrown | 7c13edc | 2011-04-18 18:25:43 +1000 | [diff] [blame] | 4068 | if (!plugged) |
| 4069 | md_wakeup_thread(mddev->thread); |
| 4070 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4071 | spin_lock_irq(&conf->device_lock); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4072 | remaining = raid5_dec_bi_phys_segments(bi); |
NeilBrown | f634475 | 2006-03-27 01:18:17 -0800 | [diff] [blame] | 4073 | spin_unlock_irq(&conf->device_lock); |
| 4074 | if (remaining == 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4075 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4076 | if ( rw == WRITE ) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4077 | md_write_end(mddev); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 4078 | |
Neil Brown | 0e13fe23 | 2008-06-28 08:31:20 +1000 | [diff] [blame] | 4079 | bio_endio(bi, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4080 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4081 | } |
| 4082 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4083 | static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); |
Dan Williams | b522adc | 2009-03-31 15:00:31 +1100 | [diff] [blame] | 4084 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4085 | static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4086 | { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4087 | /* reshaping is quite different to recovery/resync so it is |
| 4088 | * handled quite separately ... here. |
| 4089 | * |
| 4090 | * On each call to sync_request, we gather one chunk worth of |
| 4091 | * destination stripes and flag them as expanding. |
| 4092 | * Then we find all the source stripes and request reads. |
| 4093 | * As the reads complete, handle_stripe will copy the data |
| 4094 | * into the destination stripe and release that stripe. |
| 4095 | */ |
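| | /*
| |  * Note (editorial outline of the code below): each pass (1) checkpoints the
| |  * superblock if writepos has run too far past the last recorded safe
| |  * position or ~10s have elapsed, (2) gathers reshape_sectors worth of
| |  * destination stripes, marks them STRIPE_EXPANDING and zeroes any blocks
| |  * that lie beyond the end of the old array, (3) advances
| |  * conf->reshape_progress, (4) marks the corresponding source range
| |  * STRIPE_EXPAND_SOURCE so its reads get scheduled, and (5) only then
| |  * releases the destination stripes.
| |  */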
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4096 | struct r5conf *conf = mddev->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4097 | struct stripe_head *sh; |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 4098 | sector_t first_sector, last_sector; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4099 | int raid_disks = conf->previous_raid_disks; |
| 4100 | int data_disks = raid_disks - conf->max_degraded; |
| 4101 | int new_data_disks = conf->raid_disks - conf->max_degraded; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4102 | int i; |
| 4103 | int dd_idx; |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4104 | sector_t writepos, readpos, safepos; |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4105 | sector_t stripe_addr; |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4106 | int reshape_sectors; |
NeilBrown | ab69ae1 | 2009-03-31 15:26:47 +1100 | [diff] [blame] | 4107 | struct list_head stripes; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4108 | |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4109 | if (sector_nr == 0) { |
| 4110 | /* If restarting in the middle, skip the initial sectors */ |
| 4111 | if (mddev->delta_disks < 0 && |
| 4112 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { |
| 4113 | sector_nr = raid5_size(mddev, 0, 0) |
| 4114 | - conf->reshape_progress; |
NeilBrown | a639755 | 2009-08-13 10:13:00 +1000 | [diff] [blame] | 4115 | } else if (mddev->delta_disks >= 0 && |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4116 | conf->reshape_progress > 0) |
| 4117 | sector_nr = conf->reshape_progress; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4118 | sector_div(sector_nr, new_data_disks); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4119 | if (sector_nr) { |
NeilBrown | 8dee721 | 2009-11-06 14:59:29 +1100 | [diff] [blame] | 4120 | mddev->curr_resync_completed = sector_nr; |
| 4121 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4122 | *skipped = 1; |
| 4123 | return sector_nr; |
| 4124 | } |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4125 | } |
| 4126 | |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4127 | /* We need to process a full chunk at a time. |
| 4128 | * If old and new chunk sizes differ, we need to process the |
| 4129 | * largest of these |
| 4130 | */ |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 4131 | if (mddev->new_chunk_sectors > mddev->chunk_sectors) |
| 4132 | reshape_sectors = mddev->new_chunk_sectors; |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4133 | else |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 4134 | reshape_sectors = mddev->chunk_sectors; |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4135 | |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4136 | /* we update the metadata when there is more than 3Meg |
| 4137 | * in the block range (that is rather arbitrary, should |
| 4138 | * probably be time based) or when the data about to be |
| 4139 | * copied would over-write the source of the data at |
| 4140 | * the front of the range. |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4141 | 	 * i.e. when the point one new-stripe beyond reshape_progress, mapped via the
| 4142 | 	 * new layout, lands beyond where reshape_safe maps to in the old layout.
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4143 | */ |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4144 | writepos = conf->reshape_progress; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4145 | sector_div(writepos, new_data_disks); |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4146 | readpos = conf->reshape_progress; |
| 4147 | sector_div(readpos, data_disks); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4148 | safepos = conf->reshape_safe; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4149 | sector_div(safepos, data_disks); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4150 | if (mddev->delta_disks < 0) { |
NeilBrown | ed37d83 | 2009-05-27 21:39:05 +1000 | [diff] [blame] | 4151 | writepos -= min_t(sector_t, reshape_sectors, writepos); |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4152 | readpos += reshape_sectors; |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4153 | safepos += reshape_sectors; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4154 | } else { |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4155 | writepos += reshape_sectors; |
NeilBrown | ed37d83 | 2009-05-27 21:39:05 +1000 | [diff] [blame] | 4156 | readpos -= min_t(sector_t, reshape_sectors, readpos); |
| 4157 | safepos -= min_t(sector_t, reshape_sectors, safepos); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4158 | } |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4159 | |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4160 | /* 'writepos' is the most advanced device address we might write. |
| 4161 | * 'readpos' is the least advanced device address we might read. |
| 4162 | * 'safepos' is the least address recorded in the metadata as having |
| 4163 | * been reshaped. |
| 4164 | * If 'readpos' is behind 'writepos', then there is no way that we can |
| 4165 | * ensure safety in the face of a crash - that must be done by userspace |
| 4166 | * making a backup of the data. So in that case there is no particular |
| 4167 | * rush to update metadata. |
| 4168 | * Otherwise if 'safepos' is behind 'writepos', then we really need to |
| 4169 | * update the metadata to advance 'safepos' to match 'readpos' so that |
| 4170 | * we can be safe in the event of a crash. |
| 4171 | * So we insist on updating metadata if safepos is behind writepos and |
| 4172 | * readpos is beyond writepos. |
| 4173 | * In any case, update the metadata every 10 seconds. |
| 4174 | * Maybe that number should be configurable, but I'm not sure it is |
| 4175 | * worth it.... maybe it could be a multiple of safemode_delay??? |
| 4176 | */ |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4177 | if ((mddev->delta_disks < 0 |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4178 | ? (safepos > writepos && readpos < writepos) |
| 4179 | : (safepos < writepos && readpos > writepos)) || |
| 4180 | time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4181 | /* Cannot proceed until we've updated the superblock... */ |
| 4182 | wait_event(conf->wait_for_overlap, |
| 4183 | atomic_read(&conf->reshape_stripes)==0); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4184 | mddev->reshape_position = conf->reshape_progress; |
NeilBrown | 75d3da4 | 2011-01-14 09:14:34 +1100 | [diff] [blame] | 4185 | mddev->curr_resync_completed = sector_nr; |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4186 | conf->reshape_checkpoint = jiffies; |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 4187 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4188 | md_wakeup_thread(mddev->thread); |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 4189 | wait_event(mddev->sb_wait, mddev->flags == 0 || |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4190 | kthread_should_stop()); |
| 4191 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4192 | conf->reshape_safe = mddev->reshape_position; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4193 | spin_unlock_irq(&conf->device_lock); |
| 4194 | wake_up(&conf->wait_for_overlap); |
NeilBrown | acb180b | 2009-04-14 16:28:34 +1000 | [diff] [blame] | 4195 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4196 | } |
| 4197 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4198 | if (mddev->delta_disks < 0) { |
| 4199 | BUG_ON(conf->reshape_progress == 0); |
| 4200 | stripe_addr = writepos; |
| 4201 | BUG_ON((mddev->dev_sectors & |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4202 | ~((sector_t)reshape_sectors - 1)) |
| 4203 | - reshape_sectors - stripe_addr |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4204 | != sector_nr); |
| 4205 | } else { |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4206 | BUG_ON(writepos != sector_nr + reshape_sectors); |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4207 | stripe_addr = sector_nr; |
| 4208 | } |
NeilBrown | ab69ae1 | 2009-03-31 15:26:47 +1100 | [diff] [blame] | 4209 | INIT_LIST_HEAD(&stripes); |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4210 | for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4211 | int j; |
NeilBrown | a9f326e | 2009-09-23 18:06:41 +1000 | [diff] [blame] | 4212 | int skipped_disk = 0; |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 4213 | sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4214 | set_bit(STRIPE_EXPANDING, &sh->state); |
| 4215 | atomic_inc(&conf->reshape_stripes); |
| 4216 | /* If any of this stripe is beyond the end of the old |
| 4217 | * array, then we need to zero those blocks |
| 4218 | */ |
| 4219 | for (j=sh->disks; j--;) { |
| 4220 | sector_t s; |
| 4221 | if (j == sh->pd_idx) |
| 4222 | continue; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4223 | if (conf->level == 6 && |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4224 | j == sh->qd_idx) |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4225 | continue; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame] | 4226 | s = compute_blocknr(sh, j, 0); |
Dan Williams | b522adc | 2009-03-31 15:00:31 +1100 | [diff] [blame] | 4227 | if (s < raid5_size(mddev, 0, 0)) { |
NeilBrown | a9f326e | 2009-09-23 18:06:41 +1000 | [diff] [blame] | 4228 | skipped_disk = 1; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4229 | continue; |
| 4230 | } |
| 4231 | memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); |
| 4232 | set_bit(R5_Expanded, &sh->dev[j].flags); |
| 4233 | set_bit(R5_UPTODATE, &sh->dev[j].flags); |
| 4234 | } |
NeilBrown | a9f326e | 2009-09-23 18:06:41 +1000 | [diff] [blame] | 4235 | if (!skipped_disk) { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4236 | set_bit(STRIPE_EXPAND_READY, &sh->state); |
| 4237 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4238 | } |
NeilBrown | ab69ae1 | 2009-03-31 15:26:47 +1100 | [diff] [blame] | 4239 | list_add(&sh->lru, &stripes); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4240 | } |
| 4241 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4242 | if (mddev->delta_disks < 0) |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4243 | conf->reshape_progress -= reshape_sectors * new_data_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4244 | else |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4245 | conf->reshape_progress += reshape_sectors * new_data_disks; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4246 | spin_unlock_irq(&conf->device_lock); |
| 4247 | 	/* Ok, those stripes are ready. We can start scheduling
| 4248 | * reads on the source stripes. |
| 4249 | * The source stripes are determined by mapping the first and last |
| 4250 | * block on the destination stripes. |
| 4251 | */ |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4252 | first_sector = |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4253 | raid5_compute_sector(conf, stripe_addr*(new_data_disks), |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4254 | 1, &dd_idx, NULL); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4255 | last_sector = |
NeilBrown | 0e6e027 | 2009-06-09 16:32:22 +1000 | [diff] [blame] | 4256 | raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 4257 | * new_data_disks - 1), |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4258 | 1, &dd_idx, NULL); |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 4259 | if (last_sector >= mddev->dev_sectors) |
| 4260 | last_sector = mddev->dev_sectors - 1; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4261 | while (first_sector <= last_sector) { |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 4262 | sh = get_active_stripe(conf, first_sector, 1, 0, 1); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4263 | set_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 4264 | set_bit(STRIPE_HANDLE, &sh->state); |
| 4265 | release_stripe(sh); |
| 4266 | first_sector += STRIPE_SECTORS; |
| 4267 | } |
NeilBrown | ab69ae1 | 2009-03-31 15:26:47 +1100 | [diff] [blame] | 4268 | /* Now that the sources are clearly marked, we can release |
| 4269 | * the destination stripes |
| 4270 | */ |
| 4271 | while (!list_empty(&stripes)) { |
| 4272 | sh = list_entry(stripes.next, struct stripe_head, lru); |
| 4273 | list_del_init(&sh->lru); |
| 4274 | release_stripe(sh); |
| 4275 | } |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 4276 | /* If this takes us to the resync_max point where we have to pause, |
| 4277 | * then we need to write out the superblock. |
| 4278 | */ |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4279 | sector_nr += reshape_sectors; |
NeilBrown | c03f6a1 | 2009-04-17 11:06:30 +1000 | [diff] [blame] | 4280 | if ((sector_nr - mddev->curr_resync_completed) * 2 |
| 4281 | >= mddev->resync_max - mddev->curr_resync_completed) { |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 4282 | /* Cannot proceed until we've updated the superblock... */ |
| 4283 | wait_event(conf->wait_for_overlap, |
| 4284 | atomic_read(&conf->reshape_stripes) == 0); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4285 | mddev->reshape_position = conf->reshape_progress; |
NeilBrown | 75d3da4 | 2011-01-14 09:14:34 +1100 | [diff] [blame] | 4286 | mddev->curr_resync_completed = sector_nr; |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 4287 | conf->reshape_checkpoint = jiffies; |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 4288 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 4289 | md_wakeup_thread(mddev->thread); |
| 4290 | wait_event(mddev->sb_wait, |
| 4291 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) |
| 4292 | || kthread_should_stop()); |
| 4293 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4294 | conf->reshape_safe = mddev->reshape_position; |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 4295 | spin_unlock_irq(&conf->device_lock); |
| 4296 | wake_up(&conf->wait_for_overlap); |
NeilBrown | acb180b | 2009-04-14 16:28:34 +1000 | [diff] [blame] | 4297 | sysfs_notify(&mddev->kobj, NULL, "sync_completed"); |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 4298 | } |
NeilBrown | 7a66138 | 2009-03-31 15:21:40 +1100 | [diff] [blame] | 4299 | return reshape_sectors; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4300 | } |
| 4301 | |
| 4302 | /* FIXME go_faster isn't used */ |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4303 | static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4304 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4305 | struct r5conf *conf = mddev->private; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4306 | struct stripe_head *sh; |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 4307 | sector_t max_sector = mddev->dev_sectors; |
NeilBrown | 57dab0b | 2010-10-19 10:03:39 +1100 | [diff] [blame] | 4308 | sector_t sync_blocks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4309 | int still_degraded = 0; |
| 4310 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4311 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4312 | if (sector_nr >= max_sector) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4313 | /* just being told to finish up .. nothing much to do */ |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4314 | |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4315 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { |
| 4316 | end_reshape(conf); |
| 4317 | return 0; |
| 4318 | } |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4319 | |
| 4320 | if (mddev->curr_resync < max_sector) /* aborted */ |
| 4321 | bitmap_end_sync(mddev->bitmap, mddev->curr_resync, |
| 4322 | &sync_blocks, 1); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4323 | else /* completed sync */ |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4324 | conf->fullsync = 0; |
| 4325 | bitmap_close_sync(mddev->bitmap); |
| 4326 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4327 | return 0; |
| 4328 | } |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 4329 | |
NeilBrown | 64bd660 | 2009-08-03 10:59:58 +1000 | [diff] [blame] | 4330 | /* Allow raid5_quiesce to complete */ |
| 4331 | wait_event(conf->wait_for_overlap, conf->quiesce != 2); |
| 4332 | |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 4333 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
| 4334 | return reshape_request(mddev, sector_nr, skipped); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4335 | |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 4336 | /* No need to check resync_max as we never do more than one |
| 4337 | * stripe, and as resync_max will always be on a chunk boundary, |
| 4338 | * if the check in md_do_sync didn't fire, there is no chance |
| 4339 | * of overstepping resync_max here |
| 4340 | */ |
| 4341 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4342 | 	/* if there are too many failed drives and we are trying
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4343 | * to resync, then assert that we are finished, because there is |
| 4344 | * nothing we can do. |
| 4345 | */ |
NeilBrown | 3285edf | 2006-06-26 00:27:55 -0700 | [diff] [blame] | 4346 | if (mddev->degraded >= conf->max_degraded && |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4347 | test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 4348 | sector_t rv = mddev->dev_sectors - sector_nr; |
NeilBrown | 57afd89 | 2005-06-21 17:17:13 -0700 | [diff] [blame] | 4349 | *skipped = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4350 | return rv; |
| 4351 | } |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4352 | if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && |
NeilBrown | 3855ad9 | 2005-11-08 21:39:38 -0800 | [diff] [blame] | 4353 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4354 | !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { |
| 4355 | /* we can skip this block, and probably more */ |
| 4356 | sync_blocks /= STRIPE_SECTORS; |
| 4357 | *skipped = 1; |
| 4358 | return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ |
| 4359 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4360 | |
NeilBrown | b47490c | 2008-02-06 01:39:50 -0800 | [diff] [blame] | 4361 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); |
| 4362 | |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 4363 | sh = get_active_stripe(conf, sector_nr, 0, 1, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4364 | if (sh == NULL) { |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 4365 | sh = get_active_stripe(conf, sector_nr, 0, 0, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4366 | /* make sure we don't swamp the stripe cache if someone else |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4367 | * is trying to get access |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4368 | */ |
Nishanth Aravamudan | 66c006a | 2005-11-07 01:01:17 -0800 | [diff] [blame] | 4369 | schedule_timeout_uninterruptible(1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4370 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4371 | 	/* Need to check if the array will still be degraded after recovery/resync.
| 4372 | * We don't need to check the 'failed' flag as when that gets set, |
| 4373 | * recovery aborts. |
| 4374 | */ |
NeilBrown | f001a70 | 2009-06-09 14:30:31 +1000 | [diff] [blame] | 4375 | for (i = 0; i < conf->raid_disks; i++) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4376 | if (conf->disks[i].rdev == NULL) |
| 4377 | still_degraded = 1; |
| 4378 | |
| 4379 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); |
| 4380 | |
NeilBrown | 83206d6 | 2011-07-26 11:19:49 +1000 | [diff] [blame] | 4381 | set_bit(STRIPE_SYNC_REQUESTED, &sh->state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4382 | |
NeilBrown | 1442577 | 2009-10-16 15:55:25 +1100 | [diff] [blame] | 4383 | handle_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4384 | release_stripe(sh); |
| 4385 | |
| 4386 | return STRIPE_SECTORS; |
| 4387 | } |
| 4388 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4389 | static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4390 | { |
| 4391 | /* We may not be able to submit a whole bio at once as there |
| 4392 | * may not be enough stripe_heads available. |
| 4393 | * We cannot pre-allocate enough stripe_heads as we may need |
| 4394 | 	 * more than exist in the cache (if we allow ever larger chunks).
| 4395 | * So we do one stripe head at a time and record in |
| 4396 | * ->bi_hw_segments how many have been done. |
| 4397 | * |
| 4398 | * We *know* that this entire raid_bio is in one chunk, so |
| 4399 | 	 * it will have only one 'dd_idx' and need only one call to raid5_compute_sector.
| 4400 | */ |
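| | /*
| |  * Note: scnt below counts the stripes already covered for this bio.  When a
| |  * stripe_head cannot be obtained, or the bio cannot be attached to it, the
| |  * current count is stashed with raid5_set_bi_hw_segments() and the bio is
| |  * parked on conf->retry_read_aligned, so the next attempt resumes at the
| |  * first unprocessed stripe instead of starting over.
| |  */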
| 4401 | struct stripe_head *sh; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4402 | int dd_idx; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4403 | sector_t sector, logical_sector, last_sector; |
| 4404 | int scnt = 0; |
| 4405 | int remaining; |
| 4406 | int handled = 0; |
| 4407 | |
| 4408 | logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4409 | sector = raid5_compute_sector(conf, logical_sector, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4410 | 0, &dd_idx, NULL); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4411 | last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); |
| 4412 | |
| 4413 | for (; logical_sector < last_sector; |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 4414 | logical_sector += STRIPE_SECTORS, |
| 4415 | sector += STRIPE_SECTORS, |
| 4416 | scnt++) { |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4417 | |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4418 | if (scnt < raid5_bi_hw_segments(raid_bio)) |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4419 | /* already done this stripe */ |
| 4420 | continue; |
| 4421 | |
NeilBrown | a8c906c | 2009-06-09 14:39:59 +1000 | [diff] [blame] | 4422 | sh = get_active_stripe(conf, sector, 0, 1, 0); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4423 | |
| 4424 | if (!sh) { |
| 4425 | /* failed to get a stripe - must wait */ |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4426 | raid5_set_bi_hw_segments(raid_bio, scnt); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4427 | conf->retry_read_aligned = raid_bio; |
| 4428 | return handled; |
| 4429 | } |
| 4430 | |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 4431 | if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { |
| 4432 | release_stripe(sh); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4433 | raid5_set_bi_hw_segments(raid_bio, scnt); |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 4434 | conf->retry_read_aligned = raid_bio; |
| 4435 | return handled; |
| 4436 | } |
| 4437 | |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4438 | handle_stripe(sh); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4439 | release_stripe(sh); |
| 4440 | handled++; |
| 4441 | } |
| 4442 | spin_lock_irq(&conf->device_lock); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4443 | remaining = raid5_dec_bi_phys_segments(raid_bio); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4444 | spin_unlock_irq(&conf->device_lock); |
Neil Brown | 0e13fe23 | 2008-06-28 08:31:20 +1000 | [diff] [blame] | 4445 | if (remaining == 0) |
| 4446 | bio_endio(raid_bio, 0); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4447 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
| 4448 | wake_up(&conf->wait_for_stripe); |
| 4449 | return handled; |
| 4450 | } |
| 4451 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4452 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4453 | /* |
| 4454 | * This is our raid5 kernel thread. |
| 4455 | * |
| 4456 | * We scan the hash table for stripes which can be handled now. |
| 4457 | * During the scan, completed stripes are saved for us by the interrupt |
| 4458 | * handler, so that they will not have to wait for our next wakeup. |
| 4459 | */ |
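| | /*
| |  * Note (editorial): the main loop runs under conf->device_lock and drops it
| |  * around the real work: when no plug is outstanding it flushes a batch of
| |  * bitmap updates and re-activates delayed stripes, then it retries any
| |  * deferred aligned reads, and finally it pulls stripes from
| |  * __get_priority_stripe() and runs handle_stripe() on each until the lists
| |  * are empty.
| |  */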
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4460 | static void raid5d(struct mddev *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4461 | { |
| 4462 | struct stripe_head *sh; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4463 | struct r5conf *conf = mddev->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4464 | int handled; |
NeilBrown | e1dfa0a | 2011-04-18 18:25:41 +1000 | [diff] [blame] | 4465 | struct blk_plug plug; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4466 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4467 | pr_debug("+++ raid5d active\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4468 | |
| 4469 | md_check_recovery(mddev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4470 | |
NeilBrown | e1dfa0a | 2011-04-18 18:25:41 +1000 | [diff] [blame] | 4471 | blk_start_plug(&plug); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4472 | handled = 0; |
| 4473 | spin_lock_irq(&conf->device_lock); |
| 4474 | while (1) { |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4475 | struct bio *bio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4476 | |
NeilBrown | 7c13edc | 2011-04-18 18:25:43 +1000 | [diff] [blame] | 4477 | if (atomic_read(&mddev->plug_cnt) == 0 && |
| 4478 | !list_empty(&conf->bitmap_list)) { |
| 4479 | /* Now is a good time to flush some bitmap updates */ |
| 4480 | conf->seq_flush++; |
NeilBrown | 700e432 | 2005-11-28 13:44:10 -0800 | [diff] [blame] | 4481 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4482 | bitmap_unplug(mddev->bitmap); |
NeilBrown | 700e432 | 2005-11-28 13:44:10 -0800 | [diff] [blame] | 4483 | spin_lock_irq(&conf->device_lock); |
NeilBrown | 7c13edc | 2011-04-18 18:25:43 +1000 | [diff] [blame] | 4484 | conf->seq_write = conf->seq_flush; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4485 | activate_bit_delay(conf); |
| 4486 | } |
NeilBrown | 7c13edc | 2011-04-18 18:25:43 +1000 | [diff] [blame] | 4487 | if (atomic_read(&mddev->plug_cnt) == 0) |
| 4488 | raid5_activate_delayed(conf); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4489 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4490 | while ((bio = remove_bio_from_retry(conf))) { |
| 4491 | int ok; |
| 4492 | spin_unlock_irq(&conf->device_lock); |
| 4493 | ok = retry_aligned_read(conf, bio); |
| 4494 | spin_lock_irq(&conf->device_lock); |
| 4495 | if (!ok) |
| 4496 | break; |
| 4497 | handled++; |
| 4498 | } |
| 4499 | |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4500 | sh = __get_priority_stripe(conf); |
| 4501 | |
Dan Williams | c9f21aa | 2008-07-23 12:05:51 -0700 | [diff] [blame] | 4502 | if (!sh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4503 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4504 | spin_unlock_irq(&conf->device_lock); |
| 4505 | |
| 4506 | handled++; |
Dan Williams | 417b8d4 | 2009-10-16 16:25:22 +1100 | [diff] [blame] | 4507 | handle_stripe(sh); |
| 4508 | release_stripe(sh); |
| 4509 | cond_resched(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4510 | |
NeilBrown | de393cd | 2011-07-28 11:31:48 +1000 | [diff] [blame] | 4511 | if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) |
| 4512 | md_check_recovery(mddev); |
| 4513 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4514 | spin_lock_irq(&conf->device_lock); |
| 4515 | } |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4516 | pr_debug("%d stripes handled\n", handled); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4517 | |
| 4518 | spin_unlock_irq(&conf->device_lock); |
| 4519 | |
Dan Williams | c9f21aa | 2008-07-23 12:05:51 -0700 | [diff] [blame] | 4520 | async_tx_issue_pending_all(); |
NeilBrown | e1dfa0a | 2011-04-18 18:25:41 +1000 | [diff] [blame] | 4521 | blk_finish_plug(&plug); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4522 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4523 | pr_debug("--- raid5d inactive\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4524 | } |
| 4525 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4526 | static ssize_t |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4527 | raid5_show_stripe_cache_size(struct mddev *mddev, char *page) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4528 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4529 | struct r5conf *conf = mddev->private; |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4530 | if (conf) |
| 4531 | return sprintf(page, "%d\n", conf->max_nr_stripes); |
| 4532 | else |
| 4533 | return 0; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4534 | } |
| 4535 | |
NeilBrown | c41d4ac | 2010-06-01 19:37:24 +1000 | [diff] [blame] | 4536 | int |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4537 | raid5_set_cache_size(struct mddev *mddev, int size) |
NeilBrown | c41d4ac | 2010-06-01 19:37:24 +1000 | [diff] [blame] | 4538 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4539 | struct r5conf *conf = mddev->private; |
NeilBrown | c41d4ac | 2010-06-01 19:37:24 +1000 | [diff] [blame] | 4540 | int err; |
| 4541 | |
| 4542 | if (size <= 16 || size > 32768) |
| 4543 | return -EINVAL; |
| 4544 | while (size < conf->max_nr_stripes) { |
| 4545 | if (drop_one_stripe(conf)) |
| 4546 | conf->max_nr_stripes--; |
| 4547 | else |
| 4548 | break; |
| 4549 | } |
| 4550 | err = md_allow_write(mddev); |
| 4551 | if (err) |
| 4552 | return err; |
| 4553 | while (size > conf->max_nr_stripes) { |
| 4554 | if (grow_one_stripe(conf)) |
| 4555 | conf->max_nr_stripes++; |
| 4556 | else break; |
| 4557 | } |
| 4558 | return 0; |
| 4559 | } |
| 4560 | EXPORT_SYMBOL(raid5_set_cache_size); |
| 4561 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4562 | static ssize_t |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4563 | raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4564 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4565 | struct r5conf *conf = mddev->private; |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4566 | unsigned long new; |
Dan Williams | b5470dc | 2008-06-27 21:44:04 -0700 | [diff] [blame] | 4567 | int err; |
| 4568 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4569 | if (len >= PAGE_SIZE) |
| 4570 | return -EINVAL; |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4571 | if (!conf) |
| 4572 | return -ENODEV; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4573 | |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4574 | if (strict_strtoul(page, 10, &new)) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4575 | return -EINVAL; |
NeilBrown | c41d4ac | 2010-06-01 19:37:24 +1000 | [diff] [blame] | 4576 | err = raid5_set_cache_size(mddev, new); |
Dan Williams | b5470dc | 2008-06-27 21:44:04 -0700 | [diff] [blame] | 4577 | if (err) |
| 4578 | return err; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4579 | return len; |
| 4580 | } |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4581 | |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4582 | static struct md_sysfs_entry |
| 4583 | raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, |
| 4584 | raid5_show_stripe_cache_size, |
| 4585 | raid5_store_stripe_cache_size); |
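| | /*
| |  * Usage note (illustrative; the device name is only an example): this
| |  * attribute appears as /sys/block/<mddev>/md/stripe_cache_size, so the cache
| |  * can be resized at run time, e.g.
| |  *     echo 4096 > /sys/block/md0/md/stripe_cache_size
| |  * raid5_set_cache_size() above rejects values of 16 or less and values
| |  * above 32768.
| |  */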
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4586 | |
| 4587 | static ssize_t |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4588 | raid5_show_preread_threshold(struct mddev *mddev, char *page) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4589 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4590 | struct r5conf *conf = mddev->private; |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4591 | if (conf) |
| 4592 | return sprintf(page, "%d\n", conf->bypass_threshold); |
| 4593 | else |
| 4594 | return 0; |
| 4595 | } |
| 4596 | |
| 4597 | static ssize_t |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4598 | raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4599 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4600 | struct r5conf *conf = mddev->private; |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4601 | unsigned long new; |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4602 | if (len >= PAGE_SIZE) |
| 4603 | return -EINVAL; |
| 4604 | if (!conf) |
| 4605 | return -ENODEV; |
| 4606 | |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4607 | if (strict_strtoul(page, 10, &new)) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4608 | return -EINVAL; |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4609 | if (new > conf->max_nr_stripes) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4610 | return -EINVAL; |
| 4611 | conf->bypass_threshold = new; |
| 4612 | return len; |
| 4613 | } |
| 4614 | |
| 4615 | static struct md_sysfs_entry |
| 4616 | raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, |
| 4617 | S_IRUGO | S_IWUSR, |
| 4618 | raid5_show_preread_threshold, |
| 4619 | raid5_store_preread_threshold); |
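| | /*
| |  * Note: preread_bypass_threshold sits alongside stripe_cache_size in the
| |  * same md sysfs directory; the store routine above rejects values larger
| |  * than the current stripe cache size (conf->max_nr_stripes).
| |  */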
| 4620 | |
| 4621 | static ssize_t |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4622 | stripe_cache_active_show(struct mddev *mddev, char *page) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4623 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4624 | struct r5conf *conf = mddev->private; |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4625 | if (conf) |
| 4626 | return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); |
| 4627 | else |
| 4628 | return 0; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4629 | } |
| 4630 | |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4631 | static struct md_sysfs_entry |
| 4632 | raid5_stripecache_active = __ATTR_RO(stripe_cache_active); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4633 | |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4634 | static struct attribute *raid5_attrs[] = { |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4635 | &raid5_stripecache_size.attr, |
| 4636 | &raid5_stripecache_active.attr, |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4637 | &raid5_preread_bypass_threshold.attr, |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4638 | NULL, |
| 4639 | }; |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4640 | static struct attribute_group raid5_attrs_group = { |
| 4641 | .name = NULL, |
| 4642 | .attrs = raid5_attrs, |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4643 | }; |
| 4644 | |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4645 | static sector_t |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4646 | raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4647 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4648 | struct r5conf *conf = mddev->private; |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4649 | |
| 4650 | if (!sectors) |
| 4651 | sectors = mddev->dev_sectors; |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4652 | if (!raid_disks) |
NeilBrown | 7ec0547 | 2009-03-31 15:10:36 +1100 | [diff] [blame] | 4653 | /* size is defined by the smallest of previous and new size */ |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4654 | raid_disks = min(conf->raid_disks, conf->previous_raid_disks); |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4655 | |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 4656 | sectors &= ~((sector_t)mddev->chunk_sectors - 1); |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 4657 | sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4658 | return sectors * (raid_disks - conf->max_degraded); |
| 4659 | } |
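/*
 * Illustrative example (editor's sketch, assuming no reshape so both
 * chunk masks are identical): for a 5-device RAID-5 with 1024-sector
 * (512KiB) chunks and 1953525168 sectors per device,
 *
 *   sectors &= ~((sector_t)1024 - 1);   // -> 1953524736
 *   return sectors * (5 - 1);           // -> 7814098944 sectors, ~4.0TB
 *
 * i.e. the per-device size is rounded down to a whole chunk and then
 * multiplied by the number of data devices.
 */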
| 4660 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4661 | static void raid5_free_percpu(struct r5conf *conf) |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4662 | { |
| 4663 | struct raid5_percpu *percpu; |
| 4664 | unsigned long cpu; |
| 4665 | |
| 4666 | if (!conf->percpu) |
| 4667 | return; |
| 4668 | |
| 4669 | get_online_cpus(); |
| 4670 | for_each_possible_cpu(cpu) { |
| 4671 | percpu = per_cpu_ptr(conf->percpu, cpu); |
| 4672 | safe_put_page(percpu->spare_page); |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4673 | kfree(percpu->scribble); |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4674 | } |
| 4675 | #ifdef CONFIG_HOTPLUG_CPU |
| 4676 | unregister_cpu_notifier(&conf->cpu_notify); |
| 4677 | #endif |
| 4678 | put_online_cpus(); |
| 4679 | |
| 4680 | free_percpu(conf->percpu); |
| 4681 | } |
| 4682 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4683 | static void free_conf(struct r5conf *conf) |
Dan Williams | 95fc17a | 2009-07-31 12:39:15 +1000 | [diff] [blame] | 4684 | { |
| 4685 | shrink_stripes(conf); |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4686 | raid5_free_percpu(conf); |
Dan Williams | 95fc17a | 2009-07-31 12:39:15 +1000 | [diff] [blame] | 4687 | kfree(conf->disks); |
| 4688 | kfree(conf->stripe_hashtbl); |
| 4689 | kfree(conf); |
| 4690 | } |
| 4691 | |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4692 | #ifdef CONFIG_HOTPLUG_CPU |
| 4693 | static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, |
| 4694 | void *hcpu) |
| 4695 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4696 | struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4697 | long cpu = (long)hcpu; |
| 4698 | struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); |
| 4699 | |
| 4700 | switch (action) { |
| 4701 | case CPU_UP_PREPARE: |
| 4702 | case CPU_UP_PREPARE_FROZEN: |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4703 | if (conf->level == 6 && !percpu->spare_page) |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4704 | percpu->spare_page = alloc_page(GFP_KERNEL); |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4705 | if (!percpu->scribble) |
| 4706 | percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); |
| 4707 | |
| 4708 | if (!percpu->scribble || |
| 4709 | (conf->level == 6 && !percpu->spare_page)) { |
| 4710 | safe_put_page(percpu->spare_page); |
| 4711 | kfree(percpu->scribble); |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4712 | pr_err("%s: failed memory allocation for cpu%ld\n", |
| 4713 | __func__, cpu); |
Akinobu Mita | 55af6bb | 2010-05-26 14:43:35 -0700 | [diff] [blame] | 4714 | return notifier_from_errno(-ENOMEM); |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4715 | } |
| 4716 | break; |
| 4717 | case CPU_DEAD: |
| 4718 | case CPU_DEAD_FROZEN: |
| 4719 | safe_put_page(percpu->spare_page); |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4720 | kfree(percpu->scribble); |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4721 | percpu->spare_page = NULL; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4722 | percpu->scribble = NULL; |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4723 | break; |
| 4724 | default: |
| 4725 | break; |
| 4726 | } |
| 4727 | return NOTIFY_OK; |
| 4728 | } |
| 4729 | #endif |
| 4730 | |
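/*
 * Editor's note (descriptive, hedged): each CPU gets a 'spare_page'
 * (scratch page used when verifying RAID-6 parity) and a 'scribble'
 * buffer (scratch space for the async_tx parity routines, sized for
 * the larger of the old and new disk counts).  The hotplug notifier
 * above keeps these allocated as CPUs come and go.
 */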
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4731 | static int raid5_alloc_percpu(struct r5conf *conf) |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4732 | { |
| 4733 | unsigned long cpu; |
| 4734 | struct page *spare_page; |
Tejun Heo | a29d8b8 | 2010-02-02 14:39:15 +0900 | [diff] [blame] | 4735 | struct raid5_percpu __percpu *allcpus; |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4736 | void *scribble; |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4737 | int err; |
| 4738 | |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4739 | allcpus = alloc_percpu(struct raid5_percpu); |
| 4740 | if (!allcpus) |
| 4741 | return -ENOMEM; |
| 4742 | conf->percpu = allcpus; |
| 4743 | |
| 4744 | get_online_cpus(); |
| 4745 | err = 0; |
| 4746 | for_each_present_cpu(cpu) { |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4747 | if (conf->level == 6) { |
| 4748 | spare_page = alloc_page(GFP_KERNEL); |
| 4749 | if (!spare_page) { |
| 4750 | err = -ENOMEM; |
| 4751 | break; |
| 4752 | } |
| 4753 | per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; |
| 4754 | } |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4755 | scribble = kmalloc(conf->scribble_len, GFP_KERNEL); |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4756 | if (!scribble) { |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4757 | err = -ENOMEM; |
| 4758 | break; |
| 4759 | } |
Dan Williams | d6f38f3 | 2009-07-14 11:50:52 -0700 | [diff] [blame] | 4760 | per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4761 | } |
| 4762 | #ifdef CONFIG_HOTPLUG_CPU |
| 4763 | conf->cpu_notify.notifier_call = raid456_cpu_notify; |
| 4764 | conf->cpu_notify.priority = 0; |
| 4765 | if (err == 0) |
| 4766 | err = register_cpu_notifier(&conf->cpu_notify); |
| 4767 | #endif |
| 4768 | put_online_cpus(); |
| 4769 | |
| 4770 | return err; |
| 4771 | } |
| 4772 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4773 | static struct r5conf *setup_conf(struct mddev *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4774 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4775 | struct r5conf *conf; |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4776 | int raid_disk, memory, max_disks; |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 4777 | struct md_rdev *rdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4778 | struct disk_info *disk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4779 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4780 | if (mddev->new_level != 5 |
| 4781 | && mddev->new_level != 4 |
| 4782 | && mddev->new_level != 6) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4783 | printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4784 | mdname(mddev), mddev->new_level); |
| 4785 | return ERR_PTR(-EIO); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4786 | } |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4787 | if ((mddev->new_level == 5 |
| 4788 | && !algorithm_valid_raid5(mddev->new_layout)) || |
| 4789 | (mddev->new_level == 6 |
| 4790 | && !algorithm_valid_raid6(mddev->new_layout))) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4791 | printk(KERN_ERR "md/raid:%s: layout %d not supported\n", |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4792 | mdname(mddev), mddev->new_layout); |
| 4793 | return ERR_PTR(-EIO); |
| 4794 | } |
| 4795 | if (mddev->new_level == 6 && mddev->raid_disks < 4) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4796 | printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4797 | mdname(mddev), mddev->raid_disks); |
| 4798 | return ERR_PTR(-EINVAL); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4799 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4800 | |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 4801 | if (!mddev->new_chunk_sectors || |
| 4802 | (mddev->new_chunk_sectors << 9) % PAGE_SIZE || |
| 4803 | !is_power_of_2(mddev->new_chunk_sectors)) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4804 | printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", |
| 4805 | mdname(mddev), mddev->new_chunk_sectors << 9); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4806 | return ERR_PTR(-EINVAL); |
NeilBrown | 4bbf377 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 4807 | } |
| 4808 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4809 | conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4810 | if (conf == NULL) |
| 4811 | goto abort; |
Dan Williams | f5efd45 | 2009-10-16 15:55:38 +1100 | [diff] [blame] | 4812 | spin_lock_init(&conf->device_lock); |
| 4813 | init_waitqueue_head(&conf->wait_for_stripe); |
| 4814 | init_waitqueue_head(&conf->wait_for_overlap); |
| 4815 | INIT_LIST_HEAD(&conf->handle_list); |
| 4816 | INIT_LIST_HEAD(&conf->hold_list); |
| 4817 | INIT_LIST_HEAD(&conf->delayed_list); |
| 4818 | INIT_LIST_HEAD(&conf->bitmap_list); |
| 4819 | INIT_LIST_HEAD(&conf->inactive_list); |
| 4820 | atomic_set(&conf->active_stripes, 0); |
| 4821 | atomic_set(&conf->preread_active_stripes, 0); |
| 4822 | atomic_set(&conf->active_aligned_reads, 0); |
| 4823 | conf->bypass_threshold = BYPASS_THRESHOLD; |
NeilBrown | d890fa2 | 2011-10-26 11:54:39 +1100 | [diff] [blame] | 4824 | conf->recovery_disabled = mddev->recovery_disabled - 1; |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4825 | |
| 4826 | conf->raid_disks = mddev->raid_disks; |
| 4827 | if (mddev->reshape_position == MaxSector) |
| 4828 | conf->previous_raid_disks = mddev->raid_disks; |
| 4829 | else |
| 4830 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4831 | max_disks = max(conf->raid_disks, conf->previous_raid_disks); |
| 4832 | conf->scribble_len = scribble_len(max_disks); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4833 | |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4834 | conf->disks = kzalloc(max_disks * sizeof(struct disk_info), |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4835 | GFP_KERNEL); |
| 4836 | if (!conf->disks) |
| 4837 | goto abort; |
| 4838 | |
| 4839 | conf->mddev = mddev; |
| 4840 | |
| 4841 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
| 4842 | goto abort; |
| 4843 | |
Dan Williams | 36d1c64 | 2009-07-14 11:48:22 -0700 | [diff] [blame] | 4844 | conf->level = mddev->new_level; |
| 4845 | if (raid5_alloc_percpu(conf) != 0) |
| 4846 | goto abort; |
| 4847 | |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4848 | pr_debug("raid456: run(%s) called.\n", mdname(mddev)); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4849 | |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 4850 | rdev_for_each(rdev, mddev) { |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4851 | raid_disk = rdev->raid_disk; |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4852 | if (raid_disk >= max_disks |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4853 | || raid_disk < 0) |
| 4854 | continue; |
| 4855 | disk = conf->disks + raid_disk; |
| 4856 | |
NeilBrown | 17045f5 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 4857 | if (test_bit(Replacement, &rdev->flags)) { |
| 4858 | if (disk->replacement) |
| 4859 | goto abort; |
| 4860 | disk->replacement = rdev; |
| 4861 | } else { |
| 4862 | if (disk->rdev) |
| 4863 | goto abort; |
| 4864 | disk->rdev = rdev; |
| 4865 | } |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4866 | |
| 4867 | if (test_bit(In_sync, &rdev->flags)) { |
| 4868 | char b[BDEVNAME_SIZE]; |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4869 | printk(KERN_INFO "md/raid:%s: device %s operational as raid" |
| 4870 | " disk %d\n", |
| 4871 | mdname(mddev), bdevname(rdev->bdev, b), raid_disk); |
Jonathan Brassow | d6b212f | 2011-06-08 18:00:28 -0500 | [diff] [blame] | 4872 | } else if (rdev->saved_raid_disk != raid_disk) |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4873 | /* Cannot rely on bitmap to complete recovery */ |
| 4874 | conf->fullsync = 1; |
| 4875 | } |
| 4876 | |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 4877 | conf->chunk_sectors = mddev->new_chunk_sectors; |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4878 | conf->level = mddev->new_level; |
| 4879 | if (conf->level == 6) |
| 4880 | conf->max_degraded = 2; |
| 4881 | else |
| 4882 | conf->max_degraded = 1; |
| 4883 | conf->algorithm = mddev->new_layout; |
| 4884 | conf->max_nr_stripes = NR_STRIPES; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4885 | conf->reshape_progress = mddev->reshape_position; |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 4886 | if (conf->reshape_progress != MaxSector) { |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 4887 | conf->prev_chunk_sectors = mddev->chunk_sectors; |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 4888 | conf->prev_algo = mddev->layout; |
| 4889 | } |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4890 | |
| 4891 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + |
NeilBrown | 5e5e3e7 | 2009-10-16 16:35:30 +1100 | [diff] [blame] | 4892 | max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4893 | if (grow_stripes(conf, conf->max_nr_stripes)) { |
| 4894 | printk(KERN_ERR |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4895 | "md/raid:%s: couldn't allocate %dkB for buffers\n", |
| 4896 | mdname(mddev), memory); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4897 | goto abort; |
| 4898 | } else |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4899 | printk(KERN_INFO "md/raid:%s: allocated %dkB\n", |
| 4900 | mdname(mddev), memory); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4901 | |
NeilBrown | 0da3c61 | 2009-09-23 18:09:45 +1000 | [diff] [blame] | 4902 | conf->thread = md_register_thread(raid5d, mddev, NULL); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4903 | if (!conf->thread) { |
| 4904 | printk(KERN_ERR |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4905 | "md/raid:%s: couldn't allocate thread.\n", |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4906 | mdname(mddev)); |
| 4907 | goto abort; |
| 4908 | } |
| 4909 | |
| 4910 | return conf; |
| 4911 | |
| 4912 | abort: |
| 4913 | if (conf) { |
Dan Williams | 95fc17a | 2009-07-31 12:39:15 +1000 | [diff] [blame] | 4914 | free_conf(conf); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4915 | return ERR_PTR(-EIO); |
| 4916 | } else |
| 4917 | return ERR_PTR(-ENOMEM); |
| 4918 | } |
| 4919 | |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 4920 | |
| 4921 | static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) |
| 4922 | { |
| 4923 | switch (algo) { |
| 4924 | case ALGORITHM_PARITY_0: |
| 4925 | if (raid_disk < max_degraded) |
| 4926 | return 1; |
| 4927 | break; |
| 4928 | case ALGORITHM_PARITY_N: |
| 4929 | if (raid_disk >= raid_disks - max_degraded) |
| 4930 | return 1; |
| 4931 | break; |
| 4932 | case ALGORITHM_PARITY_0_6: |
| 4933 | if (raid_disk == 0 || |
| 4934 | raid_disk == raid_disks - 1) |
| 4935 | return 1; |
| 4936 | break; |
| 4937 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 4938 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 4939 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 4940 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 4941 | if (raid_disk == raid_disks - 1) |
| 4942 | return 1; |
| 4943 | } |
| 4944 | return 0; |
| 4945 | } |
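/*
 * Illustrative example (editor's note): with ALGORITHM_PARITY_0 on a
 * 6-device RAID-6 (max_degraded == 2), slots 0 and 1 hold nothing but
 * P and Q, so only_parity() returns 1 for them; a not-yet-synced
 * device in one of those slots therefore never holds stale user data.
 */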
| 4946 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 4947 | static int run(struct mddev *mddev) |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4948 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 4949 | struct r5conf *conf; |
NeilBrown | 9f7c222 | 2010-07-26 12:04:13 +1000 | [diff] [blame] | 4950 | int working_disks = 0; |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 4951 | int dirty_parity_disks = 0; |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 4952 | struct md_rdev *rdev; |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 4953 | sector_t reshape_offset = 0; |
NeilBrown | 17045f5 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 4954 | int i; |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4955 | |
Andre Noll | 8c6ac86 | 2009-06-18 08:48:06 +1000 | [diff] [blame] | 4956 | if (mddev->recovery_cp != MaxSector) |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4957 | printk(KERN_NOTICE "md/raid:%s: not clean" |
Andre Noll | 8c6ac86 | 2009-06-18 08:48:06 +1000 | [diff] [blame] | 4958 | " -- starting background reconstruction\n", |
| 4959 | mdname(mddev)); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4960 | if (mddev->reshape_position != MaxSector) { |
| 4961 | /* Check that we can continue the reshape. |
 | 4962 | * Currently only the number of disks can change; it must |
 | 4963 | * increase, and we must be past the point where |
| 4964 | * a stripe over-writes itself |
| 4965 | */ |
| 4966 | sector_t here_new, here_old; |
| 4967 | int old_disks; |
Andre Noll | 18b0033 | 2009-03-31 15:00:56 +1100 | [diff] [blame] | 4968 | int max_degraded = (mddev->level == 6 ? 2 : 1); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4969 | |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 4970 | if (mddev->new_level != mddev->level) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4971 | printk(KERN_ERR "md/raid:%s: unsupported reshape " |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4972 | "required - aborting.\n", |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4973 | mdname(mddev)); |
| 4974 | return -EINVAL; |
| 4975 | } |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4976 | old_disks = mddev->raid_disks - mddev->delta_disks; |
| 4977 | /* reshape_position must be on a new-stripe boundary, and one |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4978 | * further up in new geometry must map after here in old |
| 4979 | * geometry. |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4980 | */ |
| 4981 | here_new = mddev->reshape_position; |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 4982 | if (sector_div(here_new, mddev->new_chunk_sectors * |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4983 | (mddev->raid_disks - max_degraded))) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 4984 | printk(KERN_ERR "md/raid:%s: reshape_position not " |
| 4985 | "on a stripe boundary\n", mdname(mddev)); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4986 | return -EINVAL; |
| 4987 | } |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 4988 | reshape_offset = here_new * mddev->new_chunk_sectors; |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4989 | /* here_new is the stripe we will write to */ |
| 4990 | here_old = mddev->reshape_position; |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 4991 | sector_div(here_old, mddev->chunk_sectors * |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4992 | (old_disks-max_degraded)); |
| 4993 | /* here_old is the first stripe that we might need to read |
| 4994 | * from */ |
NeilBrown | 67ac601 | 2009-08-13 10:06:24 +1000 | [diff] [blame] | 4995 | if (mddev->delta_disks == 0) { |
| 4996 | /* We cannot be sure it is safe to start an in-place |
 | 4997 | * reshape. It is only safe if user-space is monitoring |
| 4998 | * and taking constant backups. |
| 4999 | * mdadm always starts a situation like this in |
| 5000 | * readonly mode so it can take control before |
| 5001 | * allowing any writes. So just check for that. |
| 5002 | */ |
| 5003 | if ((here_new * mddev->new_chunk_sectors != |
| 5004 | here_old * mddev->chunk_sectors) || |
| 5005 | mddev->ro == 0) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5006 | printk(KERN_ERR "md/raid:%s: in-place reshape must be started" |
| 5007 | " in read-only mode - aborting\n", |
| 5008 | mdname(mddev)); |
NeilBrown | 67ac601 | 2009-08-13 10:06:24 +1000 | [diff] [blame] | 5009 | return -EINVAL; |
| 5010 | } |
| 5011 | } else if (mddev->delta_disks < 0 |
| 5012 | ? (here_new * mddev->new_chunk_sectors <= |
| 5013 | here_old * mddev->chunk_sectors) |
| 5014 | : (here_new * mddev->new_chunk_sectors >= |
| 5015 | here_old * mddev->chunk_sectors)) { |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5016 | /* Reading from the same stripe as writing to - bad */ |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5017 | printk(KERN_ERR "md/raid:%s: reshape_position too early for " |
| 5018 | "auto-recovery - aborting.\n", |
| 5019 | mdname(mddev)); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5020 | return -EINVAL; |
| 5021 | } |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5022 | printk(KERN_INFO "md/raid:%s: reshape will continue\n", |
| 5023 | mdname(mddev)); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5024 | /* OK, we should be able to continue; */ |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5025 | } else { |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5026 | BUG_ON(mddev->level != mddev->new_level); |
| 5027 | BUG_ON(mddev->layout != mddev->new_layout); |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 5028 | BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5029 | BUG_ON(mddev->delta_disks != 0); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5030 | } |
| 5031 | |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5032 | if (mddev->private == NULL) |
| 5033 | conf = setup_conf(mddev); |
| 5034 | else |
| 5035 | conf = mddev->private; |
| 5036 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5037 | if (IS_ERR(conf)) |
| 5038 | return PTR_ERR(conf); |
NeilBrown | 9ffae0c | 2006-01-06 00:20:32 -0800 | [diff] [blame] | 5039 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5040 | mddev->thread = conf->thread; |
| 5041 | conf->thread = NULL; |
| 5042 | mddev->private = conf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5043 | |
NeilBrown | 17045f5 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5044 | for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; |
| 5045 | i++) { |
| 5046 | rdev = conf->disks[i].rdev; |
| 5047 | if (!rdev && conf->disks[i].replacement) { |
| 5048 | /* The replacement is all we have yet */ |
| 5049 | rdev = conf->disks[i].replacement; |
| 5050 | conf->disks[i].replacement = NULL; |
| 5051 | clear_bit(Replacement, &rdev->flags); |
| 5052 | conf->disks[i].rdev = rdev; |
| 5053 | } |
| 5054 | if (!rdev) |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 5055 | continue; |
NeilBrown | 17045f5 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5056 | if (conf->disks[i].replacement && |
| 5057 | conf->reshape_progress != MaxSector) { |
| 5058 | /* replacements and reshape simply do not mix. */ |
| 5059 | printk(KERN_ERR "md: cannot handle concurrent " |
| 5060 | "replacement and reshape.\n"); |
| 5061 | goto abort; |
| 5062 | } |
NeilBrown | 2f11588 | 2010-06-17 17:41:03 +1000 | [diff] [blame] | 5063 | if (test_bit(In_sync, &rdev->flags)) { |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5064 | working_disks++; |
NeilBrown | 2f11588 | 2010-06-17 17:41:03 +1000 | [diff] [blame] | 5065 | continue; |
| 5066 | } |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 5067 | /* This disk is not fully in-sync. However if it |
 | 5068 | * just stored parity (beyond the recovery_offset), |
 | 5069 | * then we don't need to be concerned about the |
| 5070 | * array being dirty. |
| 5071 | * When reshape goes 'backwards', we never have |
| 5072 | * partially completed devices, so we only need |
| 5073 | * to worry about reshape going forwards. |
| 5074 | */ |
| 5075 | /* Hack because v0.91 doesn't store recovery_offset properly. */ |
| 5076 | if (mddev->major_version == 0 && |
| 5077 | mddev->minor_version > 90) |
| 5078 | rdev->recovery_offset = reshape_offset; |
| 5079 | |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 5080 | if (rdev->recovery_offset < reshape_offset) { |
| 5081 | /* We need to check old and new layout */ |
| 5082 | if (!only_parity(rdev->raid_disk, |
| 5083 | conf->algorithm, |
| 5084 | conf->raid_disks, |
| 5085 | conf->max_degraded)) |
| 5086 | continue; |
| 5087 | } |
| 5088 | if (!only_parity(rdev->raid_disk, |
| 5089 | conf->prev_algo, |
| 5090 | conf->previous_raid_disks, |
| 5091 | conf->max_degraded)) |
| 5092 | continue; |
| 5093 | dirty_parity_disks++; |
| 5094 | } |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5095 | |
NeilBrown | 17045f5 | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5096 | /* |
| 5097 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
| 5098 | */ |
NeilBrown | 908f4fb | 2011-12-23 10:17:50 +1100 | [diff] [blame] | 5099 | mddev->degraded = calc_degraded(conf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5100 | |
NeilBrown | 674806d | 2010-06-16 17:17:53 +1000 | [diff] [blame] | 5101 | if (has_failed(conf)) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5102 | printk(KERN_ERR "md/raid:%s: not enough operational devices" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5103 | " (%d/%d failed)\n", |
NeilBrown | 02c2de8 | 2006-10-03 01:15:47 -0700 | [diff] [blame] | 5104 | mdname(mddev), mddev->degraded, conf->raid_disks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5105 | goto abort; |
| 5106 | } |
| 5107 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5108 | /* device size must be a multiple of chunk size */ |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 5109 | mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5110 | mddev->resync_max_sectors = mddev->dev_sectors; |
| 5111 | |
NeilBrown | c148ffd | 2009-11-13 17:47:00 +1100 | [diff] [blame] | 5112 | if (mddev->degraded > dirty_parity_disks && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5113 | mddev->recovery_cp != MaxSector) { |
NeilBrown | 6ff8d8ec | 2006-01-06 00:20:15 -0800 | [diff] [blame] | 5114 | if (mddev->ok_start_degraded) |
| 5115 | printk(KERN_WARNING |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5116 | "md/raid:%s: starting dirty degraded array" |
| 5117 | " - data corruption possible.\n", |
NeilBrown | 6ff8d8ec | 2006-01-06 00:20:15 -0800 | [diff] [blame] | 5118 | mdname(mddev)); |
| 5119 | else { |
| 5120 | printk(KERN_ERR |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5121 | "md/raid:%s: cannot start dirty degraded array.\n", |
NeilBrown | 6ff8d8ec | 2006-01-06 00:20:15 -0800 | [diff] [blame] | 5122 | mdname(mddev)); |
| 5123 | goto abort; |
| 5124 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5125 | } |
| 5126 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5127 | if (mddev->degraded == 0) |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5128 | printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" |
| 5129 | " devices, algorithm %d\n", mdname(mddev), conf->level, |
NeilBrown | e183eae | 2009-03-31 15:20:22 +1100 | [diff] [blame] | 5130 | mddev->raid_disks-mddev->degraded, mddev->raid_disks, |
| 5131 | mddev->new_layout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5132 | else |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5133 | printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" |
| 5134 | " out of %d devices, algorithm %d\n", |
| 5135 | mdname(mddev), conf->level, |
| 5136 | mddev->raid_disks - mddev->degraded, |
| 5137 | mddev->raid_disks, mddev->new_layout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5138 | |
| 5139 | print_raid5_conf(conf); |
| 5140 | |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 5141 | if (conf->reshape_progress != MaxSector) { |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 5142 | conf->reshape_safe = conf->reshape_progress; |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5143 | atomic_set(&conf->reshape_stripes, 0); |
| 5144 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 5145 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 5146 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
| 5147 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 5148 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
NeilBrown | 0da3c61 | 2009-09-23 18:09:45 +1000 | [diff] [blame] | 5149 | "reshape"); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5150 | } |
| 5151 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5152 | |
| 5153 | /* Ok, everything is just fine now */ |
NeilBrown | a64c876 | 2010-04-14 17:15:37 +1000 | [diff] [blame] | 5154 | if (mddev->to_remove == &raid5_attrs_group) |
| 5155 | mddev->to_remove = NULL; |
NeilBrown | 00bcb4a | 2010-06-01 19:37:23 +1000 | [diff] [blame] | 5156 | else if (mddev->kobj.sd && |
| 5157 | sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) |
NeilBrown | 5e55e2f | 2007-03-26 21:32:14 -0800 | [diff] [blame] | 5158 | printk(KERN_WARNING |
NeilBrown | 4a5add4 | 2010-06-01 19:37:28 +1000 | [diff] [blame] | 5159 | "raid5: failed to create sysfs attributes for %s\n", |
NeilBrown | 5e55e2f | 2007-03-26 21:32:14 -0800 | [diff] [blame] | 5160 | mdname(mddev)); |
NeilBrown | 4a5add4 | 2010-06-01 19:37:28 +1000 | [diff] [blame] | 5161 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
| 5162 | |
| 5163 | if (mddev->queue) { |
NeilBrown | 9f7c222 | 2010-07-26 12:04:13 +1000 | [diff] [blame] | 5164 | int chunk_size; |
NeilBrown | 4a5add4 | 2010-06-01 19:37:28 +1000 | [diff] [blame] | 5165 | /* read-ahead size must cover two whole stripes, which |
 | 5166 | * is 2 * (datadisks) * chunksize, where 'datadisks' is |
 | 5167 | * the number of raid devices minus max_degraded |
| 5168 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5169 | int data_disks = conf->previous_raid_disks - conf->max_degraded; |
| 5170 | int stripe = data_disks * |
| 5171 | ((mddev->chunk_sectors << 9) / PAGE_SIZE); |
| 5172 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
| 5173 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
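/* Worked example (editor's note): with 4 data disks, 512KiB chunks
 * and 4KiB pages, stripe = 4 * (512KiB / PAGE_SIZE) = 512 pages, so
 * ra_pages is raised to at least 1024 pages, i.e. 4MiB of read-ahead.
 */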
NeilBrown | 4a5add4 | 2010-06-01 19:37:28 +1000 | [diff] [blame] | 5174 | |
| 5175 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); |
NeilBrown | 11d8a6e | 2010-07-26 11:57:07 +1000 | [diff] [blame] | 5176 | |
| 5177 | mddev->queue->backing_dev_info.congested_data = mddev; |
| 5178 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
NeilBrown | 9f7c222 | 2010-07-26 12:04:13 +1000 | [diff] [blame] | 5179 | |
| 5180 | chunk_size = mddev->chunk_sectors << 9; |
| 5181 | blk_queue_io_min(mddev->queue, chunk_size); |
| 5182 | blk_queue_io_opt(mddev->queue, chunk_size * |
| 5183 | (conf->raid_disks - conf->max_degraded)); |
| 5184 | |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 5185 | rdev_for_each(rdev, mddev) |
NeilBrown | 9f7c222 | 2010-07-26 12:04:13 +1000 | [diff] [blame] | 5186 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
| 5187 | rdev->data_offset << 9); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5188 | } |
| 5189 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5190 | return 0; |
| 5191 | abort: |
NeilBrown | 01f96c0 | 2011-09-21 15:30:20 +1000 | [diff] [blame] | 5192 | md_unregister_thread(&mddev->thread); |
NeilBrown | e4f869d | 2011-10-07 14:22:49 +1100 | [diff] [blame] | 5193 | print_raid5_conf(conf); |
| 5194 | free_conf(conf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5195 | mddev->private = NULL; |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5196 | printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5197 | return -EIO; |
| 5198 | } |
| 5199 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5200 | static int stop(struct mddev *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5201 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5202 | struct r5conf *conf = mddev->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5203 | |
NeilBrown | 01f96c0 | 2011-09-21 15:30:20 +1000 | [diff] [blame] | 5204 | md_unregister_thread(&mddev->thread); |
NeilBrown | 11d8a6e | 2010-07-26 11:57:07 +1000 | [diff] [blame] | 5205 | if (mddev->queue) |
| 5206 | mddev->queue->backing_dev_info.congested_fn = NULL; |
Dan Williams | 95fc17a | 2009-07-31 12:39:15 +1000 | [diff] [blame] | 5207 | free_conf(conf); |
NeilBrown | a64c876 | 2010-04-14 17:15:37 +1000 | [diff] [blame] | 5208 | mddev->private = NULL; |
| 5209 | mddev->to_remove = &raid5_attrs_group; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5210 | return 0; |
| 5211 | } |
| 5212 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5213 | static void status(struct seq_file *seq, struct mddev *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5214 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5215 | struct r5conf *conf = mddev->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5216 | int i; |
| 5217 | |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 5218 | seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, |
| 5219 | mddev->chunk_sectors / 2, mddev->layout); |
NeilBrown | 02c2de8 | 2006-10-03 01:15:47 -0700 | [diff] [blame] | 5220 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5221 | for (i = 0; i < conf->raid_disks; i++) |
| 5222 | seq_printf (seq, "%s", |
| 5223 | conf->disks[i].rdev && |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 5224 | test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5225 | seq_printf (seq, "]"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5226 | } |
| 5227 | |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5228 | static void print_raid5_conf (struct r5conf *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5229 | { |
| 5230 | int i; |
| 5231 | struct disk_info *tmp; |
| 5232 | |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5233 | printk(KERN_DEBUG "RAID conf printout:\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5234 | if (!conf) { |
| 5235 | printk("(conf==NULL)\n"); |
| 5236 | return; |
| 5237 | } |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5238 | printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, |
| 5239 | conf->raid_disks, |
| 5240 | conf->raid_disks - conf->mddev->degraded); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5241 | |
| 5242 | for (i = 0; i < conf->raid_disks; i++) { |
| 5243 | char b[BDEVNAME_SIZE]; |
| 5244 | tmp = conf->disks + i; |
| 5245 | if (tmp->rdev) |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5246 | printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", |
| 5247 | i, !test_bit(Faulty, &tmp->rdev->flags), |
| 5248 | bdevname(tmp->rdev->bdev, b)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5249 | } |
| 5250 | } |
| 5251 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5252 | static int raid5_spare_active(struct mddev *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5253 | { |
| 5254 | int i; |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5255 | struct r5conf *conf = mddev->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5256 | struct disk_info *tmp; |
NeilBrown | 6b96562 | 2010-08-18 11:56:59 +1000 | [diff] [blame] | 5257 | int count = 0; |
| 5258 | unsigned long flags; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5259 | |
| 5260 | for (i = 0; i < conf->raid_disks; i++) { |
| 5261 | tmp = conf->disks + i; |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5262 | if (tmp->replacement |
| 5263 | && tmp->replacement->recovery_offset == MaxSector |
| 5264 | && !test_bit(Faulty, &tmp->replacement->flags) |
| 5265 | && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { |
| 5266 | /* Replacement has just become active. */ |
| 5267 | if (!tmp->rdev |
| 5268 | || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) |
| 5269 | count++; |
| 5270 | if (tmp->rdev) { |
| 5271 | /* Replaced device not technically faulty, |
| 5272 | * but we need to be sure it gets removed |
| 5273 | * and never re-added. |
| 5274 | */ |
| 5275 | set_bit(Faulty, &tmp->rdev->flags); |
| 5276 | sysfs_notify_dirent_safe( |
| 5277 | tmp->rdev->sysfs_state); |
| 5278 | } |
| 5279 | sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); |
| 5280 | } else if (tmp->rdev |
NeilBrown | 70fffd0 | 2010-06-16 17:01:25 +1000 | [diff] [blame] | 5281 | && tmp->rdev->recovery_offset == MaxSector |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 5282 | && !test_bit(Faulty, &tmp->rdev->flags) |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 5283 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { |
NeilBrown | 6b96562 | 2010-08-18 11:56:59 +1000 | [diff] [blame] | 5284 | count++; |
Jonathan Brassow | 43c73ca | 2011-01-14 09:14:33 +1100 | [diff] [blame] | 5285 | sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5286 | } |
| 5287 | } |
NeilBrown | 6b96562 | 2010-08-18 11:56:59 +1000 | [diff] [blame] | 5288 | spin_lock_irqsave(&conf->device_lock, flags); |
NeilBrown | 908f4fb | 2011-12-23 10:17:50 +1100 | [diff] [blame] | 5289 | mddev->degraded = calc_degraded(conf); |
NeilBrown | 6b96562 | 2010-08-18 11:56:59 +1000 | [diff] [blame] | 5290 | spin_unlock_irqrestore(&conf->device_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5291 | print_raid5_conf(conf); |
NeilBrown | 6b96562 | 2010-08-18 11:56:59 +1000 | [diff] [blame] | 5292 | return count; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5293 | } |
| 5294 | |
NeilBrown | b8321b6 | 2011-12-23 10:17:51 +1100 | [diff] [blame] | 5295 | static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5296 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5297 | struct r5conf *conf = mddev->private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5298 | int err = 0; |
NeilBrown | b8321b6 | 2011-12-23 10:17:51 +1100 | [diff] [blame] | 5299 | int number = rdev->raid_disk; |
NeilBrown | 657e3e4 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 5300 | struct md_rdev **rdevp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5301 | struct disk_info *p = conf->disks + number; |
| 5302 | |
| 5303 | print_raid5_conf(conf); |
NeilBrown | 657e3e4 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 5304 | if (rdev == p->rdev) |
| 5305 | rdevp = &p->rdev; |
| 5306 | else if (rdev == p->replacement) |
| 5307 | rdevp = &p->replacement; |
| 5308 | else |
| 5309 | return 0; |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5310 | |
NeilBrown | 657e3e4 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 5311 | if (number >= conf->raid_disks && |
| 5312 | conf->reshape_progress == MaxSector) |
| 5313 | clear_bit(In_sync, &rdev->flags); |
| 5314 | |
| 5315 | if (test_bit(In_sync, &rdev->flags) || |
| 5316 | atomic_read(&rdev->nr_pending)) { |
| 5317 | err = -EBUSY; |
| 5318 | goto abort; |
| 5319 | } |
| 5320 | /* Only remove non-faulty devices if recovery |
| 5321 | * isn't possible. |
| 5322 | */ |
| 5323 | if (!test_bit(Faulty, &rdev->flags) && |
| 5324 | mddev->recovery_disabled != conf->recovery_disabled && |
| 5325 | !has_failed(conf) && |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5326 | (!p->replacement || p->replacement == rdev) && |
NeilBrown | 657e3e4 | 2011-12-23 10:17:52 +1100 | [diff] [blame] | 5327 | number < conf->raid_disks) { |
| 5328 | err = -EBUSY; |
| 5329 | goto abort; |
| 5330 | } |
| 5331 | *rdevp = NULL; |
| 5332 | synchronize_rcu(); |
| 5333 | if (atomic_read(&rdev->nr_pending)) { |
| 5334 | /* lost the race, try later */ |
| 5335 | err = -EBUSY; |
| 5336 | *rdevp = rdev; |
NeilBrown | dd054fc | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5337 | } else if (p->replacement) { |
| 5338 | /* We must have just cleared 'rdev' */ |
| 5339 | p->rdev = p->replacement; |
| 5340 | clear_bit(Replacement, &p->replacement->flags); |
 | 5341 | smp_mb(); /* Make sure other CPUs may see both as identical |
 | 5342 | * but will never see neither one set - if they are careful |
| 5343 | */ |
| 5344 | p->replacement = NULL; |
| 5345 | clear_bit(WantReplacement, &rdev->flags); |
| 5346 | } else |
 | 5347 | /* We might have just removed the Replacement as faulty - |
| 5348 | * clear the bit just in case |
| 5349 | */ |
| 5350 | clear_bit(WantReplacement, &rdev->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5351 | abort: |
| 5352 | |
| 5353 | print_raid5_conf(conf); |
| 5354 | return err; |
| 5355 | } |
| 5356 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5357 | static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5358 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5359 | struct r5conf *conf = mddev->private; |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 5360 | int err = -EEXIST; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5361 | int disk; |
| 5362 | struct disk_info *p; |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 5363 | int first = 0; |
| 5364 | int last = conf->raid_disks - 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5365 | |
NeilBrown | 7f0da59 | 2011-07-28 11:39:22 +1000 | [diff] [blame] | 5366 | if (mddev->recovery_disabled == conf->recovery_disabled) |
| 5367 | return -EBUSY; |
| 5368 | |
NeilBrown | dc10c64 | 2012-03-19 12:46:37 +1100 | [diff] [blame] | 5369 | if (rdev->saved_raid_disk < 0 && has_failed(conf)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5370 | /* no point adding a device */ |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 5371 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5372 | |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 5373 | if (rdev->raid_disk >= 0) |
| 5374 | first = last = rdev->raid_disk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5375 | |
| 5376 | /* |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5377 | * find the disk ... but prefer rdev->saved_raid_disk |
| 5378 | * if possible. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5379 | */ |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5380 | if (rdev->saved_raid_disk >= 0 && |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 5381 | rdev->saved_raid_disk >= first && |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5382 | conf->disks[rdev->saved_raid_disk].rdev == NULL) |
| 5383 | disk = rdev->saved_raid_disk; |
| 5384 | else |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 5385 | disk = first; |
NeilBrown | 7bfec5f | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5386 | for ( ; disk <= last ; disk++) { |
| 5387 | p = conf->disks + disk; |
| 5388 | if (p->rdev == NULL) { |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 5389 | clear_bit(In_sync, &rdev->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5390 | rdev->raid_disk = disk; |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 5391 | err = 0; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5392 | if (rdev->saved_raid_disk != disk) |
| 5393 | conf->fullsync = 1; |
Suzanne Wood | d6065f7 | 2005-11-08 21:39:27 -0800 | [diff] [blame] | 5394 | rcu_assign_pointer(p->rdev, rdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5395 | break; |
| 5396 | } |
NeilBrown | 7bfec5f | 2011-12-23 10:17:53 +1100 | [diff] [blame] | 5397 | if (test_bit(WantReplacement, &p->rdev->flags) && |
| 5398 | p->replacement == NULL) { |
| 5399 | clear_bit(In_sync, &rdev->flags); |
| 5400 | set_bit(Replacement, &rdev->flags); |
| 5401 | rdev->raid_disk = disk; |
| 5402 | err = 0; |
| 5403 | conf->fullsync = 1; |
| 5404 | rcu_assign_pointer(p->replacement, rdev); |
| 5405 | break; |
| 5406 | } |
| 5407 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5408 | print_raid5_conf(conf); |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 5409 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5410 | } |
| 5411 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5412 | static int raid5_resize(struct mddev *mddev, sector_t sectors) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5413 | { |
| 5414 | /* no resync is happening, and there is enough space |
| 5415 | * on all devices, so we can resize. |
| 5416 | * We need to make sure resync covers any new space. |
| 5417 | * If the array is shrinking we should possibly wait until |
| 5418 | * any io in the removed space completes, but it hardly seems |
| 5419 | * worth it. |
| 5420 | */ |
Andre Noll | 9d8f036 | 2009-06-18 08:45:01 +1000 | [diff] [blame] | 5421 | sectors &= ~((sector_t)mddev->chunk_sectors - 1); |
Dan Williams | 1f40362 | 2009-03-31 14:59:03 +1100 | [diff] [blame] | 5422 | md_set_array_sectors(mddev, raid5_size(mddev, sectors, |
| 5423 | mddev->raid_disks)); |
Dan Williams | b522adc | 2009-03-31 15:00:31 +1100 | [diff] [blame] | 5424 | if (mddev->array_sectors > |
| 5425 | raid5_size(mddev, sectors, mddev->raid_disks)) |
| 5426 | return -EINVAL; |
Andre Noll | f233ea5 | 2008-07-21 17:05:22 +1000 | [diff] [blame] | 5427 | set_capacity(mddev->gendisk, mddev->array_sectors); |
NeilBrown | 449aad3 | 2009-08-03 10:59:58 +1000 | [diff] [blame] | 5428 | revalidate_disk(mddev->gendisk); |
NeilBrown | b098636 | 2011-05-11 15:52:21 +1000 | [diff] [blame] | 5429 | if (sectors > mddev->dev_sectors && |
| 5430 | mddev->recovery_cp > mddev->dev_sectors) { |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 5431 | mddev->recovery_cp = mddev->dev_sectors; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5432 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
| 5433 | } |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 5434 | mddev->dev_sectors = sectors; |
NeilBrown | 4b5c7ae | 2005-07-27 11:43:28 -0700 | [diff] [blame] | 5435 | mddev->resync_max_sectors = sectors; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5436 | return 0; |
| 5437 | } |
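/*
 * Illustrative example (editor's note): growing each member to
 * 1953525168 sectors with 1024-sector chunks first rounds the size
 * down to 1953524736 sectors; because the array grew beyond the old
 * dev_sectors, recovery_cp is set to the old size so that the newly
 * exposed space gets resynced.
 */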
| 5438 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5439 | static int check_stripe_cache(struct mddev *mddev) |
NeilBrown | 01ee22b | 2009-06-18 08:47:20 +1000 | [diff] [blame] | 5440 | { |
| 5441 | /* Can only proceed if there are plenty of stripe_heads. |
 | 5442 | * We need a minimum of one full stripe, and for sensible progress |
| 5443 | * it is best to have about 4 times that. |
| 5444 | * If we require 4 times, then the default 256 4K stripe_heads will |
| 5445 | * allow for chunk sizes up to 256K, which is probably OK. |
| 5446 | * If the chunk size is greater, user-space should request more |
| 5447 | * stripe_heads first. |
| 5448 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5449 | struct r5conf *conf = mddev->private; |
NeilBrown | 01ee22b | 2009-06-18 08:47:20 +1000 | [diff] [blame] | 5450 | if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 |
| 5451 | > conf->max_nr_stripes || |
| 5452 | ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 |
| 5453 | > conf->max_nr_stripes) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5454 | printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", |
| 5455 | mdname(mddev), |
NeilBrown | 01ee22b | 2009-06-18 08:47:20 +1000 | [diff] [blame] | 5456 | ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) |
| 5457 | / STRIPE_SIZE)*4); |
| 5458 | return 0; |
| 5459 | } |
| 5460 | return 1; |
| 5461 | } |
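/*
 * Worked example (editor's note): with the default 256 stripe_heads
 * and 4KiB STRIPE_SIZE, (chunk_sectors << 9) / STRIPE_SIZE * 4 must
 * stay <= 256, i.e. chunks up to 256KiB pass, as the comment above
 * says.  A 1MiB chunk would need stripe_cache_size raised to at
 * least 1024 before a reshape is allowed.
 */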
| 5462 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5463 | static int check_reshape(struct mddev *mddev) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5464 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5465 | struct r5conf *conf = mddev->private; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5466 | |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5467 | if (mddev->delta_disks == 0 && |
| 5468 | mddev->new_layout == mddev->layout && |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 5469 | mddev->new_chunk_sectors == mddev->chunk_sectors) |
NeilBrown | 50ac168 | 2009-06-18 08:47:55 +1000 | [diff] [blame] | 5470 | return 0; /* nothing to do */ |
NeilBrown | dba034e | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 5471 | if (mddev->bitmap) |
| 5472 | /* Cannot grow a bitmap yet */ |
| 5473 | return -EBUSY; |
NeilBrown | 674806d | 2010-06-16 17:17:53 +1000 | [diff] [blame] | 5474 | if (has_failed(conf)) |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5475 | return -EINVAL; |
| 5476 | if (mddev->delta_disks < 0) { |
| 5477 | /* We might be able to shrink, but the devices must |
| 5478 | * be made bigger first. |
 | 5479 | * For raid6, 4 is the minimum number of devices. |
 | 5480 | * Otherwise 2 is the minimum. |
| 5481 | */ |
| 5482 | int min = 2; |
| 5483 | if (mddev->level == 6) |
| 5484 | min = 4; |
| 5485 | if (mddev->raid_disks + mddev->delta_disks < min) |
| 5486 | return -EINVAL; |
| 5487 | } |
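/*
 * Illustrative example (editor's note): shrinking a 5-device RAID-6
 * by two devices would leave 3, below the RAID-6 minimum of 4, so
 * check_reshape() rejects the request with -EINVAL.
 */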
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5488 | |
NeilBrown | 01ee22b | 2009-06-18 08:47:20 +1000 | [diff] [blame] | 5489 | if (!check_stripe_cache(mddev)) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5490 | return -ENOSPC; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5491 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5492 | return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5493 | } |
| 5494 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5495 | static int raid5_start_reshape(struct mddev *mddev) |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5496 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5497 | struct r5conf *conf = mddev->private; |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 5498 | struct md_rdev *rdev; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5499 | int spares = 0; |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 5500 | unsigned long flags; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5501 | |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 5502 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5503 | return -EBUSY; |
| 5504 | |
NeilBrown | 01ee22b | 2009-06-18 08:47:20 +1000 | [diff] [blame] | 5505 | if (!check_stripe_cache(mddev)) |
| 5506 | return -ENOSPC; |
| 5507 | |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 5508 | rdev_for_each(rdev, mddev) |
NeilBrown | 469518a | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5509 | if (!test_bit(In_sync, &rdev->flags) |
| 5510 | && !test_bit(Faulty, &rdev->flags)) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5511 | spares++; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5512 | |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 5513 | if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5514 | /* Not enough devices even to make a degraded array |
| 5515 | * of that size |
| 5516 | */ |
| 5517 | return -EINVAL; |
| 5518 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5519 | /* Refuse to reduce size of the array. Any reductions in |
 | 5520 | * array size must be through explicit setting of the |
 | 5521 | * array_size attribute. |
| 5522 | */ |
| 5523 | if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) |
| 5524 | < mddev->array_sectors) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5525 | printk(KERN_ERR "md/raid:%s: array size must be reduced " |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5526 | "before number of disks\n", mdname(mddev)); |
| 5527 | return -EINVAL; |
| 5528 | } |
| 5529 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5530 | atomic_set(&conf->reshape_stripes, 0); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5531 | spin_lock_irq(&conf->device_lock); |
| 5532 | conf->previous_raid_disks = conf->raid_disks; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5533 | conf->raid_disks += mddev->delta_disks; |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 5534 | conf->prev_chunk_sectors = conf->chunk_sectors; |
| 5535 | conf->chunk_sectors = mddev->new_chunk_sectors; |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5536 | conf->prev_algo = conf->algorithm; |
| 5537 | conf->algorithm = mddev->new_layout; |
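	/*
	 * Descriptive note (added for clarity): when shrinking
	 * (delta_disks < 0) the reshape proceeds backwards from the end of
	 * the array, so progress starts at the current size and counts
	 * down; when growing it starts at sector 0 and counts up.
	 */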
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 5538 | if (mddev->delta_disks < 0) |
| 5539 | conf->reshape_progress = raid5_size(mddev, 0, 0); |
| 5540 | else |
| 5541 | conf->reshape_progress = 0; |
| 5542 | conf->reshape_safe = conf->reshape_progress; |
NeilBrown | 86b42c7 | 2009-03-31 15:19:03 +1100 | [diff] [blame] | 5543 | conf->generation++; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5544 | spin_unlock_irq(&conf->device_lock); |
| 5545 | |
| 5546 | /* Add some new drives, as many as will fit. |
| 5547 | * We know there are enough to make the newly sized array work. |
NeilBrown | 3424bf6 | 2010-06-17 17:48:26 +1000 | [diff] [blame] | 5548 | * Don't add devices if we are reducing the number of |
| 5549 | * devices in the array. This is because it is not possible |
| 5550 | * to correctly record the "partially reconstructed" state of |
| 5551 | * such devices during the reshape and confusion could result. |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5552 | */ |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5553 | if (mddev->delta_disks >= 0) { |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 5554 | rdev_for_each(rdev, mddev) |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5555 | if (rdev->raid_disk < 0 && |
| 5556 | !test_bit(Faulty, &rdev->flags)) { |
| 5557 | if (raid5_add_disk(mddev, rdev) == 0) { |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5558 | if (rdev->raid_disk |
NeilBrown | 9d4c7d8 | 2012-03-13 11:21:21 +1100 | [diff] [blame] | 5559 | >= conf->previous_raid_disks) |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5560 | set_bit(In_sync, &rdev->flags); |
NeilBrown | 9d4c7d8 | 2012-03-13 11:21:21 +1100 | [diff] [blame] | 5561 | else |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5562 | rdev->recovery_offset = 0; |
Namhyung Kim | 36fad85 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 5563 | |
| 5564 | if (sysfs_link_rdev(mddev, rdev)) |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5565 | /* Failure here is OK */; |
NeilBrown | 50da084 | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5566 | } |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5567 | } else if (rdev->raid_disk >= conf->previous_raid_disks |
| 5568 | && !test_bit(Faulty, &rdev->flags)) { |
| 5569 | /* This is a spare that was manually added */ |
| 5570 | set_bit(In_sync, &rdev->flags); |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5571 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5572 | |
NeilBrown | 87a8dec | 2011-01-31 11:57:43 +1100 | [diff] [blame] | 5573 | /* When a reshape changes the number of devices, |
| 5574 | * ->degraded is measured against the larger of the |
| 5575 | * pre and post number of devices. |
| 5576 | */ |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5577 | spin_lock_irqsave(&conf->device_lock, flags); |
NeilBrown | 908f4fb | 2011-12-23 10:17:50 +1100 | [diff] [blame] | 5578 | mddev->degraded = calc_degraded(conf); |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5579 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 5580 | } |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5581 | mddev->raid_disks = conf->raid_disks; |
NeilBrown | e516402 | 2009-08-03 10:59:57 +1000 | [diff] [blame] | 5582 | mddev->reshape_position = conf->reshape_progress; |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 5583 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5584 | |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5585 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 5586 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 5587 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
| 5588 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 5589 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
NeilBrown | 0da3c61 | 2009-09-23 18:09:45 +1000 | [diff] [blame] | 5590 | "reshape"); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5591 | if (!mddev->sync_thread) { |
| 5592 | mddev->recovery = 0; |
| 5593 | spin_lock_irq(&conf->device_lock); |
| 5594 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 5595 | conf->reshape_progress = MaxSector; |
NeilBrown | 1e3fa9b | 2012-03-13 11:21:18 +1100 | [diff] [blame] | 5596 | mddev->reshape_position = MaxSector; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5597 | spin_unlock_irq(&conf->device_lock); |
| 5598 | return -EAGAIN; |
| 5599 | } |
NeilBrown | c8f517c | 2009-03-31 15:28:40 +1100 | [diff] [blame] | 5600 | conf->reshape_checkpoint = jiffies; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5601 | md_wakeup_thread(mddev->sync_thread); |
| 5602 | md_new_event(mddev); |
| 5603 | return 0; |
| 5604 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5605 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5606 | /* This is called from the reshape thread and should make any |
| 5607 | * changes needed in 'conf' |
| 5608 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5609 | static void end_reshape(struct r5conf *conf) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5610 | { |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5611 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5612 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 5613 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5614 | spin_lock_irq(&conf->device_lock); |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5615 | conf->previous_raid_disks = conf->raid_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 5616 | conf->reshape_progress = MaxSector; |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 5617 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | b0f9ec0 | 2009-03-31 15:27:18 +1100 | [diff] [blame] | 5618 | wake_up(&conf->wait_for_overlap); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5619 | |
| 5620 | /* read-ahead size must cover two whole stripes, which is |
| 5621 | * 2 * (datadisks) * chunksize, 'datadisks' being raid_disks - max_degraded |
| 5622 | */ |
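	/*
	 * Worked example (illustrative only, assuming 4K pages): with
	 * 4 data disks and 512K chunks (chunk_sectors == 1024),
	 * (1024 << 9) / 4096 == 128 pages per chunk, so one stripe is
	 * 4 * 128 == 512 pages and ra_pages is raised to at least 1024.
	 */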
NeilBrown | 4a5add4 | 2010-06-01 19:37:28 +1000 | [diff] [blame] | 5623 | if (conf->mddev->queue) { |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5624 | int data_disks = conf->raid_disks - conf->max_degraded; |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 5625 | int stripe = data_disks * ((conf->chunk_sectors << 9) |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5626 | / PAGE_SIZE); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5627 | if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
| 5628 | conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
| 5629 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5630 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5631 | } |
| 5632 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5633 | /* This is called from the raid5d thread with mddev_lock held. |
| 5634 | * It makes config changes to the device. |
| 5635 | */ |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5636 | static void raid5_finish_reshape(struct mddev *mddev) |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5637 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5638 | struct r5conf *conf = mddev->private; |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5639 | |
| 5640 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
| 5641 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5642 | if (mddev->delta_disks > 0) { |
| 5643 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
| 5644 | set_capacity(mddev->gendisk, mddev->array_sectors); |
NeilBrown | 449aad3 | 2009-08-03 10:59:58 +1000 | [diff] [blame] | 5645 | revalidate_disk(mddev->gendisk); |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5646 | } else { |
| 5647 | int d; |
NeilBrown | 908f4fb | 2011-12-23 10:17:50 +1100 | [diff] [blame] | 5648 | spin_lock_irq(&conf->device_lock); |
| 5649 | mddev->degraded = calc_degraded(conf); |
| 5650 | spin_unlock_irq(&conf->device_lock); |
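			/*
			 * Note (added for clarity): delta_disks is zero or
			 * negative in this branch, so when shrinking this
			 * loop walks the slots
			 * [raid_disks, raid_disks - delta_disks) that the
			 * reshape has just vacated.
			 */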
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5651 | for (d = conf->raid_disks ; |
| 5652 | d < conf->raid_disks - mddev->delta_disks; |
NeilBrown | 1a67dde | 2009-08-13 10:41:49 +1000 | [diff] [blame] | 5653 | d++) { |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 5654 | struct md_rdev *rdev = conf->disks[d].rdev; |
NeilBrown | b8321b6 | 2011-12-23 10:17:51 +1100 | [diff] [blame] | 5655 | if (rdev && |
| 5656 | raid5_remove_disk(mddev, rdev) == 0) { |
Namhyung Kim | 36fad85 | 2011-07-27 11:00:36 +1000 | [diff] [blame] | 5657 | sysfs_unlink_rdev(mddev, rdev); |
NeilBrown | 1a67dde | 2009-08-13 10:41:49 +1000 | [diff] [blame] | 5658 | rdev->raid_disk = -1; |
| 5659 | } |
| 5660 | } |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5661 | } |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5662 | mddev->layout = conf->algorithm; |
Andre Noll | 09c9e5f | 2009-06-18 08:45:55 +1000 | [diff] [blame] | 5663 | mddev->chunk_sectors = conf->chunk_sectors; |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 5664 | mddev->reshape_position = MaxSector; |
| 5665 | mddev->delta_disks = 0; |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5666 | } |
| 5667 | } |
| 5668 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5669 | static void raid5_quiesce(struct mddev *mddev, int state) |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5670 | { |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5671 | struct r5conf *conf = mddev->private; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5672 | |
| 5673 | switch(state) { |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 5674 | case 2: /* resume for a suspend */ |
| 5675 | wake_up(&conf->wait_for_overlap); |
| 5676 | break; |
| 5677 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5678 | case 1: /* stop all writes */ |
| 5679 | spin_lock_irq(&conf->device_lock); |
NeilBrown | 64bd660 | 2009-08-03 10:59:58 +1000 | [diff] [blame] | 5680 | /* '2' tells resync/reshape to pause so that all |
| 5681 | * active stripes can drain |
| 5682 | */ |
| 5683 | conf->quiesce = 2; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5684 | wait_event_lock_irq(conf->wait_for_stripe, |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 5685 | atomic_read(&conf->active_stripes) == 0 && |
| 5686 | atomic_read(&conf->active_aligned_reads) == 0, |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5687 | conf->device_lock, /* nothing */); |
NeilBrown | 64bd660 | 2009-08-03 10:59:58 +1000 | [diff] [blame] | 5688 | conf->quiesce = 1; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5689 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | 64bd660 | 2009-08-03 10:59:58 +1000 | [diff] [blame] | 5690 | /* allow reshape to continue */ |
| 5691 | wake_up(&conf->wait_for_overlap); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5692 | break; |
| 5693 | |
| 5694 | case 0: /* re-enable writes */ |
| 5695 | spin_lock_irq(&conf->device_lock); |
| 5696 | conf->quiesce = 0; |
| 5697 | wake_up(&conf->wait_for_stripe); |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 5698 | wake_up(&conf->wait_for_overlap); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5699 | spin_unlock_irq(&conf->device_lock); |
| 5700 | break; |
| 5701 | } |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5702 | } |
NeilBrown | b15c2e5 | 2006-01-06 00:20:16 -0800 | [diff] [blame] | 5703 | |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5704 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5705 | static void *raid45_takeover_raid0(struct mddev *mddev, int level) |
Trela Maciej | 54071b3 | 2010-03-08 16:02:42 +1100 | [diff] [blame] | 5706 | { |
NeilBrown | e373ab1 | 2011-10-11 16:48:59 +1100 | [diff] [blame] | 5707 | struct r0conf *raid0_conf = mddev->private; |
Randy Dunlap | d76c842 | 2011-04-21 09:07:26 -0700 | [diff] [blame] | 5708 | sector_t sectors; |
Trela Maciej | 54071b3 | 2010-03-08 16:02:42 +1100 | [diff] [blame] | 5709 | |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5710 | /* for raid0 takeover only one zone is supported */ |
NeilBrown | e373ab1 | 2011-10-11 16:48:59 +1100 | [diff] [blame] | 5711 | if (raid0_conf->nr_strip_zones > 1) { |
NeilBrown | 0c55e02 | 2010-05-03 14:09:02 +1000 | [diff] [blame] | 5712 | printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", |
| 5713 | mdname(mddev)); |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5714 | return ERR_PTR(-EINVAL); |
| 5715 | } |
| 5716 | |
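	/*
	 * Note (added for clarity): with a single zone, zone_end spans the
	 * whole array, so dividing by the number of member devices yields
	 * the per-device size used as dev_sectors for the raid4/5 layout.
	 */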
NeilBrown | e373ab1 | 2011-10-11 16:48:59 +1100 | [diff] [blame] | 5717 | sectors = raid0_conf->strip_zone[0].zone_end; |
| 5718 | sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); |
NeilBrown | 3b71bd9 | 2011-04-20 15:38:18 +1000 | [diff] [blame] | 5719 | mddev->dev_sectors = sectors; |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5720 | mddev->new_level = level; |
Trela Maciej | 54071b3 | 2010-03-08 16:02:42 +1100 | [diff] [blame] | 5721 | mddev->new_layout = ALGORITHM_PARITY_N; |
| 5722 | mddev->new_chunk_sectors = mddev->chunk_sectors; |
| 5723 | mddev->raid_disks += 1; |
| 5724 | mddev->delta_disks = 1; |
| 5725 | /* make sure it will not be marked as dirty */ |
| 5726 | mddev->recovery_cp = MaxSector; |
| 5727 | |
| 5728 | return setup_conf(mddev); |
| 5729 | } |
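/*
 * User-space note (illustrative, not part of this file): a raid0 -> raid4/5
 * takeover is normally requested from user space with something like
 *   mdadm --grow /dev/md0 --level=5
 * though the exact invocation depends on the mdadm version in use.
 */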
| 5730 | |
| 5731 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5732 | static void *raid5_takeover_raid1(struct mddev *mddev) |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5733 | { |
| 5734 | int chunksect; |
| 5735 | |
| 5736 | if (mddev->raid_disks != 2 || |
| 5737 | mddev->degraded > 1) |
| 5738 | return ERR_PTR(-EINVAL); |
| 5739 | |
| 5740 | /* Should check if there are write-behind devices? */ |
| 5741 | |
| 5742 | chunksect = 64*2; /* 64K by default */ |
| 5743 | |
| 5744 | /* The array must be an exact multiple of chunksize */ |
| 5745 | while (chunksect && (mddev->array_sectors & (chunksect-1))) |
| 5746 | chunksect >>= 1; |
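	/*
	 * Illustrative example (hypothetical values): an array of 204816
	 * sectors is not a multiple of 128, 64 or 32 sectors, so chunksect
	 * drops to 16 (an 8K chunk), which still satisfies the STRIPE_SIZE
	 * check below on 4K-page kernels.
	 */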
| 5747 | |
| 5748 | if ((chunksect<<9) < STRIPE_SIZE) |
| 5749 | /* array size does not allow a suitable chunk size */ |
| 5750 | return ERR_PTR(-EINVAL); |
| 5751 | |
| 5752 | mddev->new_level = 5; |
| 5753 | mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; |
Andre Noll | 664e7c4 | 2009-06-18 08:45:27 +1000 | [diff] [blame] | 5754 | mddev->new_chunk_sectors = chunksect; |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5755 | |
| 5756 | return setup_conf(mddev); |
| 5757 | } |
| 5758 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5759 | static void *raid5_takeover_raid6(struct mddev *mddev) |
NeilBrown | fc9739c | 2009-03-31 14:57:20 +1100 | [diff] [blame] | 5760 | { |
| 5761 | int new_layout; |
| 5762 | |
| 5763 | switch (mddev->layout) { |
| 5764 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 5765 | new_layout = ALGORITHM_LEFT_ASYMMETRIC; |
| 5766 | break; |
| 5767 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 5768 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC; |
| 5769 | break; |
| 5770 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 5771 | new_layout = ALGORITHM_LEFT_SYMMETRIC; |
| 5772 | break; |
| 5773 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 5774 | new_layout = ALGORITHM_RIGHT_SYMMETRIC; |
| 5775 | break; |
| 5776 | case ALGORITHM_PARITY_0_6: |
| 5777 | new_layout = ALGORITHM_PARITY_0; |
| 5778 | break; |
| 5779 | case ALGORITHM_PARITY_N: |
| 5780 | new_layout = ALGORITHM_PARITY_N; |
| 5781 | break; |
| 5782 | default: |
| 5783 | return ERR_PTR(-EINVAL); |
| 5784 | } |
| 5785 | mddev->new_level = 5; |
| 5786 | mddev->new_layout = new_layout; |
| 5787 | mddev->delta_disks = -1; |
| 5788 | mddev->raid_disks -= 1; |
| 5789 | return setup_conf(mddev); |
| 5790 | } |
| 5791 | |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5792 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5793 | static int raid5_check_reshape(struct mddev *mddev) |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5794 | { |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5795 | /* For a 2-drive array, the layout and chunk size can be changed |
| 5796 | * immediately as no restriping is needed. |
| 5797 | * For larger arrays we record the new value - after validation |
| 5798 | * to be used by a reshape pass. |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5799 | */ |
NeilBrown | d1688a6 | 2011-10-11 16:49:52 +1100 | [diff] [blame] | 5800 | struct r5conf *conf = mddev->private; |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5801 | int new_chunk = mddev->new_chunk_sectors; |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5802 | |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5803 | if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5804 | return -EINVAL; |
| 5805 | if (new_chunk > 0) { |
Andre Noll | 0ba459d | 2009-06-18 08:46:10 +1000 | [diff] [blame] | 5806 | if (!is_power_of_2(new_chunk)) |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5807 | return -EINVAL; |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5808 | if (new_chunk < (PAGE_SIZE>>9)) |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5809 | return -EINVAL; |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5810 | if (mddev->array_sectors & (new_chunk-1)) |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5811 | /* not factor of array size */ |
| 5812 | return -EINVAL; |
| 5813 | } |
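	/*
	 * Example (illustrative): new_chunk_sectors == 256 requests 128K
	 * chunks: it is a power of two, covers at least one page
	 * (256 >= PAGE_SIZE >> 9 == 8 on 4K pages), and is accepted
	 * provided the array size is a multiple of 256 sectors.
	 */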
| 5814 | |
| 5815 | /* They look valid */ |
| 5816 | |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5817 | if (mddev->raid_disks == 2) { |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5818 | /* can make the change immediately */ |
| 5819 | if (mddev->new_layout >= 0) { |
| 5820 | conf->algorithm = mddev->new_layout; |
| 5821 | mddev->layout = mddev->new_layout; |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5822 | } |
| 5823 | if (new_chunk > 0) { |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5824 | conf->chunk_sectors = new_chunk; |
| 5825 | mddev->chunk_sectors = new_chunk; |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5826 | } |
| 5827 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 5828 | md_wakeup_thread(mddev->thread); |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5829 | } |
NeilBrown | 50ac168 | 2009-06-18 08:47:55 +1000 | [diff] [blame] | 5830 | return check_reshape(mddev); |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5831 | } |
| 5832 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5833 | static int raid6_check_reshape(struct mddev *mddev) |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5834 | { |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5835 | int new_chunk = mddev->new_chunk_sectors; |
NeilBrown | 50ac168 | 2009-06-18 08:47:55 +1000 | [diff] [blame] | 5836 | |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5837 | if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5838 | return -EINVAL; |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5839 | if (new_chunk > 0) { |
Andre Noll | 0ba459d | 2009-06-18 08:46:10 +1000 | [diff] [blame] | 5840 | if (!is_power_of_2(new_chunk)) |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5841 | return -EINVAL; |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5842 | if (new_chunk < (PAGE_SIZE >> 9)) |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5843 | return -EINVAL; |
NeilBrown | 597a711 | 2009-06-18 08:47:42 +1000 | [diff] [blame] | 5844 | if (mddev->array_sectors & (new_chunk-1)) |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5845 | /* not factor of array size */ |
| 5846 | return -EINVAL; |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5847 | } |
NeilBrown | 88ce493 | 2009-03-31 15:24:23 +1100 | [diff] [blame] | 5848 | |
| 5849 | /* They look valid */ |
NeilBrown | 50ac168 | 2009-06-18 08:47:55 +1000 | [diff] [blame] | 5850 | return check_reshape(mddev); |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5851 | } |
| 5852 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5853 | static void *raid5_takeover(struct mddev *mddev) |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5854 | { |
| 5855 | /* raid5 can take over: |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5856 | * raid0 - if there is only one strip zone - make it a raid4 layout |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5857 | * raid1 - if there are two drives. We need to know the chunk size |
| 5858 | * raid4 - trivial - just use a raid4 layout. |
| 5859 | * raid6 - Providing it is a *_6 layout |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5860 | */ |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5861 | if (mddev->level == 0) |
| 5862 | return raid45_takeover_raid0(mddev, 5); |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5863 | if (mddev->level == 1) |
| 5864 | return raid5_takeover_raid1(mddev); |
NeilBrown | e9d4758 | 2009-03-31 14:57:09 +1100 | [diff] [blame] | 5865 | if (mddev->level == 4) { |
| 5866 | mddev->new_layout = ALGORITHM_PARITY_N; |
| 5867 | mddev->new_level = 5; |
| 5868 | return setup_conf(mddev); |
| 5869 | } |
NeilBrown | fc9739c | 2009-03-31 14:57:20 +1100 | [diff] [blame] | 5870 | if (mddev->level == 6) |
| 5871 | return raid5_takeover_raid6(mddev); |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5872 | |
| 5873 | return ERR_PTR(-EINVAL); |
| 5874 | } |
| 5875 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5876 | static void *raid4_takeover(struct mddev *mddev) |
NeilBrown | a78d38a | 2010-03-22 16:53:49 +1100 | [diff] [blame] | 5877 | { |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5878 | /* raid4 can take over: |
| 5879 | * raid0 - if there is only one strip zone |
| 5880 | * raid5 - if layout is right |
NeilBrown | a78d38a | 2010-03-22 16:53:49 +1100 | [diff] [blame] | 5881 | */ |
Dan Williams | f1b29bc | 2010-05-01 18:09:05 -0700 | [diff] [blame] | 5882 | if (mddev->level == 0) |
| 5883 | return raid45_takeover_raid0(mddev, 4); |
NeilBrown | a78d38a | 2010-03-22 16:53:49 +1100 | [diff] [blame] | 5884 | if (mddev->level == 5 && |
| 5885 | mddev->layout == ALGORITHM_PARITY_N) { |
| 5886 | mddev->new_layout = 0; |
| 5887 | mddev->new_level = 4; |
| 5888 | return setup_conf(mddev); |
| 5889 | } |
| 5890 | return ERR_PTR(-EINVAL); |
| 5891 | } |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5892 | |
NeilBrown | 84fc4b5 | 2011-10-11 16:49:58 +1100 | [diff] [blame] | 5893 | static struct md_personality raid5_personality; |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5894 | |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 5895 | static void *raid6_takeover(struct mddev *mddev) |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5896 | { |
| 5897 | /* Currently can only take over a raid5. We map the |
| 5898 | * personality to an equivalent raid6 personality |
| 5899 | * with the Q block at the end. |
| 5900 | */ |
| 5901 | int new_layout; |
| 5902 | |
| 5903 | if (mddev->pers != &raid5_personality) |
| 5904 | return ERR_PTR(-EINVAL); |
| 5905 | if (mddev->degraded > 1) |
| 5906 | return ERR_PTR(-EINVAL); |
| 5907 | if (mddev->raid_disks > 253) |
| 5908 | return ERR_PTR(-EINVAL); |
| 5909 | if (mddev->raid_disks < 3) |
| 5910 | return ERR_PTR(-EINVAL); |
| 5911 | |
| 5912 | switch (mddev->layout) { |
| 5913 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 5914 | new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; |
| 5915 | break; |
| 5916 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 5917 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; |
| 5918 | break; |
| 5919 | case ALGORITHM_LEFT_SYMMETRIC: |
| 5920 | new_layout = ALGORITHM_LEFT_SYMMETRIC_6; |
| 5921 | break; |
| 5922 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 5923 | new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; |
| 5924 | break; |
| 5925 | case ALGORITHM_PARITY_0: |
| 5926 | new_layout = ALGORITHM_PARITY_0_6; |
| 5927 | break; |
| 5928 | case ALGORITHM_PARITY_N: |
| 5929 | new_layout = ALGORITHM_PARITY_N; |
| 5930 | break; |
| 5931 | default: |
| 5932 | return ERR_PTR(-EINVAL); |
| 5933 | } |
| 5934 | mddev->new_level = 6; |
| 5935 | mddev->new_layout = new_layout; |
| 5936 | mddev->delta_disks = 1; |
| 5937 | mddev->raid_disks += 1; |
| 5938 | return setup_conf(mddev); |
| 5939 | } |
| 5940 | |
| 5941 | |
NeilBrown | 84fc4b5 | 2011-10-11 16:49:58 +1100 | [diff] [blame] | 5942 | static struct md_personality raid6_personality = |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5943 | { |
| 5944 | .name = "raid6", |
| 5945 | .level = 6, |
| 5946 | .owner = THIS_MODULE, |
| 5947 | .make_request = make_request, |
| 5948 | .run = run, |
| 5949 | .stop = stop, |
| 5950 | .status = status, |
| 5951 | .error_handler = error, |
| 5952 | .hot_add_disk = raid5_add_disk, |
| 5953 | .hot_remove_disk= raid5_remove_disk, |
| 5954 | .spare_active = raid5_spare_active, |
| 5955 | .sync_request = sync_request, |
| 5956 | .resize = raid5_resize, |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 5957 | .size = raid5_size, |
NeilBrown | 50ac168 | 2009-06-18 08:47:55 +1000 | [diff] [blame] | 5958 | .check_reshape = raid6_check_reshape, |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 5959 | .start_reshape = raid5_start_reshape, |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5960 | .finish_reshape = raid5_finish_reshape, |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5961 | .quiesce = raid5_quiesce, |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5962 | .takeover = raid6_takeover, |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5963 | }; |
NeilBrown | 84fc4b5 | 2011-10-11 16:49:58 +1100 | [diff] [blame] | 5964 | static struct md_personality raid5_personality = |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5965 | { |
| 5966 | .name = "raid5", |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5967 | .level = 5, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5968 | .owner = THIS_MODULE, |
| 5969 | .make_request = make_request, |
| 5970 | .run = run, |
| 5971 | .stop = stop, |
| 5972 | .status = status, |
| 5973 | .error_handler = error, |
| 5974 | .hot_add_disk = raid5_add_disk, |
| 5975 | .hot_remove_disk= raid5_remove_disk, |
| 5976 | .spare_active = raid5_spare_active, |
| 5977 | .sync_request = sync_request, |
| 5978 | .resize = raid5_resize, |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 5979 | .size = raid5_size, |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5980 | .check_reshape = raid5_check_reshape, |
| 5981 | .start_reshape = raid5_start_reshape, |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5982 | .finish_reshape = raid5_finish_reshape, |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5983 | .quiesce = raid5_quiesce, |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5984 | .takeover = raid5_takeover, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5985 | }; |
| 5986 | |
NeilBrown | 84fc4b5 | 2011-10-11 16:49:58 +1100 | [diff] [blame] | 5987 | static struct md_personality raid4_personality = |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5988 | { |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5989 | .name = "raid4", |
| 5990 | .level = 4, |
| 5991 | .owner = THIS_MODULE, |
| 5992 | .make_request = make_request, |
| 5993 | .run = run, |
| 5994 | .stop = stop, |
| 5995 | .status = status, |
| 5996 | .error_handler = error, |
| 5997 | .hot_add_disk = raid5_add_disk, |
| 5998 | .hot_remove_disk= raid5_remove_disk, |
| 5999 | .spare_active = raid5_spare_active, |
| 6000 | .sync_request = sync_request, |
| 6001 | .resize = raid5_resize, |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 6002 | .size = raid5_size, |
NeilBrown | 3d37890 | 2007-03-26 21:32:13 -0800 | [diff] [blame] | 6003 | .check_reshape = raid5_check_reshape, |
| 6004 | .start_reshape = raid5_start_reshape, |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 6005 | .finish_reshape = raid5_finish_reshape, |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 6006 | .quiesce = raid5_quiesce, |
NeilBrown | a78d38a | 2010-03-22 16:53:49 +1100 | [diff] [blame] | 6007 | .takeover = raid4_takeover, |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 6008 | }; |
| 6009 | |
| 6010 | static int __init raid5_init(void) |
| 6011 | { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 6012 | register_md_personality(&raid6_personality); |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 6013 | register_md_personality(&raid5_personality); |
| 6014 | register_md_personality(&raid4_personality); |
| 6015 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6016 | } |
| 6017 | |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 6018 | static void raid5_exit(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6019 | { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 6020 | unregister_md_personality(&raid6_personality); |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 6021 | unregister_md_personality(&raid5_personality); |
| 6022 | unregister_md_personality(&raid4_personality); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6023 | } |
| 6024 | |
| 6025 | module_init(raid5_init); |
| 6026 | module_exit(raid5_exit); |
| 6027 | MODULE_LICENSE("GPL"); |
NeilBrown | 0efb9e6 | 2009-12-14 12:49:58 +1100 | [diff] [blame] | 6028 | MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 6029 | MODULE_ALIAS("md-personality-4"); /* RAID5 */ |
NeilBrown | d9d166c | 2006-01-06 00:20:51 -0800 | [diff] [blame] | 6030 | MODULE_ALIAS("md-raid5"); |
| 6031 | MODULE_ALIAS("md-raid4"); |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 6032 | MODULE_ALIAS("md-level-5"); |
| 6033 | MODULE_ALIAS("md-level-4"); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 6034 | MODULE_ALIAS("md-personality-8"); /* RAID6 */ |
| 6035 | MODULE_ALIAS("md-raid6"); |
| 6036 | MODULE_ALIAS("md-level-6"); |
| 6037 | |
| 6038 | /* This used to be two separate modules, they were: */ |
| 6039 | MODULE_ALIAS("raid5"); |
| 6040 | MODULE_ALIAS("raid6"); |