/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 * we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 * batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
 */
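/*
 * A worked example of the scheme above (hypothetical numbers, not tied
 * to any particular run): suppose bm_write == 7 and bm_flush == 7.  A
 * write arriving in add_stripe_bio is tagged with sh->bm_seq == 8
 * (bm_flush+1) and, since batch 8 has not been written, the stripe is
 * queued.  The next unplug bumps bm_flush to 8, closing the batch;
 * seeing bm_flush(8) > bm_write(7) we flush the pending bitmap updates
 * and set bm_write = 8, after which stripes tagged bm_seq <= 8 may
 * proceed with their writes.
 */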

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/seq_file.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"

/*
 * Stripe cache
 */

#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
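/*
 * For illustration (assuming a 64-bit build with 4K pages): NR_HASH is
 * 4096 / 8 = 512 buckets, STRIPE_SHIFT is 12 - 9 = 3, so stripes are
 * hashed on (sector >> 3) & 511, i.e. one bucket per 4K-aligned stripe.
 */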

/* bios attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bios per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device.
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
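/*
 * A sketch of the walk this macro supports (the pattern used by the
 * biofill/biodrain paths later in this file):
 *
 *	while (bi && bi->bi_sector < dev->sector + STRIPE_SECTORS) {
 *		... handle the part of bi that covers this device ...
 *		bi = r5_next_bio(bi, dev->sector);
 *	}
 */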
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#ifdef DEBUG
#define inline
#define __inline__
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
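/*
 * Packing example (hypothetical value): bi_phys_segments == 0x00030002
 * means an active (phys) count of 2 in bits 15:0 and a processed (hw)
 * count of 3 in bits 31:16.
 */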

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid6 stripe, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot;

	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
	slot = (*count)++;
	return slot;
}
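/*
 * Example (hypothetical 6-device md-layout stripe with pd_idx == 4 and
 * qd_idx == 5, so syndrome_disks == 4): the four data devices map to
 * slots 0..3 in the order they are visited, P maps to slot 4 and Q to
 * slot 5.
 */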

static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;
	while (bi) {

		return_bi = bi->bi_next;
		bi->bi_next = NULL;
		bi->bi_size = 0;
		bio_endio(bi, 0);
		bi = return_bi;
	}
}

static void print_raid5_conf (raid5_conf_t *conf);

static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}

static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}

static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}

static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}

static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	CHECK_DEVLOCK();
	hlist_add_head(&sh->hash, hp);
}


/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	CHECK_DEVLOCK();
	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}

static void shrink_buffers(struct stripe_head *sh, int num)
{
	struct page *p;
	int i;

	for (i=0; i<num ; i++) {
		p = sh->dev[i].page;
		if (!p)
			continue;
		sh->dev[i].page = NULL;
		put_page(p);
	}
}

static int grow_buffers(struct stripe_head *sh, int num)
{
	int i;

	for (i=0; i<num; i++) {
		struct page *page;

		if (!(page = alloc_page(GFP_KERNEL))) {
			return 1;
		}
		sh->dev[i].page = page;
	}
	return 0;
}

static void raid5_build_block(struct stripe_head *sh, int i, int previous);
static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous,
			    struct stripe_head *sh);

static void init_stripe(struct stripe_head *sh, sector_t sector, int previous)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	BUG_ON(atomic_read(&sh->count) != 0);
	BUG_ON(test_bit(STRIPE_HANDLE, &sh->state));
	BUG_ON(stripe_operations_active(sh));

	CHECK_DEVLOCK();
	pr_debug("init_stripe called, stripe %llu\n",
		(unsigned long long)sh->sector);

	remove_hash(sh);

	sh->generation = conf->generation - previous;
	sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks;
	sh->sector = sector;
	stripe_set_idx(sector, conf, previous, sh);
	sh->state = 0;


	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		if (dev->toread || dev->read || dev->towrite || dev->written ||
		    test_bit(R5_LOCKED, &dev->flags)) {
			printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n",
			       (unsigned long long)sh->sector, i, dev->toread,
			       dev->read, dev->towrite, dev->written,
			       test_bit(R5_LOCKED, &dev->flags));
			BUG();
		}
		dev->flags = 0;
		raid5_build_block(sh, i, previous);
	}
	insert_hash(conf, sh);
}

static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector,
					 short generation)
{
	struct stripe_head *sh;
	struct hlist_node *hn;

	CHECK_DEVLOCK();
	pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector);
	hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash)
		if (sh->sector == sector && sh->generation == generation)
			return sh;
	pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector);
	return NULL;
}

static void unplug_slaves(mddev_t *mddev);
static void raid5_unplug_device(struct request_queue *q);

static struct stripe_head *
get_active_stripe(raid5_conf_t *conf, sector_t sector,
		  int previous, int noblock)
{
	struct stripe_head *sh;

	pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector);

	spin_lock_irq(&conf->device_lock);

	do {
		wait_event_lock_irq(conf->wait_for_stripe,
				    conf->quiesce == 0,
				    conf->device_lock, /* nothing */);
		sh = __find_stripe(conf, sector, conf->generation - previous);
		if (!sh) {
			if (!conf->inactive_blocked)
				sh = get_free_stripe(conf);
			if (noblock && sh == NULL)
				break;
			if (!sh) {
				conf->inactive_blocked = 1;
				wait_event_lock_irq(conf->wait_for_stripe,
						    !list_empty(&conf->inactive_list) &&
						    (atomic_read(&conf->active_stripes)
						     < (conf->max_nr_stripes *3/4)
						    || !conf->inactive_blocked),
						    conf->device_lock,
						    raid5_unplug_device(conf->mddev->queue)
					);
				conf->inactive_blocked = 0;
			} else
				init_stripe(sh, sector, previous);
		} else {
			if (atomic_read(&sh->count)) {
				BUG_ON(!list_empty(&sh->lru));
			} else {
				if (!test_bit(STRIPE_HANDLE, &sh->state))
					atomic_inc(&conf->active_stripes);
				if (list_empty(&sh->lru) &&
				    !test_bit(STRIPE_EXPANDING, &sh->state))
					BUG();
				list_del_init(&sh->lru);
			}
		}
	} while (sh == NULL);

	if (sh)
		atomic_inc(&sh->count);

	spin_unlock_irq(&conf->device_lock);
	return sh;
}

static void
raid5_end_read_request(struct bio *bi, int error);
static void
raid5_end_write_request(struct bio *bi, int error);

static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
{
	raid5_conf_t *conf = sh->raid_conf;
	int i, disks = sh->disks;

	might_sleep();

	for (i = disks; i--; ) {
		int rw;
		struct bio *bi;
		mdk_rdev_t *rdev;
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags))
			rw = WRITE;
		else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
			rw = READ;
		else
			continue;

		bi = &sh->dev[i].req;

		bi->bi_rw = rw;
		if (rw == WRITE)
			bi->bi_end_io = raid5_end_write_request;
		else
			bi->bi_end_io = raid5_end_read_request;

		rcu_read_lock();
		rdev = rcu_dereference(conf->disks[i].rdev);
		if (rdev && test_bit(Faulty, &rdev->flags))
			rdev = NULL;
		if (rdev)
			atomic_inc(&rdev->nr_pending);
		rcu_read_unlock();

		if (rdev) {
			if (s->syncing || s->expanding || s->expanded)
				md_sync_acct(rdev->bdev, STRIPE_SECTORS);

			set_bit(STRIPE_IO_STARTED, &sh->state);

			bi->bi_bdev = rdev->bdev;
			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
				__func__, (unsigned long long)sh->sector,
				bi->bi_rw, i);
			atomic_inc(&sh->count);
			bi->bi_sector = sh->sector + rdev->data_offset;
			bi->bi_flags = 1 << BIO_UPTODATE;
			bi->bi_vcnt = 1;
			bi->bi_max_vecs = 1;
			bi->bi_idx = 0;
			bi->bi_io_vec = &sh->dev[i].vec;
			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
			bi->bi_io_vec[0].bv_offset = 0;
			bi->bi_size = STRIPE_SIZE;
			bi->bi_next = NULL;
			if (rw == WRITE &&
			    test_bit(R5_ReWrite, &sh->dev[i].flags))
				atomic_add(STRIPE_SECTORS,
					&rdev->corrected_errors);
			generic_make_request(bi);
		} else {
			if (rw == WRITE)
				set_bit(STRIPE_DEGRADED, &sh->state);
			pr_debug("skip op %ld on disc %d for sector %llu\n",
				bi->bi_rw, i, (unsigned long long)sh->sector);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
			set_bit(STRIPE_HANDLE, &sh->state);
		}
	}
}

static struct dma_async_tx_descriptor *
async_copy_data(int frombio, struct bio *bio, struct page *page,
	sector_t sector, struct dma_async_tx_descriptor *tx)
{
	struct bio_vec *bvl;
	struct page *bio_page;
	int i;
	int page_offset;

	if (bio->bi_sector >= sector)
		page_offset = (signed)(bio->bi_sector - sector) * 512;
	else
		page_offset = (signed)(sector - bio->bi_sector) * -512;
	bio_for_each_segment(bvl, bio, i) {
		int len = bio_iovec_idx(bio, i)->bv_len;
		int clen;
		int b_offset = 0;

		if (page_offset < 0) {
			b_offset = -page_offset;
			page_offset += b_offset;
			len -= b_offset;
		}

		if (len > 0 && page_offset + len > STRIPE_SIZE)
			clen = STRIPE_SIZE - page_offset;
		else
			clen = len;

		if (clen > 0) {
			b_offset += bio_iovec_idx(bio, i)->bv_offset;
			bio_page = bio_iovec_idx(bio, i)->bv_page;
			if (frombio)
				tx = async_memcpy(page, bio_page, page_offset,
					b_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
			else
				tx = async_memcpy(bio_page, page, b_offset,
					page_offset, clen,
					ASYNC_TX_DEP_ACK,
					tx, NULL, NULL);
		}
		if (clen < len) /* hit end of page */
			break;
		page_offset += len;
	}

	return tx;
}
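/*
 * Offset arithmetic example (hypothetical, assuming 4K pages so
 * STRIPE_SIZE == 4096): a bio starting 2 sectors past the stripe page
 * (bio->bi_sector == sector + 2) gives page_offset == 1024, and copies
 * are clamped so nothing is written past the STRIPE_SIZE boundary.
 */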

static void ops_complete_biofill(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	struct bio *return_bi = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* clear completed biofills */
	spin_lock_irq(&conf->device_lock);
	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];

		/* acknowledge completion of a biofill operation */
		/* and check if we need to reply to a read request,
		 * new R5_Wantfill requests are held off until
		 * !STRIPE_BIOFILL_RUN
		 */
		if (test_and_clear_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi, *rbi2;

			BUG_ON(!dev->read);
			rbi = dev->read;
			dev->read = NULL;
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				rbi2 = r5_next_bio(rbi, dev->sector);
				if (!raid5_dec_bi_phys_segments(rbi)) {
					rbi->bi_next = return_bi;
					return_bi = rbi;
				}
				rbi = rbi2;
			}
		}
	}
	spin_unlock_irq(&conf->device_lock);
	clear_bit(STRIPE_BIOFILL_RUN, &sh->state);

	return_io(return_bi);

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_biofill(struct stripe_head *sh)
{
	struct dma_async_tx_descriptor *tx = NULL;
	raid5_conf_t *conf = sh->raid_conf;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = sh->disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (test_bit(R5_Wantfill, &dev->flags)) {
			struct bio *rbi;
			spin_lock_irq(&conf->device_lock);
			dev->read = rbi = dev->toread;
			dev->toread = NULL;
			spin_unlock_irq(&conf->device_lock);
			while (rbi && rbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(0, rbi, dev->page,
					dev->sector, tx);
				rbi = r5_next_bio(rbi, dev->sector);
			}
		}
	}

	atomic_inc(&sh->count);
	async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_biofill, sh);
}

static void ops_complete_compute5(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	set_bit(R5_UPTODATE, &tgt->flags);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));
	clear_bit(R5_Wantcompute, &tgt->flags);
	clear_bit(STRIPE_COMPUTE_RUN, &sh->state);
	if (sh->check_state == check_state_compute_run)
		sh->check_state = check_state_compute_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static struct dma_async_tx_descriptor *ops_run_compute5(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int target = sh->ops.target;
	struct r5dev *tgt = &sh->dev[target];
	struct page *xor_dest = tgt->page;
	int count = 0;
	struct dma_async_tx_descriptor *tx;
	int i;

	pr_debug("%s: stripe %llu block: %d\n",
		__func__, (unsigned long long)sh->sector, target);
	BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags));

	for (i = disks; i--; )
		if (i != target)
			xor_srcs[count++] = sh->dev[i].page;

	atomic_inc(&sh->count);

	if (unlikely(count == 1))
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			0, NULL, ops_complete_compute5, sh);
	else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			ASYNC_TX_XOR_ZERO_DST, NULL,
			ops_complete_compute5, sh);

	return tx;
}

static void ops_complete_prexor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);
}

static struct dma_async_tx_descriptor *
ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	int count = 0, pd_idx = sh->pd_idx, i;

	/* existing parity data subtracted */
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		/* Only process blocks that are known to be uptodate */
		if (test_bit(R5_Wantdrain, &dev->flags))
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx,
		ops_complete_prexor, sh);

	return tx;
}

static struct dma_async_tx_descriptor *
ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	int disks = sh->disks;
	int i;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		struct bio *chosen;

		if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) {
			struct bio *wbi;

			spin_lock(&sh->lock);
			chosen = dev->towrite;
			dev->towrite = NULL;
			BUG_ON(dev->written);
			wbi = dev->written = chosen;
			spin_unlock(&sh->lock);

			while (wbi && wbi->bi_sector <
				dev->sector + STRIPE_SECTORS) {
				tx = async_copy_data(1, wbi, dev->page,
					dev->sector, tx);
				wbi = r5_next_bio(wbi, dev->sector);
			}
		}
	}

	return tx;
}

static void ops_complete_postxor(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;
	int disks = sh->disks, i, pd_idx = sh->pd_idx;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (dev->written || i == pd_idx)
			set_bit(R5_UPTODATE, &dev->flags);
	}

	if (sh->reconstruct_state == reconstruct_state_drain_run)
		sh->reconstruct_state = reconstruct_state_drain_result;
	else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run)
		sh->reconstruct_state = reconstruct_state_prexor_drain_result;
	else {
		BUG_ON(sh->reconstruct_state != reconstruct_state_run);
		sh->reconstruct_state = reconstruct_state_result;
	}

	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void
ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest;
	int prexor = 0;
	unsigned long flags;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	/* check if prexor is active which means only process blocks
	 * that are part of a read-modify-write (written)
	 */
	if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) {
		prexor = 1;
		xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (dev->written)
				xor_srcs[count++] = dev->page;
		}
	} else {
		xor_dest = sh->dev[pd_idx].page;
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (i != pd_idx)
				xor_srcs[count++] = dev->page;
		}
	}

	/* 1/ if we prexor'd then the dest is reused as a source
	 * 2/ if we did not prexor then we are redoing the parity
	 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
	 * for the synchronous xor case
	 */
	flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK |
		(prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST);

	atomic_inc(&sh->count);

	if (unlikely(count == 1)) {
		flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST);
		tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
	} else
		tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
			flags, tx, ops_complete_postxor, sh);
}
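/*
 * In equation form (a sketch of the two paths above): for the
 * read-modify-write (prexor) case, P_new = P_old ^ D_old ^ D_new for
 * each rewritten block, the prexor pass having already xor'd out
 * D_old; for the reconstruct-write case, P_new is simply the XOR of
 * every data block in the stripe.
 */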

static void ops_complete_check(void *stripe_head_ref)
{
	struct stripe_head *sh = stripe_head_ref;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	sh->check_state = check_state_check_result;
	set_bit(STRIPE_HANDLE, &sh->state);
	release_stripe(sh);
}

static void ops_run_check(struct stripe_head *sh)
{
	/* kernel stack size limits the total number of disks */
	int disks = sh->disks;
	struct page *xor_srcs[disks];
	struct dma_async_tx_descriptor *tx;

	int count = 0, pd_idx = sh->pd_idx, i;
	struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page;

	pr_debug("%s: stripe %llu\n", __func__,
		(unsigned long long)sh->sector);

	for (i = disks; i--; ) {
		struct r5dev *dev = &sh->dev[i];
		if (i != pd_idx)
			xor_srcs[count++] = dev->page;
	}

	tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE,
		&sh->ops.zero_sum_result, 0, NULL, NULL, NULL);

	atomic_inc(&sh->count);
	tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx,
		ops_complete_check, sh);
}

static void raid5_run_ops(struct stripe_head *sh, unsigned long ops_request)
{
	int overlap_clear = 0, i, disks = sh->disks;
	struct dma_async_tx_descriptor *tx = NULL;

	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
		ops_run_biofill(sh);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) {
		tx = ops_run_compute5(sh);
		/* terminate the chain if postxor is not set to be run */
		if (tx && !test_bit(STRIPE_OP_POSTXOR, &ops_request))
			async_tx_ack(tx);
	}

	if (test_bit(STRIPE_OP_PREXOR, &ops_request))
		tx = ops_run_prexor(sh, tx);

	if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) {
		tx = ops_run_biodrain(sh, tx);
		overlap_clear++;
	}

	if (test_bit(STRIPE_OP_POSTXOR, &ops_request))
		ops_run_postxor(sh, tx);

	if (test_bit(STRIPE_OP_CHECK, &ops_request))
		ops_run_check(sh);

	if (overlap_clear)
		for (i = disks; i--; ) {
			struct r5dev *dev = &sh->dev[i];
			if (test_and_clear_bit(R5_Overlap, &dev->flags))
				wake_up(&sh->raid_conf->wait_for_overlap);
		}
}

static int grow_one_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;
	memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev));
	sh->raid_conf = conf;
	spin_lock_init(&sh->lock);

	if (grow_buffers(sh, conf->raid_disks)) {
		shrink_buffers(sh, conf->raid_disks);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	sh->disks = conf->raid_disks;
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0],
		"raid%d-%s", conf->level, mdname(conf->mddev));
	sprintf(conf->cache_name[1],
		"raid%d-%s-alt", conf->level, mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	err = md_allow_write(conf->mddev);
	if (err)
		return err;

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
	 * OK, we have enough stripes, start collecting inactive
	 * stripes and copying them over
	 */
	list_for_each_entry(nsh, &newstripes, lru) {
		spin_lock_irq(&conf->device_lock);
		wait_event_lock_irq(conf->wait_for_stripe,
				    !list_empty(&conf->inactive_list),
				    conf->device_lock,
				    unplug_slaves(conf->mddev)
			);
		osh = get_free_stripe(conf);
		spin_unlock_irq(&conf->device_lock);
		atomic_set(&nsh->count, 1);
		for(i=0; i<conf->pool_size; i++)
			nsh->dev[i].page = osh->dev[i].page;
		for( ; i<newsize; i++)
			nsh->dev[i].page = NULL;
		kmem_cache_free(conf->slab_cache, osh);
	}
	kmem_cache_destroy(conf->slab_cache);

	/* Step 3.
	 * At this point, we are holding all the stripes so the array
	 * is completely stalled, so now is a good time to resize
	 * conf->disks.
	 */
	ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO);
	if (ndisks) {
		for (i=0; i<conf->raid_disks; i++)
			ndisks[i] = conf->disks[i];
		kfree(conf->disks);
		conf->disks = ndisks;
	} else
		err = -ENOMEM;

	/* Step 4, return new stripes to service */
	while(!list_empty(&newstripes)) {
		nsh = list_entry(newstripes.next, struct stripe_head, lru);
		list_del_init(&nsh->lru);
		for (i=conf->raid_disks; i < newsize; i++)
			if (nsh->dev[i].page == NULL) {
				struct page *p = alloc_page(GFP_NOIO);
				nsh->dev[i].page = p;
				if (!p)
					err = -ENOMEM;
			}
		release_stripe(nsh);
	}
| 1068 | /* critical section passed, GFP_NOIO no longer needed */
| 1069 | |
| 1070 | conf->slab_cache = sc; |
| 1071 | conf->active_name = 1-conf->active_name; |
| 1072 | conf->pool_size = newsize; |
| 1073 | return err; |
| 1074 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 1075 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1076 | |
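/*
 * Remove a single inactive stripe_head from the pool: free its pages and
 * return it to the slab, decrementing conf->active_stripes.  Returns 0 once
 * no inactive stripes are left, which is how shrink_stripes() below knows
 * when to stop.
 */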
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1077 | static int drop_one_stripe(raid5_conf_t *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1078 | { |
| 1079 | struct stripe_head *sh; |
| 1080 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1081 | spin_lock_irq(&conf->device_lock); |
| 1082 | sh = get_free_stripe(conf); |
| 1083 | spin_unlock_irq(&conf->device_lock); |
| 1084 | if (!sh) |
| 1085 | return 0; |
Eric Sesterhenn | 78bafeb | 2006-04-02 13:31:42 +0200 | [diff] [blame] | 1086 | BUG_ON(atomic_read(&sh->count)); |
NeilBrown | ad01c9e | 2006-03-27 01:18:07 -0800 | [diff] [blame] | 1087 | shrink_buffers(sh, conf->pool_size); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 1088 | kmem_cache_free(conf->slab_cache, sh); |
| 1089 | atomic_dec(&conf->active_stripes); |
| 1090 | return 1; |
| 1091 | } |
| 1092 | |
| 1093 | static void shrink_stripes(raid5_conf_t *conf) |
| 1094 | { |
| 1095 | while (drop_one_stripe(conf)) |
| 1096 | ; |
| 1097 | |
NeilBrown | 29fc7e3 | 2006-02-03 03:03:41 -0800 | [diff] [blame] | 1098 | if (conf->slab_cache) |
| 1099 | kmem_cache_destroy(conf->slab_cache); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1100 | conf->slab_cache = NULL; |
| 1101 | } |
| 1102 | |
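/*
 * Completion handler for a per-device read issued from the stripe cache.
 * On success any earlier R5_ReadError/R5_ReWrite state is cleared (a retried
 * read worked) and the rdev's read_errors count is reset.  On failure the
 * read is either scheduled for retry by setting R5_ReadError, or given up on
 * via md_error() when the array is already degraded, a rewrite has already
 * failed, or the device has accumulated too many read errors.
 */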
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 1103 | static void raid5_end_read_request(struct bio * bi, int error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1105 | struct stripe_head *sh = bi->bi_private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1106 | raid5_conf_t *conf = sh->raid_conf; |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 1107 | int disks = sh->disks, i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1109 | char b[BDEVNAME_SIZE]; |
| 1110 | mdk_rdev_t *rdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1111 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1112 | |
| 1113 | for (i=0 ; i<disks; i++) |
| 1114 | if (bi == &sh->dev[i].req) |
| 1115 | break; |
| 1116 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1117 | pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", |
| 1118 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1119 | uptodate); |
| 1120 | if (i == disks) { |
| 1121 | BUG(); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 1122 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | } |
| 1124 | |
| 1125 | if (uptodate) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1126 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1127 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1128 | rdev = conf->disks[i].rdev; |
Bernd Schubert | 6be9d49 | 2008-05-23 13:04:34 -0700 | [diff] [blame] | 1129 | printk_rl(KERN_INFO "raid5:%s: read error corrected" |
| 1130 | " (%lu sectors at %llu on %s)\n", |
| 1131 | mdname(conf->mddev), STRIPE_SECTORS, |
| 1132 | (unsigned long long)(sh->sector |
| 1133 | + rdev->data_offset), |
| 1134 | bdevname(rdev->bdev, b)); |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1135 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
| 1136 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
| 1137 | } |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1138 | if (atomic_read(&conf->disks[i].rdev->read_errors)) |
| 1139 | atomic_set(&conf->disks[i].rdev->read_errors, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1140 | } else { |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1141 | const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1142 | int retry = 0; |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1143 | rdev = conf->disks[i].rdev; |
| 1144 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | clear_bit(R5_UPTODATE, &sh->dev[i].flags); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1146 | atomic_inc(&rdev->read_errors); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1147 | if (conf->mddev->degraded) |
Bernd Schubert | 6be9d49 | 2008-05-23 13:04:34 -0700 | [diff] [blame] | 1148 | printk_rl(KERN_WARNING |
| 1149 | "raid5:%s: read error not correctable " |
| 1150 | "(sector %llu on %s).\n", |
| 1151 | mdname(conf->mddev), |
| 1152 | (unsigned long long)(sh->sector |
| 1153 | + rdev->data_offset), |
| 1154 | bdn); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1155 | else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1156 | /* Oh, no!!! */ |
Bernd Schubert | 6be9d49 | 2008-05-23 13:04:34 -0700 | [diff] [blame] | 1157 | printk_rl(KERN_WARNING |
| 1158 | "raid5:%s: read error NOT corrected!! " |
| 1159 | "(sector %llu on %s).\n", |
| 1160 | mdname(conf->mddev), |
| 1161 | (unsigned long long)(sh->sector |
| 1162 | + rdev->data_offset), |
| 1163 | bdn); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1164 | else if (atomic_read(&rdev->read_errors) |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1165 | > conf->max_nr_stripes) |
NeilBrown | 14f8d26 | 2006-01-06 00:20:14 -0800 | [diff] [blame] | 1166 | printk(KERN_WARNING |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1167 | "raid5:%s: Too many read errors, failing device %s.\n", |
| 1168 | mdname(conf->mddev), bdn); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1169 | else |
| 1170 | retry = 1; |
| 1171 | if (retry) |
| 1172 | set_bit(R5_ReadError, &sh->dev[i].flags); |
| 1173 | else { |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 1174 | clear_bit(R5_ReadError, &sh->dev[i].flags); |
| 1175 | clear_bit(R5_ReWrite, &sh->dev[i].flags); |
NeilBrown | d695043 | 2006-07-10 04:44:20 -0700 | [diff] [blame] | 1176 | md_error(conf->mddev, rdev); |
NeilBrown | ba22dcb | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1177 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1178 | } |
| 1179 | rdev_dec_pending(conf->disks[i].rdev, conf->mddev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1180 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1181 | set_bit(STRIPE_HANDLE, &sh->state); |
| 1182 | release_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | } |
| 1184 | |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 1185 | static void raid5_end_write_request(struct bio *bi, int error) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1186 | { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1187 | struct stripe_head *sh = bi->bi_private; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1188 | raid5_conf_t *conf = sh->raid_conf; |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 1189 | int disks = sh->disks, i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
| 1191 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1192 | for (i=0 ; i<disks; i++) |
| 1193 | if (bi == &sh->dev[i].req) |
| 1194 | break; |
| 1195 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1196 | pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | (unsigned long long)sh->sector, i, atomic_read(&sh->count), |
| 1198 | uptodate); |
| 1199 | if (i == disks) { |
| 1200 | BUG(); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 1201 | return; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | } |
| 1203 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1204 | if (!uptodate) |
| 1205 | md_error(conf->mddev, conf->disks[i].rdev); |
| 1206 | |
| 1207 | rdev_dec_pending(conf->disks[i].rdev, conf->mddev); |
| 1208 | |
| 1209 | clear_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1210 | set_bit(STRIPE_HANDLE, &sh->state); |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 1211 | release_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1212 | } |
| 1213 | |
| 1214 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1215 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1216 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1217 | static void raid5_build_block(struct stripe_head *sh, int i, int previous) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1218 | { |
| 1219 | struct r5dev *dev = &sh->dev[i]; |
| 1220 | |
| 1221 | bio_init(&dev->req); |
| 1222 | dev->req.bi_io_vec = &dev->vec; |
| 1223 | dev->req.bi_vcnt++; |
| 1224 | dev->req.bi_max_vecs++; |
| 1225 | dev->vec.bv_page = dev->page; |
| 1226 | dev->vec.bv_len = STRIPE_SIZE; |
| 1227 | dev->vec.bv_offset = 0; |
| 1228 | |
| 1229 | dev->req.bi_sector = sh->sector; |
| 1230 | dev->req.bi_private = sh; |
| 1231 | |
| 1232 | dev->flags = 0; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1233 | dev->sector = compute_blocknr(sh, i, previous); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1234 | } |
| 1235 | |
| 1236 | static void error(mddev_t *mddev, mdk_rdev_t *rdev) |
| 1237 | { |
| 1238 | char b[BDEVNAME_SIZE]; |
| 1239 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1240 | pr_debug("raid5: error called\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1241 | |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1242 | if (!test_bit(Faulty, &rdev->flags)) { |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 1243 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 1244 | if (test_and_clear_bit(In_sync, &rdev->flags)) { |
| 1245 | unsigned long flags; |
| 1246 | spin_lock_irqsave(&conf->device_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1247 | mddev->degraded++; |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 1248 | spin_unlock_irqrestore(&conf->device_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1249 | /* |
| 1250 | * if recovery was running, make sure it aborts. |
| 1251 | */ |
NeilBrown | dfc7064 | 2008-05-23 13:04:39 -0700 | [diff] [blame] | 1252 | set_bit(MD_RECOVERY_INTR, &mddev->recovery); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1253 | } |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 1254 | set_bit(Faulty, &rdev->flags); |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 1255 | printk(KERN_ALERT |
| 1256 | "raid5: Disk failure on %s, disabling device.\n" |
| 1257 | "raid5: Operation continuing on %d devices.\n", |
| 1258 | bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1259 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1260 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | |
| 1262 | /* |
| 1263 | * Input: a 'big' sector number, |
| 1264 | * Output: index of the data and parity disk, and the sector # in them. |
| 1265 | */ |
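/*
 * Worked example (hypothetical geometry, not taken from this file): a
 * 4-device RAID5 with 64KiB chunks (128 sectors) and the default
 * left-symmetric layout.  For r_sector = 300: chunk_offset = 300 % 128 = 44,
 * chunk_number = 2, stripe = 2 / 3 = 0 and dd_idx = 2 % 3 = 2.  Then
 * pd_idx = 3 - (0 % 4) = 3 and dd_idx becomes (3 + 1 + 2) % 4 = 2, so the
 * data lives on device 2 and parity on device 3, both at
 * new_sector = 0 * 128 + 44 = 44.
 */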
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1266 | static sector_t raid5_compute_sector(raid5_conf_t *conf, sector_t r_sector, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1267 | int previous, int *dd_idx, |
| 1268 | struct stripe_head *sh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1269 | { |
| 1270 | long stripe; |
| 1271 | unsigned long chunk_number; |
| 1272 | unsigned int chunk_offset; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1273 | int pd_idx, qd_idx; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1274 | int ddf_layout = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1275 | sector_t new_sector; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1276 | int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) |
| 1277 | : (conf->chunk_size >> 9); |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1278 | int raid_disks = previous ? conf->previous_raid_disks |
| 1279 | : conf->raid_disks; |
| 1280 | int data_disks = raid_disks - conf->max_degraded; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1281 | |
| 1282 | /* First compute the information on this sector */ |
| 1283 | |
| 1284 | /* |
| 1285 | * Compute the chunk number and the sector offset inside the chunk |
| 1286 | */ |
| 1287 | chunk_offset = sector_div(r_sector, sectors_per_chunk); |
| 1288 | chunk_number = r_sector; |
| 1289 | BUG_ON(r_sector != chunk_number); |
| 1290 | |
| 1291 | /* |
| 1292 | * Compute the stripe number |
| 1293 | */ |
| 1294 | stripe = chunk_number / data_disks; |
| 1295 | |
| 1296 | /* |
| 1297 | * Compute the data disk and parity disk indexes inside the stripe |
| 1298 | */ |
| 1299 | *dd_idx = chunk_number % data_disks; |
| 1300 | |
| 1301 | /* |
| 1302 | * Select the parity disk based on the user selected algorithm. |
| 1303 | */ |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1304 | pd_idx = qd_idx = ~0; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1305 | switch(conf->level) { |
| 1306 | case 4: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1307 | pd_idx = data_disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1308 | break; |
| 1309 | case 5: |
| 1310 | switch (conf->algorithm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1311 | case ALGORITHM_LEFT_ASYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1312 | pd_idx = data_disks - stripe % raid_disks; |
| 1313 | if (*dd_idx >= pd_idx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1314 | (*dd_idx)++; |
| 1315 | break; |
| 1316 | case ALGORITHM_RIGHT_ASYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1317 | pd_idx = stripe % raid_disks; |
| 1318 | if (*dd_idx >= pd_idx) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | (*dd_idx)++; |
| 1320 | break; |
| 1321 | case ALGORITHM_LEFT_SYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1322 | pd_idx = data_disks - stripe % raid_disks; |
| 1323 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1324 | break; |
| 1325 | case ALGORITHM_RIGHT_SYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1326 | pd_idx = stripe % raid_disks; |
| 1327 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1328 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1329 | case ALGORITHM_PARITY_0: |
| 1330 | pd_idx = 0; |
| 1331 | (*dd_idx)++; |
| 1332 | break; |
| 1333 | case ALGORITHM_PARITY_N: |
| 1334 | pd_idx = data_disks; |
| 1335 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | default: |
NeilBrown | 14f8d26 | 2006-01-06 00:20:14 -0800 | [diff] [blame] | 1337 | printk(KERN_ERR "raid5: unsupported algorithm %d\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | conf->algorithm); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1339 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1340 | } |
| 1341 | break; |
| 1342 | case 6: |
| 1343 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1344 | switch (conf->algorithm) { |
| 1345 | case ALGORITHM_LEFT_ASYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1346 | pd_idx = raid_disks - 1 - (stripe % raid_disks); |
| 1347 | qd_idx = pd_idx + 1; |
| 1348 | if (pd_idx == raid_disks-1) { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1349 | (*dd_idx)++; /* Q D D D P */ |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1350 | qd_idx = 0; |
| 1351 | } else if (*dd_idx >= pd_idx) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1352 | (*dd_idx) += 2; /* D D P Q D */ |
| 1353 | break; |
| 1354 | case ALGORITHM_RIGHT_ASYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1355 | pd_idx = stripe % raid_disks; |
| 1356 | qd_idx = pd_idx + 1; |
| 1357 | if (pd_idx == raid_disks-1) { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1358 | (*dd_idx)++; /* Q D D D P */ |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1359 | qd_idx = 0; |
| 1360 | } else if (*dd_idx >= pd_idx) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1361 | (*dd_idx) += 2; /* D D P Q D */ |
| 1362 | break; |
| 1363 | case ALGORITHM_LEFT_SYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1364 | pd_idx = raid_disks - 1 - (stripe % raid_disks); |
| 1365 | qd_idx = (pd_idx + 1) % raid_disks; |
| 1366 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1367 | break; |
| 1368 | case ALGORITHM_RIGHT_SYMMETRIC: |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1369 | pd_idx = stripe % raid_disks; |
| 1370 | qd_idx = (pd_idx + 1) % raid_disks; |
| 1371 | *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1372 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1373 | |
| 1374 | case ALGORITHM_PARITY_0: |
| 1375 | pd_idx = 0; |
| 1376 | qd_idx = 1; |
| 1377 | (*dd_idx) += 2; |
| 1378 | break; |
| 1379 | case ALGORITHM_PARITY_N: |
| 1380 | pd_idx = data_disks; |
| 1381 | qd_idx = data_disks + 1; |
| 1382 | break; |
| 1383 | |
| 1384 | case ALGORITHM_ROTATING_ZERO_RESTART: |
| 1385 | /* Exactly the same as RIGHT_ASYMMETRIC, but the order
| 1386 | * of blocks for computing Q is different. |
| 1387 | */ |
| 1388 | pd_idx = stripe % raid_disks; |
| 1389 | qd_idx = pd_idx + 1; |
| 1390 | if (pd_idx == raid_disks-1) { |
| 1391 | (*dd_idx)++; /* Q D D D P */ |
| 1392 | qd_idx = 0; |
| 1393 | } else if (*dd_idx >= pd_idx) |
| 1394 | (*dd_idx) += 2; /* D D P Q D */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1395 | ddf_layout = 1; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1396 | break; |
| 1397 | |
| 1398 | case ALGORITHM_ROTATING_N_RESTART: |
| 1399 | /* Same as left_asymmetric, but the first stripe is
| 1400 | * D D D P Q rather than |
| 1401 | * Q D D D P |
| 1402 | */ |
| 1403 | pd_idx = raid_disks - 1 - ((stripe + 1) % raid_disks); |
| 1404 | qd_idx = pd_idx + 1; |
| 1405 | if (pd_idx == raid_disks-1) { |
| 1406 | (*dd_idx)++; /* Q D D D P */ |
| 1407 | qd_idx = 0; |
| 1408 | } else if (*dd_idx >= pd_idx) |
| 1409 | (*dd_idx) += 2; /* D D P Q D */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1410 | ddf_layout = 1; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1411 | break; |
| 1412 | |
| 1413 | case ALGORITHM_ROTATING_N_CONTINUE: |
| 1414 | /* Same as left_symmetric but Q is before P */ |
| 1415 | pd_idx = raid_disks - 1 - (stripe % raid_disks); |
| 1416 | qd_idx = (pd_idx + raid_disks - 1) % raid_disks; |
| 1417 | *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1418 | ddf_layout = 1; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1419 | break; |
| 1420 | |
| 1421 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 1422 | /* RAID5 left_asymmetric, with Q on last device */ |
| 1423 | pd_idx = data_disks - stripe % (raid_disks-1); |
| 1424 | if (*dd_idx >= pd_idx) |
| 1425 | (*dd_idx)++; |
| 1426 | qd_idx = raid_disks - 1; |
| 1427 | break; |
| 1428 | |
| 1429 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 1430 | pd_idx = stripe % (raid_disks-1); |
| 1431 | if (*dd_idx >= pd_idx) |
| 1432 | (*dd_idx)++; |
| 1433 | qd_idx = raid_disks - 1; |
| 1434 | break; |
| 1435 | |
| 1436 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 1437 | pd_idx = data_disks - stripe % (raid_disks-1); |
| 1438 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 1439 | qd_idx = raid_disks - 1; |
| 1440 | break; |
| 1441 | |
| 1442 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 1443 | pd_idx = stripe % (raid_disks-1); |
| 1444 | *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); |
| 1445 | qd_idx = raid_disks - 1; |
| 1446 | break; |
| 1447 | |
| 1448 | case ALGORITHM_PARITY_0_6: |
| 1449 | pd_idx = 0; |
| 1450 | (*dd_idx)++; |
| 1451 | qd_idx = raid_disks - 1; |
| 1452 | break; |
| 1453 | |
| 1454 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1455 | default: |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 1456 | printk(KERN_CRIT "raid6: unsupported algorithm %d\n", |
| 1457 | conf->algorithm); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1458 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1459 | } |
| 1460 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | } |
| 1462 | |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1463 | if (sh) { |
| 1464 | sh->pd_idx = pd_idx; |
| 1465 | sh->qd_idx = qd_idx; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1466 | sh->ddf_layout = ddf_layout; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1467 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1468 | /* |
| 1469 | * Finally, compute the new sector number |
| 1470 | */ |
| 1471 | new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; |
| 1472 | return new_sector; |
| 1473 | } |
| 1474 | |
| 1475 | |
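/*
 * compute_blocknr() is the inverse of raid5_compute_sector(): given a stripe
 * and a device index it reconstructs the array-wide sector number, then
 * re-runs the forward mapping as a sanity check and returns 0 if the two
 * disagree (or if 'i' refers to a parity device).
 */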
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1476 | static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1477 | { |
| 1478 | raid5_conf_t *conf = sh->raid_conf; |
NeilBrown | b875e53 | 2006-12-10 02:20:49 -0800 | [diff] [blame] | 1479 | int raid_disks = sh->disks; |
| 1480 | int data_disks = raid_disks - conf->max_degraded; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1481 | sector_t new_sector = sh->sector, check; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1482 | int sectors_per_chunk = previous ? (conf->prev_chunk >> 9) |
| 1483 | : (conf->chunk_size >> 9); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1484 | sector_t stripe; |
| 1485 | int chunk_offset; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1486 | int chunk_number, dummy1, dd_idx = i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1487 | sector_t r_sector; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1488 | struct stripe_head sh2; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1489 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1490 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1491 | chunk_offset = sector_div(new_sector, sectors_per_chunk); |
| 1492 | stripe = new_sector; |
| 1493 | BUG_ON(new_sector != stripe); |
| 1494 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1495 | if (i == sh->pd_idx) |
| 1496 | return 0; |
| 1497 | switch(conf->level) { |
| 1498 | case 4: break; |
| 1499 | case 5: |
| 1500 | switch (conf->algorithm) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1501 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 1502 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 1503 | if (i > sh->pd_idx) |
| 1504 | i--; |
| 1505 | break; |
| 1506 | case ALGORITHM_LEFT_SYMMETRIC: |
| 1507 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 1508 | if (i < sh->pd_idx) |
| 1509 | i += raid_disks; |
| 1510 | i -= (sh->pd_idx + 1); |
| 1511 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1512 | case ALGORITHM_PARITY_0: |
| 1513 | i -= 1; |
| 1514 | break; |
| 1515 | case ALGORITHM_PARITY_N: |
| 1516 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1517 | default: |
NeilBrown | 14f8d26 | 2006-01-06 00:20:14 -0800 | [diff] [blame] | 1518 | printk(KERN_ERR "raid5: unsupported algorithm %d\n", |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1519 | conf->algorithm); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1520 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1521 | } |
| 1522 | break; |
| 1523 | case 6: |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1524 | if (i == sh->qd_idx) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1525 | return 0; /* It is the Q disk */ |
| 1526 | switch (conf->algorithm) { |
| 1527 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 1528 | case ALGORITHM_RIGHT_ASYMMETRIC: |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1529 | case ALGORITHM_ROTATING_ZERO_RESTART: |
| 1530 | case ALGORITHM_ROTATING_N_RESTART: |
| 1531 | if (sh->pd_idx == raid_disks-1) |
| 1532 | i--; /* Q D D D P */ |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1533 | else if (i > sh->pd_idx) |
| 1534 | i -= 2; /* D D P Q D */ |
| 1535 | break; |
| 1536 | case ALGORITHM_LEFT_SYMMETRIC: |
| 1537 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 1538 | if (sh->pd_idx == raid_disks-1) |
| 1539 | i--; /* Q D D D P */ |
| 1540 | else { |
| 1541 | /* D D P Q D */ |
| 1542 | if (i < sh->pd_idx) |
| 1543 | i += raid_disks; |
| 1544 | i -= (sh->pd_idx + 2); |
| 1545 | } |
| 1546 | break; |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1547 | case ALGORITHM_PARITY_0: |
| 1548 | i -= 2; |
| 1549 | break; |
| 1550 | case ALGORITHM_PARITY_N: |
| 1551 | break; |
| 1552 | case ALGORITHM_ROTATING_N_CONTINUE: |
| 1553 | if (sh->pd_idx == 0) |
| 1554 | i--; /* P D D D Q */ |
| 1555 | else if (i > sh->pd_idx) |
| 1556 | i -= 2; /* D D Q P D */ |
| 1557 | break; |
| 1558 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 1559 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 1560 | if (i > sh->pd_idx) |
| 1561 | i--; |
| 1562 | break; |
| 1563 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 1564 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 1565 | if (i < sh->pd_idx) |
| 1566 | i += data_disks + 1; |
| 1567 | i -= (sh->pd_idx + 1); |
| 1568 | break; |
| 1569 | case ALGORITHM_PARITY_0_6: |
| 1570 | i -= 1; |
| 1571 | break; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1572 | default: |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 1573 | printk(KERN_CRIT "raid6: unsupported algorithm %d\n", |
| 1574 | conf->algorithm); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1575 | BUG(); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1576 | } |
| 1577 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | } |
| 1579 | |
| 1580 | chunk_number = stripe * data_disks + i; |
| 1581 | r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; |
| 1582 | |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1583 | check = raid5_compute_sector(conf, r_sector, |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1584 | previous, &dummy1, &sh2); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1585 | if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx |
| 1586 | || sh2.qd_idx != sh->qd_idx) { |
NeilBrown | 14f8d26 | 2006-01-06 00:20:14 -0800 | [diff] [blame] | 1587 | printk(KERN_ERR "compute_blocknr: map not correct\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1588 | return 0; |
| 1589 | } |
| 1590 | return r_sector; |
| 1591 | } |
| 1592 | |
| 1593 | |
| 1594 | |
| 1595 | /* |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1596 | * Copy data between a page in the stripe cache, and one or more bion |
| 1597 | * The page could align with the middle of the bio, or there could be |
| 1598 | * several bion, each with several bio_vecs, which cover part of the page |
| 1599 | * Multiple bion are linked together on bi_next. There may be extras |
| 1600 | * at the end of this list. We ignore them. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1601 | */ |
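/*
 * Example of the offset handling below (illustrative numbers only): with
 * STRIPE_SIZE = 4096 and a cache page starting at 'sector', a bio that
 * begins two sectors earlier gives page_offset = -1024, so the first 1024
 * bytes of that bio_vec are skipped (b_offset) and copying starts at offset
 * 0 in the page; a bio_vec running past the page is clipped to
 * clen = STRIPE_SIZE - page_offset and the loop stops at the page end.
 */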
| 1602 | static void copy_data(int frombio, struct bio *bio, |
| 1603 | struct page *page, |
| 1604 | sector_t sector) |
| 1605 | { |
| 1606 | char *pa = page_address(page); |
| 1607 | struct bio_vec *bvl; |
| 1608 | int i; |
| 1609 | int page_offset; |
| 1610 | |
| 1611 | if (bio->bi_sector >= sector) |
| 1612 | page_offset = (signed)(bio->bi_sector - sector) * 512; |
| 1613 | else |
| 1614 | page_offset = (signed)(sector - bio->bi_sector) * -512; |
| 1615 | bio_for_each_segment(bvl, bio, i) { |
| 1616 | int len = bio_iovec_idx(bio,i)->bv_len; |
| 1617 | int clen; |
| 1618 | int b_offset = 0; |
| 1619 | |
| 1620 | if (page_offset < 0) { |
| 1621 | b_offset = -page_offset; |
| 1622 | page_offset += b_offset; |
| 1623 | len -= b_offset; |
| 1624 | } |
| 1625 | |
| 1626 | if (len > 0 && page_offset + len > STRIPE_SIZE) |
| 1627 | clen = STRIPE_SIZE - page_offset; |
| 1628 | else clen = len; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1629 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1630 | if (clen > 0) { |
| 1631 | char *ba = __bio_kmap_atomic(bio, i, KM_USER0); |
| 1632 | if (frombio) |
| 1633 | memcpy(pa+page_offset, ba+b_offset, clen); |
| 1634 | else |
| 1635 | memcpy(ba+b_offset, pa+page_offset, clen); |
| 1636 | __bio_kunmap_atomic(ba, KM_USER0); |
| 1637 | } |
| 1638 | if (clen < len) /* hit end of page */ |
| 1639 | break; |
| 1640 | page_offset += len; |
| 1641 | } |
| 1642 | } |
| 1643 | |
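/*
 * check_xor() relies on 'count', 'dest' and 'ptr[]' locals in the caller:
 * once MAX_XOR_BLOCKS source pages have been gathered it xors them into
 * 'dest' and resets the count, so callers can keep feeding pages in a loop.
 */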
Dan Williams | 9bc89cd | 2007-01-02 11:10:44 -0700 | [diff] [blame] | 1644 | #define check_xor() do { \ |
| 1645 | if (count == MAX_XOR_BLOCKS) { \ |
| 1646 | xor_blocks(count, STRIPE_SIZE, dest, ptr);\ |
| 1647 | count = 0; \ |
| 1648 | } \ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1649 | } while(0) |
| 1650 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1651 | static void compute_parity6(struct stripe_head *sh, int method) |
| 1652 | { |
NeilBrown | bff6197 | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 1653 | raid5_conf_t *conf = sh->raid_conf; |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1654 | int i, pd_idx, qd_idx, d0_idx, disks = sh->disks, count; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1655 | int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1656 | struct bio *chosen; |
| 1657 | /**** FIX THIS: This could be very bad if disks is close to 256 ****/ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1658 | void *ptrs[syndrome_disks+2]; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1659 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1660 | pd_idx = sh->pd_idx; |
| 1661 | qd_idx = sh->qd_idx; |
| 1662 | d0_idx = raid6_d0(sh); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1663 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1664 | pr_debug("compute_parity, stripe %llu, method %d\n", |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1665 | (unsigned long long)sh->sector, method); |
| 1666 | |
| 1667 | switch(method) { |
| 1668 | case READ_MODIFY_WRITE: |
| 1669 | BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ |
| 1670 | case RECONSTRUCT_WRITE: |
| 1671 | for (i= disks; i-- ;) |
| 1672 | if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { |
| 1673 | chosen = sh->dev[i].towrite; |
| 1674 | sh->dev[i].towrite = NULL; |
| 1675 | |
| 1676 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 1677 | wake_up(&conf->wait_for_overlap); |
| 1678 | |
Eric Sesterhenn | 52e5f9d | 2006-10-03 23:33:23 +0200 | [diff] [blame] | 1679 | BUG_ON(sh->dev[i].written); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1680 | sh->dev[i].written = chosen; |
| 1681 | } |
| 1682 | break; |
| 1683 | case CHECK_PARITY: |
| 1684 | BUG(); /* Not implemented yet */ |
| 1685 | } |
| 1686 | |
| 1687 | for (i = disks; i--;) |
| 1688 | if (sh->dev[i].written) { |
| 1689 | sector_t sector = sh->dev[i].sector; |
| 1690 | struct bio *wbi = sh->dev[i].written; |
| 1691 | while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { |
| 1692 | copy_data(1, wbi, sh->dev[i].page, sector); |
| 1693 | wbi = r5_next_bio(wbi, sector); |
| 1694 | } |
| 1695 | |
| 1696 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
| 1697 | set_bit(R5_UPTODATE, &sh->dev[i].flags); |
| 1698 | } |
| 1699 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1700 | /* Note that unlike RAID-5, the ordering of the disks matters greatly.*/ |
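	/* The ptrs[] array below is filled in syndrome order: starting from
	 * raid6_d0(sh) and following raid6_next_disk(), each device's page is
	 * placed in the slot raid6_idx_to_slot() picks for it (data blocks
	 * first, with P and Q in the final two slots for the non-DDF
	 * layouts), so the array can be handed directly to
	 * raid6_call.gen_syndrome().
	 */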
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1701 | |
| 1702 | for (i = 0; i < disks; i++) |
| 1703 | ptrs[i] = (void *)raid6_empty_zero_page; |
| 1704 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1705 | count = 0; |
| 1706 | i = d0_idx; |
| 1707 | do { |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1708 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); |
| 1709 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1710 | ptrs[slot] = page_address(sh->dev[i].page); |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1711 | if (slot < syndrome_disks && |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1712 | !test_bit(R5_UPTODATE, &sh->dev[i].flags)) { |
| 1713 | printk(KERN_ERR "block %d/%d not uptodate " |
| 1714 | "on parity calc\n", i, count); |
| 1715 | BUG(); |
| 1716 | } |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1717 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1718 | i = raid6_next_disk(i, disks); |
| 1719 | } while (i != d0_idx); |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1720 | BUG_ON(count != syndrome_disks); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1721 | |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1722 | raid6_call.gen_syndrome(syndrome_disks+2, STRIPE_SIZE, ptrs); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1723 | |
| 1724 | switch(method) { |
| 1725 | case RECONSTRUCT_WRITE: |
| 1726 | set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
| 1727 | set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); |
| 1728 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); |
| 1729 | set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); |
| 1730 | break; |
| 1731 | case UPDATE_PARITY: |
| 1732 | set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
| 1733 | set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); |
| 1734 | break; |
| 1735 | } |
| 1736 | } |
| 1737 | |
| 1738 | |
| 1739 | /* Compute one missing block */ |
| 1740 | static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) |
| 1741 | { |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 1742 | int i, count, disks = sh->disks; |
Dan Williams | 9bc89cd | 2007-01-02 11:10:44 -0700 | [diff] [blame] | 1743 | void *ptr[MAX_XOR_BLOCKS], *dest, *p; |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1744 | int qd_idx = sh->qd_idx; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1745 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1746 | pr_debug("compute_block_1, stripe %llu, idx %d\n", |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1747 | (unsigned long long)sh->sector, dd_idx); |
| 1748 | |
| 1749 | if ( dd_idx == qd_idx ) { |
| 1750 | /* We're actually computing the Q drive */ |
| 1751 | compute_parity6(sh, UPDATE_PARITY); |
| 1752 | } else { |
Dan Williams | 9bc89cd | 2007-01-02 11:10:44 -0700 | [diff] [blame] | 1753 | dest = page_address(sh->dev[dd_idx].page); |
| 1754 | if (!nozero) memset(dest, 0, STRIPE_SIZE); |
| 1755 | count = 0; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1756 | for (i = disks ; i--; ) { |
| 1757 | if (i == dd_idx || i == qd_idx) |
| 1758 | continue; |
| 1759 | p = page_address(sh->dev[i].page); |
| 1760 | if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) |
| 1761 | ptr[count++] = p; |
| 1762 | else |
| 1763 | printk("compute_block() %d, stripe %llu, %d" |
| 1764 | " not present\n", dd_idx, |
| 1765 | (unsigned long long)sh->sector, i); |
| 1766 | |
| 1767 | check_xor(); |
| 1768 | } |
Dan Williams | 9bc89cd | 2007-01-02 11:10:44 -0700 | [diff] [blame] | 1769 | if (count) |
| 1770 | xor_blocks(count, STRIPE_SIZE, dest, ptr); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1771 | if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); |
| 1772 | else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); |
| 1773 | } |
| 1774 | } |
| 1775 | |
| 1776 | /* Compute two missing blocks */ |
| 1777 | static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) |
| 1778 | { |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 1779 | int i, count, disks = sh->disks; |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1780 | int syndrome_disks = sh->ddf_layout ? disks : disks-2; |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1781 | int d0_idx = raid6_d0(sh); |
| 1782 | int faila = -1, failb = -1; |
| 1783 | /**** FIX THIS: This could be very bad if disks is close to 256 ****/ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1784 | void *ptrs[syndrome_disks+2]; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1785 | |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1786 | for (i = 0; i < disks ; i++) |
| 1787 | ptrs[i] = (void *)raid6_empty_zero_page; |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1788 | count = 0; |
| 1789 | i = d0_idx; |
| 1790 | do { |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1791 | int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); |
| 1792 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1793 | ptrs[slot] = page_address(sh->dev[i].page); |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1794 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1795 | if (i == dd_idx1) |
| 1796 | faila = slot; |
| 1797 | if (i == dd_idx2) |
| 1798 | failb = slot; |
| 1799 | i = raid6_next_disk(i, disks); |
| 1800 | } while (i != d0_idx); |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1801 | BUG_ON(count != syndrome_disks); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1802 | |
| 1803 | BUG_ON(faila == failb); |
| 1804 | if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } |
| 1805 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1806 | pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1807 | (unsigned long long)sh->sector, dd_idx1, dd_idx2, |
| 1808 | faila, failb); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1809 | |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1810 | if (failb == syndrome_disks+1) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1811 | /* Q disk is one of the missing disks */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1812 | if (faila == syndrome_disks) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1813 | /* Missing P+Q, just recompute */ |
| 1814 | compute_parity6(sh, UPDATE_PARITY); |
| 1815 | return; |
| 1816 | } else { |
| 1817 | /* We're missing D+Q; recompute D from P */ |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1818 | compute_block_1(sh, ((dd_idx1 == sh->qd_idx) ? |
| 1819 | dd_idx2 : dd_idx1), |
| 1820 | 0); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1821 | compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ |
| 1822 | return; |
| 1823 | } |
| 1824 | } |
| 1825 | |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1826 | /* We're missing D+P or D+D; */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1827 | if (failb == syndrome_disks) { |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1828 | /* We're missing D+P. */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1829 | raid6_datap_recov(syndrome_disks+2, STRIPE_SIZE, faila, ptrs); |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1830 | } else { |
| 1831 | /* We're missing D+D. */ |
NeilBrown | 67cc2b8 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1832 | raid6_2data_recov(syndrome_disks+2, STRIPE_SIZE, faila, failb, |
| 1833 | ptrs); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1834 | } |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1835 | |
| 1836 | /* Both the above update both missing blocks */ |
| 1837 | set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); |
| 1838 | set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1839 | } |
| 1840 | |
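/*
 * Set up the asynchronous operations needed to compute new parity for a
 * stripe.  'rcw' selects reconstruct-write: drain the new data into the
 * cache and recompute parity from all blocks.  Otherwise a read-modify-write
 * is scheduled, pre-xoring the old data out of the parity first (PREXOR).
 * In both cases the parity block stays locked until the operations complete.
 */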
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1841 | static void |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 1842 | schedule_reconstruction5(struct stripe_head *sh, struct stripe_head_state *s, |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1843 | int rcw, int expand) |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1844 | { |
| 1845 | int i, pd_idx = sh->pd_idx, disks = sh->disks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1846 | |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1847 | if (rcw) { |
| 1848 | /* if we are not expanding this is a proper write request, and |
| 1849 | * there will be bios with new data to be drained into the |
| 1850 | * stripe cache |
| 1851 | */ |
| 1852 | if (!expand) { |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1853 | sh->reconstruct_state = reconstruct_state_drain_run; |
| 1854 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
| 1855 | } else |
| 1856 | sh->reconstruct_state = reconstruct_state_run; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1857 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1858 | set_bit(STRIPE_OP_POSTXOR, &s->ops_request); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1859 | |
| 1860 | for (i = disks; i--; ) { |
| 1861 | struct r5dev *dev = &sh->dev[i]; |
| 1862 | |
| 1863 | if (dev->towrite) { |
| 1864 | set_bit(R5_LOCKED, &dev->flags); |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1865 | set_bit(R5_Wantdrain, &dev->flags); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1866 | if (!expand) |
| 1867 | clear_bit(R5_UPTODATE, &dev->flags); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1868 | s->locked++; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1869 | } |
| 1870 | } |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1871 | if (s->locked + 1 == disks) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 1872 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 1873 | atomic_inc(&sh->raid_conf->pending_full_writes); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1874 | } else { |
| 1875 | BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || |
| 1876 | test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); |
| 1877 | |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1878 | sh->reconstruct_state = reconstruct_state_prexor_drain_run; |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1879 | set_bit(STRIPE_OP_PREXOR, &s->ops_request); |
| 1880 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); |
| 1881 | set_bit(STRIPE_OP_POSTXOR, &s->ops_request); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1882 | |
| 1883 | for (i = disks; i--; ) { |
| 1884 | struct r5dev *dev = &sh->dev[i]; |
| 1885 | if (i == pd_idx) |
| 1886 | continue; |
| 1887 | |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1888 | if (dev->towrite && |
| 1889 | (test_bit(R5_UPTODATE, &dev->flags) || |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 1890 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 1891 | set_bit(R5_Wantdrain, &dev->flags); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1892 | set_bit(R5_LOCKED, &dev->flags); |
| 1893 | clear_bit(R5_UPTODATE, &dev->flags); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1894 | s->locked++; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1895 | } |
| 1896 | } |
| 1897 | } |
| 1898 | |
| 1899 | /* keep the parity disk locked while asynchronous operations |
| 1900 | * are in flight |
| 1901 | */ |
| 1902 | set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); |
| 1903 | clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1904 | s->locked++; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1905 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1906 | pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", |
Harvey Harrison | e46b272 | 2008-04-28 02:15:50 -0700 | [diff] [blame] | 1907 | __func__, (unsigned long long)sh->sector, |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 1908 | s->locked, s->ops_request); |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 1909 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1910 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1911 | /* |
| 1912 | * Each stripe/dev can have one or more bion attached. |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1913 | * toread/towrite point to the first in a chain. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1914 | * The bi_next chain must be in order. |
| 1915 | */ |
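/*
 * Example of the "page is covered" check at the end of add_stripe_bio()
 * (illustrative sector numbers only): with STRIPE_SECTORS = 8 and
 * dev->sector = 1000, a towrite chain of two bios covering sectors
 * 1000-1003 and 1004-1007 advances 'sector' to 1008, which reaches
 * dev->sector + STRIPE_SECTORS, so R5_OVERWRITE is set and the old contents
 * of that block never need to be read.
 */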
| 1916 | static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) |
| 1917 | { |
| 1918 | struct bio **bip; |
| 1919 | raid5_conf_t *conf = sh->raid_conf; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 1920 | int firstwrite=0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1921 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1922 | pr_debug("adding bh b#%llu to stripe s#%llu\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1923 | (unsigned long long)bi->bi_sector, |
| 1924 | (unsigned long long)sh->sector); |
| 1925 | |
| 1926 | |
| 1927 | spin_lock(&sh->lock); |
| 1928 | spin_lock_irq(&conf->device_lock); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 1929 | if (forwrite) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1930 | bip = &sh->dev[dd_idx].towrite; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 1931 | if (*bip == NULL && sh->dev[dd_idx].written == NULL) |
| 1932 | firstwrite = 1; |
| 1933 | } else |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1934 | bip = &sh->dev[dd_idx].toread; |
| 1935 | while (*bip && (*bip)->bi_sector < bi->bi_sector) { |
| 1936 | if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) |
| 1937 | goto overlap; |
| 1938 | bip = & (*bip)->bi_next; |
| 1939 | } |
| 1940 | if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) |
| 1941 | goto overlap; |
| 1942 | |
Eric Sesterhenn | 78bafeb | 2006-04-02 13:31:42 +0200 | [diff] [blame] | 1943 | BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1944 | if (*bip) |
| 1945 | bi->bi_next = *bip; |
| 1946 | *bip = bi; |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 1947 | bi->bi_phys_segments++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1948 | spin_unlock_irq(&conf->device_lock); |
| 1949 | spin_unlock(&sh->lock); |
| 1950 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 1951 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1952 | (unsigned long long)bi->bi_sector, |
| 1953 | (unsigned long long)sh->sector, dd_idx); |
| 1954 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 1955 | if (conf->mddev->bitmap && firstwrite) { |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 1956 | bitmap_startwrite(conf->mddev->bitmap, sh->sector, |
| 1957 | STRIPE_SECTORS, 0); |
NeilBrown | ae3c20c | 2006-07-10 04:44:17 -0700 | [diff] [blame] | 1958 | sh->bm_seq = conf->seq_flush+1; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 1959 | set_bit(STRIPE_BIT_DELAY, &sh->state); |
| 1960 | } |
| 1961 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1962 | if (forwrite) { |
| 1963 | /* check if page is covered */ |
| 1964 | sector_t sector = sh->dev[dd_idx].sector; |
| 1965 | for (bi=sh->dev[dd_idx].towrite; |
| 1966 | sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && |
| 1967 | bi && bi->bi_sector <= sector; |
| 1968 | bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { |
| 1969 | if (bi->bi_sector + (bi->bi_size>>9) >= sector) |
| 1970 | sector = bi->bi_sector + (bi->bi_size>>9); |
| 1971 | } |
| 1972 | if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) |
| 1973 | set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); |
| 1974 | } |
| 1975 | return 1; |
| 1976 | |
| 1977 | overlap: |
| 1978 | set_bit(R5_Overlap, &sh->dev[dd_idx].flags); |
| 1979 | spin_unlock_irq(&conf->device_lock); |
| 1980 | spin_unlock(&sh->lock); |
| 1981 | return 0; |
| 1982 | } |
| 1983 | |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 1984 | static void end_reshape(raid5_conf_t *conf); |
| 1985 | |
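/*
 * page_is_zero(): the page is all zeroes iff its first 32-bit word is zero
 * and every byte matches the byte four positions before it (the memcmp of
 * the page against itself shifted by 4), avoiding a second scratch buffer.
 */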
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 1986 | static int page_is_zero(struct page *p) |
| 1987 | { |
| 1988 | char *a = page_address(p); |
| 1989 | return ((*(u32*)a) == 0 && |
| 1990 | memcmp(a, a+4, STRIPE_SIZE-4)==0); |
| 1991 | } |
| 1992 | |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1993 | static void stripe_set_idx(sector_t stripe, raid5_conf_t *conf, int previous, |
| 1994 | struct stripe_head *sh) |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 1995 | { |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 1996 | int sectors_per_chunk = |
| 1997 | previous ? (conf->prev_chunk >> 9) |
| 1998 | : (conf->chunk_size >> 9); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 1999 | int dd_idx; |
Coywolf Qi Hunt | 2d2063c | 2006-10-03 01:15:50 -0700 | [diff] [blame] | 2000 | int chunk_offset = sector_div(stripe, sectors_per_chunk); |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2001 | int disks = previous ? conf->previous_raid_disks : conf->raid_disks; |
Coywolf Qi Hunt | 2d2063c | 2006-10-03 01:15:50 -0700 | [diff] [blame] | 2002 | |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2003 | raid5_compute_sector(conf, |
| 2004 | stripe * (disks - conf->max_degraded) |
NeilBrown | b875e53 | 2006-12-10 02:20:49 -0800 | [diff] [blame] | 2005 | *sectors_per_chunk + chunk_offset, |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2006 | previous, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2007 | &dd_idx, sh); |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2008 | } |
| 2009 | |
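/*
 * Abort I/O on a stripe that can no longer be serviced (too many failed
 * devices): all pending towrite/written bios are unlinked, marked
 * !BIO_UPTODATE and handed back via *return_bi once their last reference
 * drops; reads that cannot be satisfied from data already in the cache are
 * failed the same way, and any bitmap writes started for the stripe are
 * ended.
 */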
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2010 | static void |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2011 | handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2012 | struct stripe_head_state *s, int disks, |
| 2013 | struct bio **return_bi) |
| 2014 | { |
| 2015 | int i; |
| 2016 | for (i = disks; i--; ) { |
| 2017 | struct bio *bi; |
| 2018 | int bitmap_end = 0; |
| 2019 | |
| 2020 | if (test_bit(R5_ReadError, &sh->dev[i].flags)) { |
| 2021 | mdk_rdev_t *rdev; |
| 2022 | rcu_read_lock(); |
| 2023 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 2024 | if (rdev && test_bit(In_sync, &rdev->flags)) |
| 2025 | /* multiple read failures in one stripe */ |
| 2026 | md_error(conf->mddev, rdev); |
| 2027 | rcu_read_unlock(); |
| 2028 | } |
| 2029 | spin_lock_irq(&conf->device_lock); |
| 2030 | /* fail all writes first */ |
| 2031 | bi = sh->dev[i].towrite; |
| 2032 | sh->dev[i].towrite = NULL; |
| 2033 | if (bi) { |
| 2034 | s->to_write--; |
| 2035 | bitmap_end = 1; |
| 2036 | } |
| 2037 | |
| 2038 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2039 | wake_up(&conf->wait_for_overlap); |
| 2040 | |
| 2041 | while (bi && bi->bi_sector < |
| 2042 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2043 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
| 2044 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2045 | if (!raid5_dec_bi_phys_segments(bi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2046 | md_write_end(conf->mddev); |
| 2047 | bi->bi_next = *return_bi; |
| 2048 | *return_bi = bi; |
| 2049 | } |
| 2050 | bi = nextbi; |
| 2051 | } |
| 2052 | /* and fail all 'written' */ |
| 2053 | bi = sh->dev[i].written; |
| 2054 | sh->dev[i].written = NULL; |
| 2055 | if (bi) bitmap_end = 1; |
| 2056 | while (bi && bi->bi_sector < |
| 2057 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2058 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
| 2059 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2060 | if (!raid5_dec_bi_phys_segments(bi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2061 | md_write_end(conf->mddev); |
| 2062 | bi->bi_next = *return_bi; |
| 2063 | *return_bi = bi; |
| 2064 | } |
| 2065 | bi = bi2; |
| 2066 | } |
| 2067 | |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2068 | /* fail any reads if this device is non-operational and |
| 2069 | * the data has not reached the cache yet. |
| 2070 | */ |
| 2071 | if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && |
| 2072 | (!test_bit(R5_Insync, &sh->dev[i].flags) || |
| 2073 | test_bit(R5_ReadError, &sh->dev[i].flags))) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2074 | bi = sh->dev[i].toread; |
| 2075 | sh->dev[i].toread = NULL; |
| 2076 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
| 2077 | wake_up(&conf->wait_for_overlap); |
| 2078 | if (bi) s->to_read--; |
| 2079 | while (bi && bi->bi_sector < |
| 2080 | sh->dev[i].sector + STRIPE_SECTORS) { |
| 2081 | struct bio *nextbi = |
| 2082 | r5_next_bio(bi, sh->dev[i].sector); |
| 2083 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2084 | if (!raid5_dec_bi_phys_segments(bi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2085 | bi->bi_next = *return_bi; |
| 2086 | *return_bi = bi; |
| 2087 | } |
| 2088 | bi = nextbi; |
| 2089 | } |
| 2090 | } |
| 2091 | spin_unlock_irq(&conf->device_lock); |
| 2092 | if (bitmap_end) |
| 2093 | bitmap_endwrite(conf->mddev->bitmap, sh->sector, |
| 2094 | STRIPE_SECTORS, 0, 0); |
| 2095 | } |
| 2096 | |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 2097 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 2098 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 2099 | md_wakeup_thread(conf->mddev->thread); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2100 | } |
| 2101 | |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2102 | /* fetch_block5 - checks the given member device to see if its data needs |
| 2103 | * to be read or computed to satisfy a request. |
| 2104 | * |
| 2105 | * Returns 1 when no more member devices need to be checked, otherwise returns |
| 2106 | * 0 to tell the loop in handle_stripe_fill5 to continue |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2107 | */ |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2108 | static int fetch_block5(struct stripe_head *sh, struct stripe_head_state *s, |
| 2109 | int disk_idx, int disks) |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2110 | { |
| 2111 | struct r5dev *dev = &sh->dev[disk_idx]; |
| 2112 | struct r5dev *failed_dev = &sh->dev[s->failed_num]; |
| 2113 | |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2114 | /* is the data in this block needed, and can we get it? */ |
| 2115 | if (!test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2116 | !test_bit(R5_UPTODATE, &dev->flags) && |
| 2117 | (dev->toread || |
| 2118 | (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || |
| 2119 | s->syncing || s->expanding || |
| 2120 | (s->failed && |
| 2121 | (failed_dev->toread || |
| 2122 | (failed_dev->towrite && |
| 2123 | !test_bit(R5_OVERWRITE, &failed_dev->flags)))))) { |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2124 | /* We would like to get this block, possibly by computing it, |
| 2125 | * otherwise read it if the backing disk is insync |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2126 | */ |
| 2127 | if ((s->uptodate == disks - 1) && |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2128 | (s->failed && disk_idx == s->failed_num)) { |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2129 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
| 2130 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2131 | set_bit(R5_Wantcompute, &dev->flags); |
| 2132 | sh->ops.target = disk_idx; |
| 2133 | s->req_compute = 1; |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2134 | /* Careful: from this point on 'uptodate' is in the eye |
| 2135 | * of raid5_run_ops which services 'compute' operations |
| 2136 | * before writes. R5_Wantcompute flags a block that will |
| 2137 | * be R5_UPTODATE by the time it is needed for a |
| 2138 | * subsequent operation. |
| 2139 | */ |
| 2140 | s->uptodate++; |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2141 | return 1; /* uptodate + compute == disks */ |
Dan Williams | 7a1fc53 | 2008-07-10 04:54:57 -0700 | [diff] [blame] | 2142 | } else if (test_bit(R5_Insync, &dev->flags)) { |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2143 | set_bit(R5_LOCKED, &dev->flags); |
| 2144 | set_bit(R5_Wantread, &dev->flags); |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2145 | s->locked++; |
| 2146 | pr_debug("Reading block %d (sync=%d)\n", disk_idx, |
| 2147 | s->syncing); |
| 2148 | } |
| 2149 | } |
| 2150 | |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2151 | return 0; |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2152 | } |
| 2153 | |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2154 | /** |
| 2155 | * handle_stripe_fill5 - read or compute data to satisfy pending requests. |
| 2156 | */ |
| 2157 | static void handle_stripe_fill5(struct stripe_head *sh, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2158 | struct stripe_head_state *s, int disks) |
| 2159 | { |
| 2160 | int i; |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2161 | |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2162 | /* look for blocks to read/compute, skip this if a compute |
| 2163 | * is already in flight, or if the stripe contents are in the |
| 2164 | * midst of changing due to a write |
| 2165 | */ |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2166 | if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2167 | !sh->reconstruct_state) |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2168 | for (i = disks; i--; ) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2169 | if (fetch_block5(sh, s, i, disks)) |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2170 | break; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2171 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2172 | } |
| 2173 | |
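/*
 * handle_stripe_fill6 - RAID-6 counterpart of handle_stripe_fill5.
 * Blocks that are needed are either read from in-sync devices or, when
 * one or two devices have failed, recomputed synchronously with
 * compute_block_1()/compute_block_2().
 */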
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2174 | static void handle_stripe_fill6(struct stripe_head *sh, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2175 | struct stripe_head_state *s, struct r6_state *r6s, |
| 2176 | int disks) |
| 2177 | { |
| 2178 | int i; |
| 2179 | for (i = disks; i--; ) { |
| 2180 | struct r5dev *dev = &sh->dev[i]; |
| 2181 | if (!test_bit(R5_LOCKED, &dev->flags) && |
| 2182 | !test_bit(R5_UPTODATE, &dev->flags) && |
| 2183 | (dev->toread || (dev->towrite && |
| 2184 | !test_bit(R5_OVERWRITE, &dev->flags)) || |
| 2185 | s->syncing || s->expanding || |
| 2186 | (s->failed >= 1 && |
| 2187 | (sh->dev[r6s->failed_num[0]].toread || |
| 2188 | s->to_write)) || |
| 2189 | (s->failed >= 2 && |
| 2190 | (sh->dev[r6s->failed_num[1]].toread || |
| 2191 | s->to_write)))) { |
| 2192 | /* we would like to get this block, possibly |
| 2193 | * by computing it, but we might not be able to |
| 2194 | */ |
Dan Williams | c337869 | 2008-06-05 22:45:54 -0700 | [diff] [blame] | 2195 | if ((s->uptodate == disks - 1) && |
| 2196 | (s->failed && (i == r6s->failed_num[0] || |
| 2197 | i == r6s->failed_num[1]))) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2198 | pr_debug("Computing stripe %llu block %d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2199 | (unsigned long long)sh->sector, i); |
| 2200 | compute_block_1(sh, i, 0); |
| 2201 | s->uptodate++; |
| 2202 | } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { |
| 2203 | /* Computing 2-failure is *very* expensive; only |
| 2204 | * do it if failed >= 2 |
| 2205 | */ |
| 2206 | int other; |
| 2207 | for (other = disks; other--; ) { |
| 2208 | if (other == i) |
| 2209 | continue; |
| 2210 | if (!test_bit(R5_UPTODATE, |
| 2211 | &sh->dev[other].flags)) |
| 2212 | break; |
| 2213 | } |
| 2214 | BUG_ON(other < 0); |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2215 | pr_debug("Computing stripe %llu blocks %d,%d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2216 | (unsigned long long)sh->sector, |
| 2217 | i, other); |
| 2218 | compute_block_2(sh, i, other); |
| 2219 | s->uptodate += 2; |
| 2220 | } else if (test_bit(R5_Insync, &dev->flags)) { |
| 2221 | set_bit(R5_LOCKED, &dev->flags); |
| 2222 | set_bit(R5_Wantread, &dev->flags); |
| 2223 | s->locked++; |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2224 | pr_debug("Reading block %d (sync=%d)\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2225 | i, s->syncing); |
| 2226 | } |
| 2227 | } |
| 2228 | } |
| 2229 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2230 | } |
| 2231 | |
| 2232 | |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2233 | /* handle_stripe_clean_event |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2234 | * any written block on an uptodate or failed drive can be returned. |
| 2235 | * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but |
| 2236 | * never LOCKED, so we don't need to test 'failed' directly. |
| 2237 | */ |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2238 | static void handle_stripe_clean_event(raid5_conf_t *conf, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2239 | struct stripe_head *sh, int disks, struct bio **return_bi) |
| 2240 | { |
| 2241 | int i; |
| 2242 | struct r5dev *dev; |
| 2243 | |
| 2244 | for (i = disks; i--; ) |
| 2245 | if (sh->dev[i].written) { |
| 2246 | dev = &sh->dev[i]; |
| 2247 | if (!test_bit(R5_LOCKED, &dev->flags) && |
| 2248 | test_bit(R5_UPTODATE, &dev->flags)) { |
| 2249 | /* We can return any write requests */ |
| 2250 | struct bio *wbi, *wbi2; |
| 2251 | int bitmap_end = 0; |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2252 | pr_debug("Return write for disc %d\n", i); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2253 | spin_lock_irq(&conf->device_lock); |
| 2254 | wbi = dev->written; |
| 2255 | dev->written = NULL; |
| 2256 | while (wbi && wbi->bi_sector < |
| 2257 | dev->sector + STRIPE_SECTORS) { |
| 2258 | wbi2 = r5_next_bio(wbi, dev->sector); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 2259 | if (!raid5_dec_bi_phys_segments(wbi)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2260 | md_write_end(conf->mddev); |
| 2261 | wbi->bi_next = *return_bi; |
| 2262 | *return_bi = wbi; |
| 2263 | } |
| 2264 | wbi = wbi2; |
| 2265 | } |
| 2266 | if (dev->towrite == NULL) |
| 2267 | bitmap_end = 1; |
| 2268 | spin_unlock_irq(&conf->device_lock); |
| 2269 | if (bitmap_end) |
| 2270 | bitmap_endwrite(conf->mddev->bitmap, |
| 2271 | sh->sector, |
| 2272 | STRIPE_SECTORS, |
| 2273 | !test_bit(STRIPE_DEGRADED, &sh->state), |
| 2274 | 0); |
| 2275 | } |
| 2276 | } |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 2277 | |
| 2278 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 2279 | if (atomic_dec_and_test(&conf->pending_full_writes)) |
| 2280 | md_wakeup_thread(conf->mddev->thread); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2281 | } |
| 2282 | |
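/*
 * handle_stripe_dirtying5() picks between read-modify-write and
 * reconstruct-write for a pending RAID-5 write: 'rmw' counts the reads
 * needed to update parity in place, 'rcw' those needed to regenerate it
 * from the whole stripe.  The cheaper scheme has its missing blocks
 * scheduled for reading, and once nothing is locked the write itself is
 * queued via schedule_reconstruction5().
 */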
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2283 | static void handle_stripe_dirtying5(raid5_conf_t *conf, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2284 | struct stripe_head *sh, struct stripe_head_state *s, int disks) |
| 2285 | { |
| 2286 | int rmw = 0, rcw = 0, i; |
| 2287 | for (i = disks; i--; ) { |
| 2288 | /* would I have to read this buffer for read_modify_write */ |
| 2289 | struct r5dev *dev = &sh->dev[i]; |
| 2290 | if ((dev->towrite || i == sh->pd_idx) && |
| 2291 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2292 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2293 | test_bit(R5_Wantcompute, &dev->flags))) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2294 | if (test_bit(R5_Insync, &dev->flags)) |
| 2295 | rmw++; |
| 2296 | else |
| 2297 | rmw += 2*disks; /* cannot read it */ |
| 2298 | } |
| 2299 | /* Would I have to read this buffer for reconstruct_write */ |
| 2300 | if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && |
| 2301 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2302 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2303 | test_bit(R5_Wantcompute, &dev->flags))) { |
| 2304 | if (test_bit(R5_Insync, &dev->flags)) rcw++; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2305 | else |
| 2306 | rcw += 2*disks; |
| 2307 | } |
| 2308 | } |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2309 | pr_debug("for sector %llu, rmw=%d rcw=%d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2310 | (unsigned long long)sh->sector, rmw, rcw); |
| 2311 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2312 | if (rmw < rcw && rmw > 0) |
| 2313 | /* prefer read-modify-write, but need to get some data */ |
| 2314 | for (i = disks; i--; ) { |
| 2315 | struct r5dev *dev = &sh->dev[i]; |
| 2316 | if ((dev->towrite || i == sh->pd_idx) && |
| 2317 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2318 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2319 | test_bit(R5_Wantcompute, &dev->flags)) && |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2320 | test_bit(R5_Insync, &dev->flags)) { |
| 2321 | if ( |
| 2322 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2323 | pr_debug("Read_old block " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2324 | "%d for r-m-w\n", i); |
| 2325 | set_bit(R5_LOCKED, &dev->flags); |
| 2326 | set_bit(R5_Wantread, &dev->flags); |
| 2327 | s->locked++; |
| 2328 | } else { |
| 2329 | set_bit(STRIPE_DELAYED, &sh->state); |
| 2330 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2331 | } |
| 2332 | } |
| 2333 | } |
| 2334 | if (rcw <= rmw && rcw > 0) |
| 2335 | /* want reconstruct write, but need to get some data */ |
| 2336 | for (i = disks; i--; ) { |
| 2337 | struct r5dev *dev = &sh->dev[i]; |
| 2338 | if (!test_bit(R5_OVERWRITE, &dev->flags) && |
| 2339 | i != sh->pd_idx && |
| 2340 | !test_bit(R5_LOCKED, &dev->flags) && |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2341 | !(test_bit(R5_UPTODATE, &dev->flags) || |
| 2342 | test_bit(R5_Wantcompute, &dev->flags)) && |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2343 | test_bit(R5_Insync, &dev->flags)) { |
| 2344 | if ( |
| 2345 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2346 | pr_debug("Read_old block " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2347 | "%d for Reconstruct\n", i); |
| 2348 | set_bit(R5_LOCKED, &dev->flags); |
| 2349 | set_bit(R5_Wantread, &dev->flags); |
| 2350 | s->locked++; |
| 2351 | } else { |
| 2352 | set_bit(STRIPE_DELAYED, &sh->state); |
| 2353 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2354 | } |
| 2355 | } |
| 2356 | } |
| 2357 | /* now if nothing is locked, and if we have enough data, |
| 2358 | * we can start a write request |
| 2359 | */ |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2360 | /* since handle_stripe can be called at any time we need to handle the |
| 2361 | * case where a compute block operation has been submitted and then a |
| 2362 | * subsequent call wants to start a write request. raid5_run_ops only |
| 2363 | * handles the case where compute block and postxor are requested |
| 2364 | * simultaneously. If this is not the case then new writes need to be |
| 2365 | * held off until the compute completes. |
| 2366 | */ |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2367 | if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && |
| 2368 | (s->locked == 0 && (rcw == 0 || rmw == 0) && |
| 2369 | !test_bit(STRIPE_BIT_DELAY, &sh->state))) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2370 | schedule_reconstruction5(sh, s, rcw == 0, 0); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2371 | } |
| 2372 | |
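/*
 * handle_stripe_dirtying6() implements reconstruct-write only: every data
 * block that is not completely overwritten must be read (or computed for
 * failed devices) before compute_parity6(RECONSTRUCT_WRITE) regenerates P
 * and Q and the locked blocks are queued for writing.
 */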
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2373 | static void handle_stripe_dirtying6(raid5_conf_t *conf, |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2374 | struct stripe_head *sh, struct stripe_head_state *s, |
| 2375 | struct r6_state *r6s, int disks) |
| 2376 | { |
| 2377 | int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 2378 | int qd_idx = sh->qd_idx; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2379 | for (i = disks; i--; ) { |
| 2380 | struct r5dev *dev = &sh->dev[i]; |
| 2381 | /* Would I have to read this buffer for reconstruct_write */ |
| 2382 | if (!test_bit(R5_OVERWRITE, &dev->flags) |
| 2383 | && i != pd_idx && i != qd_idx |
| 2384 | && (!test_bit(R5_LOCKED, &dev->flags) |
| 2385 | ) && |
| 2386 | !test_bit(R5_UPTODATE, &dev->flags)) { |
| 2387 | if (test_bit(R5_Insync, &dev->flags)) rcw++; |
| 2388 | else { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2389 | pr_debug("raid6: must_compute: " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2390 | "disk %d flags=%#lx\n", i, dev->flags); |
| 2391 | must_compute++; |
| 2392 | } |
| 2393 | } |
| 2394 | } |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2395 | pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2396 | (unsigned long long)sh->sector, rcw, must_compute); |
| 2397 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2398 | |
| 2399 | if (rcw > 0) |
| 2400 | /* want reconstruct write, but need to get some data */ |
| 2401 | for (i = disks; i--; ) { |
| 2402 | struct r5dev *dev = &sh->dev[i]; |
| 2403 | if (!test_bit(R5_OVERWRITE, &dev->flags) |
| 2404 | && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) |
| 2405 | && !test_bit(R5_LOCKED, &dev->flags) && |
| 2406 | !test_bit(R5_UPTODATE, &dev->flags) && |
| 2407 | test_bit(R5_Insync, &dev->flags)) { |
| 2408 | if ( |
| 2409 | test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2410 | pr_debug("Read_old stripe %llu " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2411 | "block %d for Reconstruct\n", |
| 2412 | (unsigned long long)sh->sector, i); |
| 2413 | set_bit(R5_LOCKED, &dev->flags); |
| 2414 | set_bit(R5_Wantread, &dev->flags); |
| 2415 | s->locked++; |
| 2416 | } else { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2417 | pr_debug("Request delayed stripe %llu " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2418 | "block %d for Reconstruct\n", |
| 2419 | (unsigned long long)sh->sector, i); |
| 2420 | set_bit(STRIPE_DELAYED, &sh->state); |
| 2421 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2422 | } |
| 2423 | } |
| 2424 | } |
| 2425 | /* now if nothing is locked, and if we have enough data, we can start a |
| 2426 | * write request |
| 2427 | */ |
| 2428 | if (s->locked == 0 && rcw == 0 && |
| 2429 | !test_bit(STRIPE_BIT_DELAY, &sh->state)) { |
| 2430 | if (must_compute > 0) { |
| 2431 | /* We have failed blocks and need to compute them */ |
| 2432 | switch (s->failed) { |
| 2433 | case 0: |
| 2434 | BUG(); |
| 2435 | case 1: |
| 2436 | compute_block_1(sh, r6s->failed_num[0], 0); |
| 2437 | break; |
| 2438 | case 2: |
| 2439 | compute_block_2(sh, r6s->failed_num[0], |
| 2440 | r6s->failed_num[1]); |
| 2441 | break; |
| 2442 | default: /* This request should have been failed? */ |
| 2443 | BUG(); |
| 2444 | } |
| 2445 | } |
| 2446 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2447 | pr_debug("Computing parity for stripe %llu\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2448 | (unsigned long long)sh->sector); |
| 2449 | compute_parity6(sh, RECONSTRUCT_WRITE); |
| 2450 | /* now every locked buffer is ready to be written */ |
| 2451 | for (i = disks; i--; ) |
| 2452 | if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2453 | pr_debug("Writing stripe %llu block %d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2454 | (unsigned long long)sh->sector, i); |
| 2455 | s->locked++; |
| 2456 | set_bit(R5_Wantwrite, &sh->dev[i].flags); |
| 2457 | } |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 2458 | if (s->locked == disks) |
| 2459 | if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) |
| 2460 | atomic_inc(&conf->pending_full_writes); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2461 | /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ |
| 2462 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2463 | |
| 2464 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
| 2465 | atomic_dec(&conf->preread_active_stripes); |
| 2466 | if (atomic_read(&conf->preread_active_stripes) < |
| 2467 | IO_THRESHOLD) |
| 2468 | md_wakeup_thread(conf->mddev->thread); |
| 2469 | } |
| 2470 | } |
| 2471 | } |
| 2472 | |
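/*
 * handle_parity_checks5() drives the RAID-5 parity-check state machine:
 * check_state_idle starts an asynchronous zero-sum check (or, after a
 * rebuilt block, falls through to write it out), check_state_check_result
 * either marks the stripe in-sync or schedules a parity recompute, and
 * check_state_compute_result writes the repaired block back to disk.
 */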
| 2473 | static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, |
| 2474 | struct stripe_head_state *s, int disks) |
| 2475 | { |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2476 | struct r5dev *dev = NULL; |
Dan Williams | e89f896 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2477 | |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2478 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2479 | |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2480 | switch (sh->check_state) { |
| 2481 | case check_state_idle: |
| 2482 | /* start a new check operation if there are no failures */ |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2483 | if (s->failed == 0) { |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2484 | BUG_ON(s->uptodate != disks); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2485 | sh->check_state = check_state_run; |
| 2486 | set_bit(STRIPE_OP_CHECK, &s->ops_request); |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2487 | clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2488 | s->uptodate--; |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2489 | break; |
Dan Williams | bd2ab67 | 2008-04-10 21:29:27 -0700 | [diff] [blame] | 2490 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2491 | dev = &sh->dev[s->failed_num]; |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2492 | /* fall through */ |
| 2493 | case check_state_compute_result: |
| 2494 | sh->check_state = check_state_idle; |
| 2495 | if (!dev) |
| 2496 | dev = &sh->dev[sh->pd_idx]; |
| 2497 | |
| 2498 | /* check that a write has not made the stripe insync */ |
| 2499 | if (test_bit(STRIPE_INSYNC, &sh->state)) |
| 2500 | break; |
| 2501 | |
| 2502 | /* either failed parity check, or recovery is happening */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2503 | BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); |
| 2504 | BUG_ON(s->uptodate != disks); |
| 2505 | |
| 2506 | set_bit(R5_LOCKED, &dev->flags); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2507 | s->locked++; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2508 | set_bit(R5_Wantwrite, &dev->flags); |
Dan Williams | 830ea01 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2509 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2510 | clear_bit(STRIPE_DEGRADED, &sh->state); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2511 | set_bit(STRIPE_INSYNC, &sh->state); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2512 | break; |
| 2513 | case check_state_run: |
| 2514 | break; /* we will be called again upon completion */ |
| 2515 | case check_state_check_result: |
| 2516 | sh->check_state = check_state_idle; |
| 2517 | |
| 2518 | /* if a failure occurred during the check operation, leave |
| 2519 | * STRIPE_INSYNC not set and let the stripe be handled again |
| 2520 | */ |
| 2521 | if (s->failed) |
| 2522 | break; |
| 2523 | |
| 2524 | /* handle a successful check operation: if parity is correct |
| 2525 | * we are done. Otherwise update the mismatch count and repair |
| 2526 | * parity if !MD_RECOVERY_CHECK |
| 2527 | */ |
| 2528 | if (sh->ops.zero_sum_result == 0) |
| 2529 | /* parity is correct (on disc, |
| 2530 | * not in buffer any more) |
| 2531 | */ |
| 2532 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2533 | else { |
| 2534 | conf->mddev->resync_mismatches += STRIPE_SECTORS; |
| 2535 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 2536 | /* don't try to repair!! */ |
| 2537 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2538 | else { |
| 2539 | sh->check_state = check_state_compute_run; |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2540 | set_bit(STRIPE_COMPUTE_RUN, &sh->state); |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2541 | set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); |
| 2542 | set_bit(R5_Wantcompute, |
| 2543 | &sh->dev[sh->pd_idx].flags); |
| 2544 | sh->ops.target = sh->pd_idx; |
| 2545 | s->uptodate++; |
| 2546 | } |
| 2547 | } |
| 2548 | break; |
| 2549 | case check_state_compute_run: |
| 2550 | break; |
| 2551 | default: |
| 2552 | printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", |
| 2553 | __func__, sh->check_state, |
| 2554 | (unsigned long long) sh->sector); |
| 2555 | BUG(); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2556 | } |
| 2557 | } |
| 2558 | |
| 2559 | |
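/*
 * handle_parity_checks6() does its checking synchronously: P is verified
 * by recomputing it on top of the existing parity page, which XORs out to
 * zero (page_is_zero()) when the old P was correct, and Q is verified by
 * recomputing it and comparing against the previous contents saved in
 * tmp_page.  Failed-device blocks and any mismatching P or Q are then
 * queued for rewriting.
 */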
| 2560 | static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, |
| 2561 | struct stripe_head_state *s, |
| 2562 | struct r6_state *r6s, struct page *tmp_page, |
| 2563 | int disks) |
| 2564 | { |
| 2565 | int update_p = 0, update_q = 0; |
| 2566 | struct r5dev *dev; |
| 2567 | int pd_idx = sh->pd_idx; |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 2568 | int qd_idx = sh->qd_idx; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2569 | |
| 2570 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2571 | |
| 2572 | BUG_ON(s->failed > 2); |
| 2573 | BUG_ON(s->uptodate < disks); |
| 2574 | /* Want to check and possibly repair P and Q. |
| 2575 | * However there could be one 'failed' device, in which |
| 2576 | * case we can only check one of them, possibly using the |
| 2577 | * other to generate missing data |
| 2578 | */ |
| 2579 | |
| 2580 | /* If !tmp_page, we cannot do the calculations, |
| 2581 | * but as we have set STRIPE_HANDLE, we will soon be called |
| 2582 | * by handle_stripe with a tmp_page - just wait until then. |
| 2583 | */ |
| 2584 | if (tmp_page) { |
| 2585 | if (s->failed == r6s->q_failed) { |
| 2586 | /* The only possible failed device holds 'Q', so it |
| 2587 | * makes sense to check P (If anything else were failed, |
| 2588 | * we would have used P to recreate it). |
| 2589 | */ |
| 2590 | compute_block_1(sh, pd_idx, 1); |
| 2591 | if (!page_is_zero(sh->dev[pd_idx].page)) { |
| 2592 | compute_block_1(sh, pd_idx, 0); |
| 2593 | update_p = 1; |
| 2594 | } |
| 2595 | } |
| 2596 | if (!r6s->q_failed && s->failed < 2) { |
| 2597 | /* q is not failed, and we didn't use it to generate |
| 2598 | * anything, so it makes sense to check it |
| 2599 | */ |
| 2600 | memcpy(page_address(tmp_page), |
| 2601 | page_address(sh->dev[qd_idx].page), |
| 2602 | STRIPE_SIZE); |
| 2603 | compute_parity6(sh, UPDATE_PARITY); |
| 2604 | if (memcmp(page_address(tmp_page), |
| 2605 | page_address(sh->dev[qd_idx].page), |
| 2606 | STRIPE_SIZE) != 0) { |
| 2607 | clear_bit(STRIPE_INSYNC, &sh->state); |
| 2608 | update_q = 1; |
| 2609 | } |
| 2610 | } |
| 2611 | if (update_p || update_q) { |
| 2612 | conf->mddev->resync_mismatches += STRIPE_SECTORS; |
| 2613 | if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) |
| 2614 | /* don't try to repair!! */ |
| 2615 | update_p = update_q = 0; |
| 2616 | } |
| 2617 | |
| 2618 | /* now write out any block on a failed drive, |
| 2619 | * or P or Q if they need it |
| 2620 | */ |
| 2621 | |
| 2622 | if (s->failed == 2) { |
| 2623 | dev = &sh->dev[r6s->failed_num[1]]; |
| 2624 | s->locked++; |
| 2625 | set_bit(R5_LOCKED, &dev->flags); |
| 2626 | set_bit(R5_Wantwrite, &dev->flags); |
| 2627 | } |
| 2628 | if (s->failed >= 1) { |
| 2629 | dev = &sh->dev[r6s->failed_num[0]]; |
| 2630 | s->locked++; |
| 2631 | set_bit(R5_LOCKED, &dev->flags); |
| 2632 | set_bit(R5_Wantwrite, &dev->flags); |
| 2633 | } |
| 2634 | |
| 2635 | if (update_p) { |
| 2636 | dev = &sh->dev[pd_idx]; |
| 2637 | s->locked++; |
| 2638 | set_bit(R5_LOCKED, &dev->flags); |
| 2639 | set_bit(R5_Wantwrite, &dev->flags); |
| 2640 | } |
| 2641 | if (update_q) { |
| 2642 | dev = &sh->dev[qd_idx]; |
| 2643 | s->locked++; |
| 2644 | set_bit(R5_LOCKED, &dev->flags); |
| 2645 | set_bit(R5_Wantwrite, &dev->flags); |
| 2646 | } |
| 2647 | clear_bit(STRIPE_DEGRADED, &sh->state); |
| 2648 | |
| 2649 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2650 | } |
| 2651 | } |
| 2652 | |
| 2653 | static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, |
| 2654 | struct r6_state *r6s) |
| 2655 | { |
| 2656 | int i; |
| 2657 | |
| 2658 | /* We have read all the blocks in this stripe and now we need to |
| 2659 | * copy some of them into a target stripe for expand. |
| 2660 | */ |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2661 | struct dma_async_tx_descriptor *tx = NULL; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2662 | clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 2663 | for (i = 0; i < sh->disks; i++) |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 2664 | if (i != sh->pd_idx && i != sh->qd_idx) { |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2665 | int dd_idx, j; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2666 | struct stripe_head *sh2; |
| 2667 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 2668 | sector_t bn = compute_blocknr(sh, i, 1); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2669 | sector_t s = raid5_compute_sector(conf, bn, 0, |
| 2670 | &dd_idx, NULL); |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2671 | sh2 = get_active_stripe(conf, s, 0, 1); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2672 | if (sh2 == NULL) |
| 2673 | /* so far only the early blocks of this stripe |
| 2674 | * have been requested. When later blocks |
| 2675 | * get requested, we will try again |
| 2676 | */ |
| 2677 | continue; |
| 2678 | if (!test_bit(STRIPE_EXPANDING, &sh2->state) || |
| 2679 | test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { |
| 2680 | /* must have already done this block */ |
| 2681 | release_stripe(sh2); |
| 2682 | continue; |
| 2683 | } |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2684 | |
| 2685 | /* place all the copies on one channel */ |
| 2686 | tx = async_memcpy(sh2->dev[dd_idx].page, |
| 2687 | sh->dev[i].page, 0, 0, STRIPE_SIZE, |
| 2688 | ASYNC_TX_DEP_ACK, tx, NULL, NULL); |
| 2689 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2690 | set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); |
| 2691 | set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); |
| 2692 | for (j = 0; j < conf->raid_disks; j++) |
| 2693 | if (j != sh2->pd_idx && |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2694 | (!r6s || j != sh2->qd_idx) && |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2695 | !test_bit(R5_Expanded, &sh2->dev[j].flags)) |
| 2696 | break; |
| 2697 | if (j == conf->raid_disks) { |
| 2698 | set_bit(STRIPE_EXPAND_READY, &sh2->state); |
| 2699 | set_bit(STRIPE_HANDLE, &sh2->state); |
| 2700 | } |
| 2701 | release_stripe(sh2); |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2702 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2703 | } |
NeilBrown | a2e0855 | 2007-09-11 15:23:36 -0700 | [diff] [blame] | 2704 | /* done submitting copies, wait for them to complete */ |
| 2705 | if (tx) { |
| 2706 | async_tx_ack(tx); |
| 2707 | dma_wait_for_async_tx(tx); |
| 2708 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2709 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2710 | |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2711 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2712 | /* |
| 2713 | * handle_stripe - do things to a stripe. |
| 2714 | * |
| 2715 | * We lock the stripe and then examine the state of various bits |
| 2716 | * to see what needs to be done. |
| 2717 | * Possible results: |
| 2718 | * return some read requests which now have data |
| 2719 | * return some write requests which are safely on disc |
| 2720 | * schedule a read on some buffers |
| 2721 | * schedule a write of some buffers |
| 2722 | * return confirmation of parity correctness |
| 2723 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2724 | * buffers are taken off read_list or write_list, and bh_cache buffers |
| 2725 | * get BH_Lock set before the stripe lock is released. |
| 2726 | * |
| 2727 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2728 | |
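/*
 * handle_stripe5() waits for any blocked rdev it encountered before
 * returning; its return value is false when such a blocked device kept
 * the stripe from being handled this time round.
 */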
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 2729 | static bool handle_stripe5(struct stripe_head *sh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2730 | { |
| 2731 | raid5_conf_t *conf = sh->raid_conf; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2732 | int disks = sh->disks, i; |
| 2733 | struct bio *return_bi = NULL; |
| 2734 | struct stripe_head_state s; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2735 | struct r5dev *dev; |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2736 | mdk_rdev_t *blocked_rdev = NULL; |
Dan Williams | e0a115e | 2008-06-05 22:45:52 -0700 | [diff] [blame] | 2737 | int prexor; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2738 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2739 | memset(&s, 0, sizeof(s)); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2740 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d " |
| 2741 | "reconstruct:%d\n", (unsigned long long)sh->sector, sh->state, |
| 2742 | atomic_read(&sh->count), sh->pd_idx, sh->check_state, |
| 2743 | sh->reconstruct_state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2744 | |
| 2745 | spin_lock(&sh->lock); |
| 2746 | clear_bit(STRIPE_HANDLE, &sh->state); |
| 2747 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 2748 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2749 | s.syncing = test_bit(STRIPE_SYNCING, &sh->state); |
| 2750 | s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 2751 | s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); |
Dan Williams | 83de75c | 2008-06-28 08:31:58 +1000 | [diff] [blame] | 2752 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2753 | /* Now to look around and see what can be done */ |
NeilBrown | 9910f16 | 2006-01-06 00:20:24 -0800 | [diff] [blame] | 2754 | rcu_read_lock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2755 | for (i=disks; i--; ) { |
| 2756 | mdk_rdev_t *rdev; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2757 | struct r5dev *dev = &sh->dev[i]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2758 | clear_bit(R5_Insync, &dev->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2759 | |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2760 | pr_debug("check %d: state 0x%lx toread %p read %p write %p " |
| 2761 | "written %p\n", i, dev->flags, dev->toread, dev->read, |
| 2762 | dev->towrite, dev->written); |
| 2763 | |
| 2764 | /* maybe we can request a biofill operation |
| 2765 | * |
| 2766 | * new wantfill requests are only permitted while |
Dan Williams | 83de75c | 2008-06-28 08:31:58 +1000 | [diff] [blame] | 2767 | * ops_complete_biofill is guaranteed to be inactive |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2768 | */ |
| 2769 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && |
Dan Williams | 83de75c | 2008-06-28 08:31:58 +1000 | [diff] [blame] | 2770 | !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2771 | set_bit(R5_Wantfill, &dev->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2772 | |
| 2773 | /* now count some things */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2774 | if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; |
| 2775 | if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; |
Dan Williams | f38e121 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2776 | if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2777 | |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2778 | if (test_bit(R5_Wantfill, &dev->flags)) |
| 2779 | s.to_fill++; |
| 2780 | else if (dev->toread) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2781 | s.to_read++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2782 | if (dev->towrite) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2783 | s.to_write++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2784 | if (!test_bit(R5_OVERWRITE, &dev->flags)) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2785 | s.non_overwrite++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2786 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2787 | if (dev->written) |
| 2788 | s.written++; |
NeilBrown | 9910f16 | 2006-01-06 00:20:24 -0800 | [diff] [blame] | 2789 | rdev = rcu_dereference(conf->disks[i].rdev); |
NeilBrown | ac4090d | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 2790 | if (blocked_rdev == NULL && |
| 2791 | rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2792 | blocked_rdev = rdev; |
| 2793 | atomic_inc(&rdev->nr_pending); |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2794 | } |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 2795 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { |
NeilBrown | 14f8d26 | 2006-01-06 00:20:14 -0800 | [diff] [blame] | 2796 | /* The ReadError flag will just be confusing now */ |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2797 | clear_bit(R5_ReadError, &dev->flags); |
| 2798 | clear_bit(R5_ReWrite, &dev->flags); |
| 2799 | } |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 2800 | if (!rdev || !test_bit(In_sync, &rdev->flags) |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2801 | || test_bit(R5_ReadError, &dev->flags)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2802 | s.failed++; |
| 2803 | s.failed_num = i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2804 | } else |
| 2805 | set_bit(R5_Insync, &dev->flags); |
| 2806 | } |
NeilBrown | 9910f16 | 2006-01-06 00:20:24 -0800 | [diff] [blame] | 2807 | rcu_read_unlock(); |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2808 | |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2809 | if (unlikely(blocked_rdev)) { |
NeilBrown | ac4090d | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 2810 | if (s.syncing || s.expanding || s.expanded || |
| 2811 | s.to_write || s.written) { |
| 2812 | set_bit(STRIPE_HANDLE, &sh->state); |
| 2813 | goto unlock; |
| 2814 | } |
| 2815 | /* There is nothing for the blocked_rdev to block */ |
| 2816 | rdev_dec_pending(blocked_rdev, conf->mddev); |
| 2817 | blocked_rdev = NULL; |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2818 | } |
| 2819 | |
Dan Williams | 83de75c | 2008-06-28 08:31:58 +1000 | [diff] [blame] | 2820 | if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { |
| 2821 | set_bit(STRIPE_OP_BIOFILL, &s.ops_request); |
| 2822 | set_bit(STRIPE_BIOFILL_RUN, &sh->state); |
| 2823 | } |
Dan Williams | b5e98d6 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2824 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2825 | pr_debug("locked=%d uptodate=%d to_read=%d" |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2826 | " to_write=%d failed=%d failed_num=%d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2827 | s.locked, s.uptodate, s.to_read, s.to_write, |
| 2828 | s.failed, s.failed_num); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2829 | /* check if the array has lost two devices and, if so, some requests might |
| 2830 | * need to be failed |
| 2831 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2832 | if (s.failed > 1 && s.to_read+s.to_write+s.written) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2833 | handle_failed_stripe(conf, sh, &s, disks, &return_bi); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2834 | if (s.failed > 1 && s.syncing) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2835 | md_done_sync(conf->mddev, STRIPE_SECTORS,0); |
| 2836 | clear_bit(STRIPE_SYNCING, &sh->state); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2837 | s.syncing = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2838 | } |
| 2839 | |
| 2840 | /* might be able to return some write requests if the parity block |
| 2841 | * is safe, or on a failed drive |
| 2842 | */ |
| 2843 | dev = &sh->dev[sh->pd_idx]; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2844 | if ( s.written && |
| 2845 | ((test_bit(R5_Insync, &dev->flags) && |
| 2846 | !test_bit(R5_LOCKED, &dev->flags) && |
| 2847 | test_bit(R5_UPTODATE, &dev->flags)) || |
| 2848 | (s.failed == 1 && s.failed_num == sh->pd_idx))) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2849 | handle_stripe_clean_event(conf, sh, disks, &return_bi); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2850 | |
| 2851 | /* Now we might consider reading some blocks, either to check/generate |
| 2852 | * parity, or to satisfy requests |
| 2853 | * or to load a block that is being partially written. |
| 2854 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2855 | if (s.to_read || s.non_overwrite || |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2856 | (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2857 | handle_stripe_fill5(sh, &s, disks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2858 | |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2859 | /* Now we check to see if any write operations have recently |
| 2860 | * completed |
| 2861 | */ |
Dan Williams | e0a115e | 2008-06-05 22:45:52 -0700 | [diff] [blame] | 2862 | prexor = 0; |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 2863 | if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) |
Dan Williams | e0a115e | 2008-06-05 22:45:52 -0700 | [diff] [blame] | 2864 | prexor = 1; |
Dan Williams | d8ee072 | 2008-06-28 08:32:06 +1000 | [diff] [blame] | 2865 | if (sh->reconstruct_state == reconstruct_state_drain_result || |
| 2866 | sh->reconstruct_state == reconstruct_state_prexor_drain_result) { |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2867 | sh->reconstruct_state = reconstruct_state_idle; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2868 | |
| 2869 | /* All the 'written' buffers and the parity block are ready to |
| 2870 | * be written back to disk |
| 2871 | */ |
| 2872 | BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); |
| 2873 | for (i = disks; i--; ) { |
| 2874 | dev = &sh->dev[i]; |
| 2875 | if (test_bit(R5_LOCKED, &dev->flags) && |
| 2876 | (i == sh->pd_idx || dev->written)) { |
| 2877 | pr_debug("Writing block %d\n", i); |
| 2878 | set_bit(R5_Wantwrite, &dev->flags); |
Dan Williams | e0a115e | 2008-06-05 22:45:52 -0700 | [diff] [blame] | 2879 | if (prexor) |
| 2880 | continue; |
Dan Williams | e33129d | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2881 | if (!test_bit(R5_Insync, &dev->flags) || |
| 2882 | (i == sh->pd_idx && s.failed == 0)) |
| 2883 | set_bit(STRIPE_INSYNC, &sh->state); |
| 2884 | } |
| 2885 | } |
| 2886 | if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { |
| 2887 | atomic_dec(&conf->preread_active_stripes); |
| 2888 | if (atomic_read(&conf->preread_active_stripes) < |
| 2889 | IO_THRESHOLD) |
| 2890 | md_wakeup_thread(conf->mddev->thread); |
| 2891 | } |
| 2892 | } |
| 2893 | |
| 2894 | /* Now to consider new write requests and what else, if anything |
| 2895 | * should be read. We do not handle new writes when: |
| 2896 | * 1/ A 'write' operation (copy+xor) is already in flight. |
| 2897 | * 2/ A 'check' operation is in flight, as it may clobber the parity |
| 2898 | * block. |
| 2899 | */ |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2900 | if (s.to_write && !sh->reconstruct_state && !sh->check_state) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2901 | handle_stripe_dirtying5(conf, sh, &s, disks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2902 | |
| 2903 | /* maybe we need to check and possibly fix the parity for this stripe |
Dan Williams | e89f896 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2904 | * Any reads will already have been scheduled, so we just see if enough |
| 2905 | * data is available. The parity check is held off while parity |
| 2906 | * dependent operations are in flight. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2907 | */ |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2908 | if (sh->check_state || |
| 2909 | (s.syncing && s.locked == 0 && |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2910 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && |
Dan Williams | ecc65c9 | 2008-06-28 08:31:57 +1000 | [diff] [blame] | 2911 | !test_bit(STRIPE_INSYNC, &sh->state))) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2912 | handle_parity_checks5(conf, sh, &s, disks); |
Dan Williams | e89f896 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2913 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2914 | if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2915 | md_done_sync(conf->mddev, STRIPE_SECTORS,1); |
| 2916 | clear_bit(STRIPE_SYNCING, &sh->state); |
| 2917 | } |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2918 | |
| 2919 | /* If the failed drive is just a ReadError, then we might need to progress |
| 2920 | * the repair/check process |
| 2921 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2922 | if (s.failed == 1 && !conf->mddev->ro && |
| 2923 | test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) |
| 2924 | && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) |
| 2925 | && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2926 | ) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2927 | dev = &sh->dev[s.failed_num]; |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2928 | if (!test_bit(R5_ReWrite, &dev->flags)) { |
| 2929 | set_bit(R5_Wantwrite, &dev->flags); |
| 2930 | set_bit(R5_ReWrite, &dev->flags); |
| 2931 | set_bit(R5_LOCKED, &dev->flags); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2932 | s.locked++; |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2933 | } else { |
| 2934 | /* let's read it back */ |
| 2935 | set_bit(R5_Wantread, &dev->flags); |
| 2936 | set_bit(R5_LOCKED, &dev->flags); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2937 | s.locked++; |
NeilBrown | 4e5314b | 2005-11-08 21:39:22 -0800 | [diff] [blame] | 2938 | } |
| 2939 | } |
| 2940 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2941 | /* Finish reconstruct operations initiated by the expansion process */ |
| 2942 | if (sh->reconstruct_state == reconstruct_state_result) { |
| 2943 | sh->reconstruct_state = reconstruct_state_idle; |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2944 | clear_bit(STRIPE_EXPANDING, &sh->state); |
Dan Williams | 2339788 | 2008-07-23 20:05:34 -0700 | [diff] [blame] | 2945 | for (i = conf->raid_disks; i--; ) { |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2946 | set_bit(R5_Wantwrite, &sh->dev[i].flags); |
Dan Williams | 2339788 | 2008-07-23 20:05:34 -0700 | [diff] [blame] | 2947 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
Neil Brown | efe3114 | 2008-06-28 08:31:14 +1000 | [diff] [blame] | 2948 | s.locked++; |
Dan Williams | 2339788 | 2008-07-23 20:05:34 -0700 | [diff] [blame] | 2949 | } |
Dan Williams | f0a50d3 | 2007-01-02 13:52:31 -0700 | [diff] [blame] | 2950 | } |
| 2951 | |
| 2952 | if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2953 | !sh->reconstruct_state) { |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2954 | /* Need to write out all blocks after computing parity */ |
| 2955 | sh->disks = conf->raid_disks; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 2956 | stripe_set_idx(sh->sector, conf, 0, sh); |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 2957 | schedule_reconstruction5(sh, &s, 1, 1); |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2958 | } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2959 | clear_bit(STRIPE_EXPAND_READY, &sh->state); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 2960 | atomic_dec(&conf->reshape_stripes); |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2961 | wake_up(&conf->wait_for_overlap); |
| 2962 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 2963 | } |
| 2964 | |
Dan Williams | 0f94e87 | 2008-01-08 15:32:53 -0800 | [diff] [blame] | 2965 | if (s.expanding && s.locked == 0 && |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 2966 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2967 | handle_stripe_expansion(conf, sh, NULL); |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 2968 | |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2969 | unlock: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2970 | spin_unlock(&sh->lock); |
| 2971 | |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2972 | /* wait for this device to become unblocked */ |
| 2973 | if (unlikely(blocked_rdev)) |
| 2974 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); |
| 2975 | |
Dan Williams | 600aa10 | 2008-06-28 08:32:05 +1000 | [diff] [blame] | 2976 | if (s.ops_request) |
| 2977 | raid5_run_ops(sh, s.ops_request); |
Dan Williams | d84e0f1 | 2007-01-02 13:52:30 -0700 | [diff] [blame] | 2978 | |
Dan Williams | c4e5ac0 | 2008-06-28 08:31:53 +1000 | [diff] [blame] | 2979 | ops_run_io(sh, &s); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2980 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2981 | return_io(return_bi); |
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 2982 | |
| 2983 | return blocked_rdev == NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2984 | } |
| 2985 | |
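/*
 * handle_stripe6() analyses a RAID-6 stripe.  Unlike handle_stripe5() it
 * still does its data movement inline (copy_data() for completed reads,
 * compute_block_*() and compute_parity6() in the helpers above) instead
 * of deferring that work to raid5_run_ops(), so it runs largely under the
 * stripe lock.
 */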
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 2986 | static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2987 | { |
NeilBrown | bff6197 | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 2988 | raid5_conf_t *conf = sh->raid_conf; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 2989 | int disks = sh->disks; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2990 | struct bio *return_bi = NULL; |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 2991 | int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2992 | struct stripe_head_state s; |
| 2993 | struct r6_state r6s; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2994 | struct r5dev *dev, *pdev, *qdev; |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 2995 | mdk_rdev_t *blocked_rdev = NULL; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 2996 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2997 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 2998 | "pd_idx=%d, qd_idx=%d\n", |
| 2999 | (unsigned long long)sh->sector, sh->state, |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 3000 | atomic_read(&sh->count), pd_idx, qd_idx); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3001 | memset(&s, 0, sizeof(s)); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3002 | |
| 3003 | spin_lock(&sh->lock); |
| 3004 | clear_bit(STRIPE_HANDLE, &sh->state); |
| 3005 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 3006 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3007 | s.syncing = test_bit(STRIPE_SYNCING, &sh->state); |
| 3008 | s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 3009 | s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3010 | /* Now to look around and see what can be done */ |
| 3011 | |
| 3012 | rcu_read_lock(); |
| 3013 | for (i=disks; i--; ) { |
| 3014 | mdk_rdev_t *rdev; |
| 3015 | dev = &sh->dev[i]; |
| 3016 | clear_bit(R5_Insync, &dev->flags); |
| 3017 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3018 | pr_debug("check %d: state 0x%lx read %p write %p written %p\n", |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3019 | i, dev->flags, dev->toread, dev->towrite, dev->written); |
| 3020 | /* maybe we can complete a pending read */ |
| 3021 | if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { |
| 3022 | struct bio *rbi, *rbi2; |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3023 | pr_debug("Return read for disc %d\n", i); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3024 | spin_lock_irq(&conf->device_lock); |
| 3025 | rbi = dev->toread; |
| 3026 | dev->toread = NULL; |
| 3027 | if (test_and_clear_bit(R5_Overlap, &dev->flags)) |
| 3028 | wake_up(&conf->wait_for_overlap); |
| 3029 | spin_unlock_irq(&conf->device_lock); |
| 3030 | while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { |
| 3031 | copy_data(0, rbi, dev->page, dev->sector); |
| 3032 | rbi2 = r5_next_bio(rbi, dev->sector); |
| 3033 | spin_lock_irq(&conf->device_lock); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 3034 | if (!raid5_dec_bi_phys_segments(rbi)) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3035 | rbi->bi_next = return_bi; |
| 3036 | return_bi = rbi; |
| 3037 | } |
| 3038 | spin_unlock_irq(&conf->device_lock); |
| 3039 | rbi = rbi2; |
| 3040 | } |
| 3041 | } |
| 3042 | |
| 3043 | /* now count some things */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3044 | if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; |
| 3045 | if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3046 | |
| 3047 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3048 | if (dev->toread) |
| 3049 | s.to_read++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3050 | if (dev->towrite) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3051 | s.to_write++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3052 | if (!test_bit(R5_OVERWRITE, &dev->flags)) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3053 | s.non_overwrite++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3054 | } |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3055 | if (dev->written) |
| 3056 | s.written++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3057 | rdev = rcu_dereference(conf->disks[i].rdev); |
NeilBrown | ac4090d | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 3058 | if (blocked_rdev == NULL && |
| 3059 | rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3060 | blocked_rdev = rdev; |
| 3061 | atomic_inc(&rdev->nr_pending); |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3062 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3063 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { |
| 3064 | /* The ReadError flag will just be confusing now */ |
| 3065 | clear_bit(R5_ReadError, &dev->flags); |
| 3066 | clear_bit(R5_ReWrite, &dev->flags); |
| 3067 | } |
| 3068 | if (!rdev || !test_bit(In_sync, &rdev->flags) |
| 3069 | || test_bit(R5_ReadError, &dev->flags)) { |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3070 | if (s.failed < 2) |
| 3071 | r6s.failed_num[s.failed] = i; |
| 3072 | s.failed++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3073 | } else |
| 3074 | set_bit(R5_Insync, &dev->flags); |
| 3075 | } |
| 3076 | rcu_read_unlock(); |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3077 | |
| 3078 | if (unlikely(blocked_rdev)) { |
NeilBrown | ac4090d | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 3079 | if (s.syncing || s.expanding || s.expanded || |
| 3080 | s.to_write || s.written) { |
| 3081 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3082 | goto unlock; |
| 3083 | } |
| 3084 | /* There is nothing for the blocked_rdev to block */ |
| 3085 | rdev_dec_pending(blocked_rdev, conf->mddev); |
| 3086 | blocked_rdev = NULL; |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3087 | } |
NeilBrown | ac4090d | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 3088 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3089 | pr_debug("locked=%d uptodate=%d to_read=%d" |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3090 | " to_write=%d failed=%d failed_num=%d,%d\n", |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3091 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
| 3092 | r6s.failed_num[0], r6s.failed_num[1]); |
| 3093 | /* check if the array has lost >2 devices and, if so, some requests |
| 3094 | * might need to be failed |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3095 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3096 | if (s.failed > 2 && s.to_read+s.to_write+s.written) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 3097 | handle_failed_stripe(conf, sh, &s, disks, &return_bi); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3098 | if (s.failed > 2 && s.syncing) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3099 | md_done_sync(conf->mddev, STRIPE_SECTORS,0); |
| 3100 | clear_bit(STRIPE_SYNCING, &sh->state); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3101 | s.syncing = 0; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3102 | } |
| 3103 | |
| 3104 | /* |
| 3105 | * might be able to return some write requests if the parity blocks |
| 3106 | * are safe, or on a failed drive |
| 3107 | */ |
| 3108 | pdev = &sh->dev[pd_idx]; |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3109 | r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) |
| 3110 | || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); |
NeilBrown | 34e04e8 | 2009-03-31 15:10:16 +1100 | [diff] [blame] | 3111 | qdev = &sh->dev[qd_idx]; |
| 3112 | r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == qd_idx) |
| 3113 | || (s.failed >= 2 && r6s.failed_num[1] == qd_idx); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3114 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3115 | if ( s.written && |
| 3116 | ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3117 | && !test_bit(R5_LOCKED, &pdev->flags) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3118 | && test_bit(R5_UPTODATE, &pdev->flags)))) && |
| 3119 | ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3120 | && !test_bit(R5_LOCKED, &qdev->flags) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3121 | && test_bit(R5_UPTODATE, &qdev->flags))))) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 3122 | handle_stripe_clean_event(conf, sh, disks, &return_bi); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3123 | |
| 3124 | /* Now we might consider reading some blocks, either to check/generate |
| 3125 | * parity, or to satisfy requests |
| 3126 | * or to load a block that is being partially written. |
| 3127 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3128 | if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || |
| 3129 | (s.syncing && (s.uptodate < disks)) || s.expanding) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 3130 | handle_stripe_fill6(sh, &s, &r6s, disks); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3131 | |
| 3132 | /* now to consider writing and what else, if anything should be read */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3133 | if (s.to_write) |
Dan Williams | 1fe797e | 2008-06-28 09:16:30 +1000 | [diff] [blame] | 3134 | handle_stripe_dirtying6(conf, sh, &s, &r6s, disks); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3135 | |
| 3136 | /* maybe we need to check and possibly fix the parity for this stripe |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3137 | * Any reads will already have been scheduled, so we just see if enough |
| 3138 | * data is available |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3139 | */ |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3140 | if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) |
| 3141 | handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3142 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3143 | if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3144 | md_done_sync(conf->mddev, STRIPE_SECTORS,1); |
| 3145 | clear_bit(STRIPE_SYNCING, &sh->state); |
| 3146 | } |
| 3147 | |
| 3148 | /* If the failed drives are just a ReadError, then we might need |
| 3149 | * to progress the repair/check process |
| 3150 | */ |
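| | /* |
| |  * A brief summary of the two-step recovery coded below: the first pass |
| |  * writes the recomputed data back over the failing block (R5_Wantwrite |
| |  * plus R5_ReWrite), and only once that write has completed does a |
| |  * second pass read the block back (R5_Wantread) to check that the |
| |  * rewrite actually took. |
| |  */ |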
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3151 | if (s.failed <= 2 && !conf->mddev->ro) |
| 3152 | for (i = 0; i < s.failed; i++) { |
| 3153 | dev = &sh->dev[r6s.failed_num[i]]; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3154 | if (test_bit(R5_ReadError, &dev->flags) |
| 3155 | && !test_bit(R5_LOCKED, &dev->flags) |
| 3156 | && test_bit(R5_UPTODATE, &dev->flags) |
| 3157 | ) { |
| 3158 | if (!test_bit(R5_ReWrite, &dev->flags)) { |
| 3159 | set_bit(R5_Wantwrite, &dev->flags); |
| 3160 | set_bit(R5_ReWrite, &dev->flags); |
| 3161 | set_bit(R5_LOCKED, &dev->flags); |
| 3162 | } else { |
| 3163 | /* let's read it back */ |
| 3164 | set_bit(R5_Wantread, &dev->flags); |
| 3165 | set_bit(R5_LOCKED, &dev->flags); |
| 3166 | } |
| 3167 | } |
| 3168 | } |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3169 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3170 | if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3171 | /* Need to write out all blocks after computing P&Q */ |
| 3172 | sh->disks = conf->raid_disks; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3173 | stripe_set_idx(sh->sector, conf, 0, sh); |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3174 | compute_parity6(sh, RECONSTRUCT_WRITE); |
| 3175 | for (i = conf->raid_disks ; i-- ; ) { |
| 3176 | set_bit(R5_LOCKED, &sh->dev[i].flags); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3177 | s.locked++; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3178 | set_bit(R5_Wantwrite, &sh->dev[i].flags); |
| 3179 | } |
| 3180 | clear_bit(STRIPE_EXPANDING, &sh->state); |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3181 | } else if (s.expanded) { |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3182 | clear_bit(STRIPE_EXPAND_READY, &sh->state); |
| 3183 | atomic_dec(&conf->reshape_stripes); |
| 3184 | wake_up(&conf->wait_for_overlap); |
| 3185 | md_done_sync(conf->mddev, STRIPE_SECTORS, 1); |
| 3186 | } |
| 3187 | |
Dan Williams | 0f94e87 | 2008-01-08 15:32:53 -0800 | [diff] [blame] | 3188 | if (s.expanding && s.locked == 0 && |
Dan Williams | 976ea8d | 2008-06-28 08:32:03 +1000 | [diff] [blame] | 3189 | !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3190 | handle_stripe_expansion(conf, sh, &r6s); |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3191 | |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3192 | unlock: |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3193 | spin_unlock(&sh->lock); |
| 3194 | |
Dan Williams | 6bfe0b4 | 2008-04-30 00:52:32 -0700 | [diff] [blame] | 3195 | /* wait for this device to become unblocked */ |
| 3196 | if (unlikely(blocked_rdev)) |
| 3197 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); |
| 3198 | |
Dan Williams | f0e43bc | 2008-06-28 08:31:55 +1000 | [diff] [blame] | 3199 | ops_run_io(sh, &s); |
| 3200 | |
Dan Williams | a445685 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3201 | return_io(return_bi); |
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 3202 | |
| 3203 | return blocked_rdev == NULL; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3204 | } |
| 3205 | |
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 3206 | /* returns true if the stripe was handled; false if a blocked rdev prevented it */ |
| 3207 | static bool handle_stripe(struct stripe_head *sh, struct page *tmp_page) |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3208 | { |
| 3209 | if (sh->raid_conf->level == 6) |
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 3210 | return handle_stripe6(sh, tmp_page); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3211 | else |
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 3212 | return handle_stripe5(sh); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3213 | } |
| 3214 | |
| 3215 | |
| 3216 | |
Arjan van de Ven | 858119e | 2006-01-14 13:20:43 -0800 | [diff] [blame] | 3217 | static void raid5_activate_delayed(raid5_conf_t *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3218 | { |
| 3219 | if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { |
| 3220 | while (!list_empty(&conf->delayed_list)) { |
| 3221 | struct list_head *l = conf->delayed_list.next; |
| 3222 | struct stripe_head *sh; |
| 3223 | sh = list_entry(l, struct stripe_head, lru); |
| 3224 | list_del_init(l); |
| 3225 | clear_bit(STRIPE_DELAYED, &sh->state); |
| 3226 | if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) |
| 3227 | atomic_inc(&conf->preread_active_stripes); |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 3228 | list_add_tail(&sh->lru, &conf->hold_list); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3229 | } |
NeilBrown | 6ed3003 | 2008-02-06 01:40:00 -0800 | [diff] [blame] | 3230 | } else |
| 3231 | blk_plug_device(conf->mddev->queue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3232 | } |
| 3233 | |
Arjan van de Ven | 858119e | 2006-01-14 13:20:43 -0800 | [diff] [blame] | 3234 | static void activate_bit_delay(raid5_conf_t *conf) |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3235 | { |
| 3236 | /* device_lock is held */ |
| 3237 | struct list_head head; |
| 3238 | list_add(&head, &conf->bitmap_list); |
| 3239 | list_del_init(&conf->bitmap_list); |
| 3240 | while (!list_empty(&head)) { |
| 3241 | struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); |
| 3242 | list_del_init(&sh->lru); |
| 3243 | atomic_inc(&sh->count); |
| 3244 | __release_stripe(conf, sh); |
| 3245 | } |
| 3246 | } |
| 3247 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3248 | static void unplug_slaves(mddev_t *mddev) |
| 3249 | { |
| 3250 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 3251 | int i; |
| 3252 | |
| 3253 | rcu_read_lock(); |
| 3254 | for (i=0; i<mddev->raid_disks; i++) { |
Suzanne Wood | d6065f7 | 2005-11-08 21:39:27 -0800 | [diff] [blame] | 3255 | mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 3256 | if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3257 | struct request_queue *r_queue = bdev_get_queue(rdev->bdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3258 | |
| 3259 | atomic_inc(&rdev->nr_pending); |
| 3260 | rcu_read_unlock(); |
| 3261 | |
Alan D. Brunelle | 2ad8b1e | 2007-11-07 14:26:56 -0500 | [diff] [blame] | 3262 | blk_unplug(r_queue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3263 | |
| 3264 | rdev_dec_pending(rdev, mddev); |
| 3265 | rcu_read_lock(); |
| 3266 | } |
| 3267 | } |
| 3268 | rcu_read_unlock(); |
| 3269 | } |
| 3270 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3271 | static void raid5_unplug_device(struct request_queue *q) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3272 | { |
| 3273 | mddev_t *mddev = q->queuedata; |
| 3274 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 3275 | unsigned long flags; |
| 3276 | |
| 3277 | spin_lock_irqsave(&conf->device_lock, flags); |
| 3278 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3279 | if (blk_remove_plug(q)) { |
| 3280 | conf->seq_flush++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3281 | raid5_activate_delayed(conf); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3282 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3283 | md_wakeup_thread(mddev->thread); |
| 3284 | |
| 3285 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 3286 | |
| 3287 | unplug_slaves(mddev); |
| 3288 | } |
| 3289 | |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 3290 | static int raid5_congested(void *data, int bits) |
| 3291 | { |
| 3292 | mddev_t *mddev = data; |
| 3293 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 3294 | |
| 3295 | /* No difference between reads and writes. Just check |
| 3296 | * how busy the stripe_cache is |
| 3297 | */ |
| 3298 | if (conf->inactive_blocked) |
| 3299 | return 1; |
| 3300 | if (conf->quiesce) |
| 3301 | return 1; |
| 3302 | if (list_empty_careful(&conf->inactive_list)) |
| 3303 | return 1; |
| 3304 | |
| 3305 | return 0; |
| 3306 | } |
| 3307 | |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3308 | /* We want read requests to align with chunks where possible, |
| 3309 | * but write requests don't need to. |
| 3310 | */ |
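| | /* |
| |  * A worked example of the boundary arithmetic below (illustrative |
| |  * numbers only): with a 64KiB chunk, chunk_sectors = 128. If a read |
| |  * starts 120 sectors into a chunk and already carries 4 sectors, then |
| |  * max = (128 - (120 + 4)) << 9 = 2048, i.e. at most 4 more sectors |
| |  * (2KiB) can be merged before the request would cross a chunk boundary. |
| |  */ |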
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3311 | static int raid5_mergeable_bvec(struct request_queue *q, |
| 3312 | struct bvec_merge_data *bvm, |
| 3313 | struct bio_vec *biovec) |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3314 | { |
| 3315 | mddev_t *mddev = q->queuedata; |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3316 | sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3317 | int max; |
| 3318 | unsigned int chunk_sectors = mddev->chunk_size >> 9; |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3319 | unsigned int bio_sectors = bvm->bi_size >> 9; |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3320 | |
Alasdair G Kergon | cc371e6 | 2008-07-03 09:53:43 +0200 | [diff] [blame] | 3321 | if ((bvm->bi_rw & 1) == WRITE) |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3322 | return biovec->bv_len; /* always allow writes to be mergeable */ |
| 3323 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 3324 | if (mddev->new_chunk < mddev->chunk_size) |
| 3325 | chunk_sectors = mddev->new_chunk >> 9; |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 3326 | max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; |
| 3327 | if (max < 0) max = 0; |
| 3328 | if (max <= biovec->bv_len && bio_sectors == 0) |
| 3329 | return biovec->bv_len; |
| 3330 | else |
| 3331 | return max; |
| 3332 | } |
| 3333 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3334 | |
| 3335 | static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) |
| 3336 | { |
| 3337 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); |
| 3338 | unsigned int chunk_sectors = mddev->chunk_size >> 9; |
| 3339 | unsigned int bio_sectors = bio->bi_size >> 9; |
| 3340 | |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 3341 | if (mddev->new_chunk < mddev->chunk_size) |
| 3342 | chunk_sectors = mddev->new_chunk >> 9; |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3343 | return chunk_sectors >= |
| 3344 | ((sector & (chunk_sectors - 1)) + bio_sectors); |
| 3345 | } |
| 3346 | |
| 3347 | /* |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3348 | * add bio to the retry LIFO (in O(1) ... we are in interrupt) |
| 3349 | * later sampled by raid5d. |
| 3350 | */ |
| 3351 | static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) |
| 3352 | { |
| 3353 | unsigned long flags; |
| 3354 | |
| 3355 | spin_lock_irqsave(&conf->device_lock, flags); |
| 3356 | |
| 3357 | bi->bi_next = conf->retry_read_aligned_list; |
| 3358 | conf->retry_read_aligned_list = bi; |
| 3359 | |
| 3360 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 3361 | md_wakeup_thread(conf->mddev->thread); |
| 3362 | } |
| 3363 | |
| 3364 | |
| 3365 | static struct bio *remove_bio_from_retry(raid5_conf_t *conf) |
| 3366 | { |
| 3367 | struct bio *bi; |
| 3368 | |
| 3369 | bi = conf->retry_read_aligned; |
| 3370 | if (bi) { |
| 3371 | conf->retry_read_aligned = NULL; |
| 3372 | return bi; |
| 3373 | } |
| 3374 | bi = conf->retry_read_aligned_list; |
| 3375 | if(bi) { |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3376 | conf->retry_read_aligned_list = bi->bi_next; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3377 | bi->bi_next = NULL; |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 3378 | /* |
| 3379 | * this sets the active stripe count to 1 and the processed |
| 3380 | * stripe count to zero (upper 16 bits) |
| 3381 | */ |
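| | /* |
| |  * Illustration (hypothetical value): bi_phys_segments == 0x00020003 |
| |  * would encode 2 stripes already processed in the upper 16 bits and |
| |  * 3 still active in the lower 16 bits. |
| |  */ |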
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3382 | bi->bi_phys_segments = 1; /* biased count of active stripes */ |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3383 | } |
| 3384 | |
| 3385 | return bi; |
| 3386 | } |
| 3387 | |
| 3388 | |
| 3389 | /* |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3390 | * The "raid5_align_endio" should check if the read succeeded and if it |
| 3391 | * did, call bio_endio on the original bio (having bio_put the new bio |
| 3392 | * first). |
| 3393 | * If the read failed, queue the original bio for a retry via add_bio_to_retry(). |
| 3394 | */ |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3395 | static void raid5_align_endio(struct bio *bi, int error) |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3396 | { |
| 3397 | struct bio* raid_bi = bi->bi_private; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3398 | mddev_t *mddev; |
| 3399 | raid5_conf_t *conf; |
| 3400 | int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); |
| 3401 | mdk_rdev_t *rdev; |
| 3402 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3403 | bio_put(bi); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3404 | |
| 3405 | mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; |
| 3406 | conf = mddev_to_conf(mddev); |
| 3407 | rdev = (void*)raid_bi->bi_next; |
| 3408 | raid_bi->bi_next = NULL; |
| 3409 | |
| 3410 | rdev_dec_pending(rdev, conf->mddev); |
| 3411 | |
| 3412 | if (!error && uptodate) { |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3413 | bio_endio(raid_bi, 0); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3414 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
| 3415 | wake_up(&conf->wait_for_stripe); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3416 | return; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3417 | } |
| 3418 | |
| 3419 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3420 | pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3421 | |
| 3422 | add_bio_to_retry(raid_bi, conf); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3423 | } |
| 3424 | |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3425 | static int bio_fits_rdev(struct bio *bi) |
| 3426 | { |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3427 | struct request_queue *q = bdev_get_queue(bi->bi_bdev); |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3428 | |
| 3429 | if ((bi->bi_size>>9) > q->max_sectors) |
| 3430 | return 0; |
| 3431 | blk_recount_segments(q, bi); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 3432 | if (bi->bi_phys_segments > q->max_phys_segments) |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3433 | return 0; |
| 3434 | |
| 3435 | if (q->merge_bvec_fn) |
| 3436 | /* it's too hard to apply the merge_bvec_fn at this stage, |
| 3437 | * just give up |
| 3438 | */ |
| 3439 | return 0; |
| 3440 | |
| 3441 | return 1; |
| 3442 | } |
| 3443 | |
| 3444 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3445 | static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3446 | { |
| 3447 | mddev_t *mddev = q->queuedata; |
| 3448 | raid5_conf_t *conf = mddev_to_conf(mddev); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3449 | unsigned int dd_idx; |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3450 | struct bio* align_bi; |
| 3451 | mdk_rdev_t *rdev; |
| 3452 | |
| 3453 | if (!in_chunk_boundary(mddev, raid_bio)) { |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3454 | pr_debug("chunk_aligned_read : non aligned\n"); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3455 | return 0; |
| 3456 | } |
| 3457 | /* |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3458 | * use bio_clone to make a copy of the bio |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3459 | */ |
| 3460 | align_bi = bio_clone(raid_bio, GFP_NOIO); |
| 3461 | if (!align_bi) |
| 3462 | return 0; |
| 3463 | /* |
| 3464 | * set bi_end_io to a new function, and set bi_private to the |
| 3465 | * original bio. |
| 3466 | */ |
| 3467 | align_bi->bi_end_io = raid5_align_endio; |
| 3468 | align_bi->bi_private = raid_bio; |
| 3469 | /* |
| 3470 | * compute position |
| 3471 | */ |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3472 | align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, |
| 3473 | 0, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3474 | &dd_idx, NULL); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3475 | |
| 3476 | rcu_read_lock(); |
| 3477 | rdev = rcu_dereference(conf->disks[dd_idx].rdev); |
| 3478 | if (rdev && test_bit(In_sync, &rdev->flags)) { |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3479 | atomic_inc(&rdev->nr_pending); |
| 3480 | rcu_read_unlock(); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3481 | raid_bio->bi_next = (void*)rdev; |
| 3482 | align_bi->bi_bdev = rdev->bdev; |
| 3483 | align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); |
| 3484 | align_bi->bi_sector += rdev->data_offset; |
| 3485 | |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 3486 | if (!bio_fits_rdev(align_bi)) { |
| 3487 | /* too big in some way */ |
| 3488 | bio_put(align_bi); |
| 3489 | rdev_dec_pending(rdev, mddev); |
| 3490 | return 0; |
| 3491 | } |
| 3492 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3493 | spin_lock_irq(&conf->device_lock); |
| 3494 | wait_event_lock_irq(conf->wait_for_stripe, |
| 3495 | conf->quiesce == 0, |
| 3496 | conf->device_lock, /* nothing */); |
| 3497 | atomic_inc(&conf->active_aligned_reads); |
| 3498 | spin_unlock_irq(&conf->device_lock); |
| 3499 | |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3500 | generic_make_request(align_bi); |
| 3501 | return 1; |
| 3502 | } else { |
| 3503 | rcu_read_unlock(); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3504 | bio_put(align_bi); |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3505 | return 0; |
| 3506 | } |
| 3507 | } |
| 3508 | |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 3509 | /* __get_priority_stripe - get the next stripe to process |
| 3510 | * |
| 3511 | * Full stripe writes are allowed to pass preread active stripes up until |
| 3512 | * the bypass_threshold is exceeded. In general the bypass_count |
| 3513 | * increments when the handle_list is handled before the hold_list; however, it |
| 3514 | * will not be incremented when STRIPE_IO_STARTED is found to be set, signifying a |
| 3515 | * stripe with i/o already in flight. The bypass_count will be reset when the |
| 3516 | * head of the hold_list has changed, i.e. the head was promoted to the |
| 3517 | * handle_list. |
| 3518 | */ |
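| | /* |
| |  * Illustration, assuming the default bypass_threshold of 1: each time a |
| |  * handle_list stripe is processed while the same idle stripe sits at |
| |  * the head of hold_list, bypass_count grows by one; once it exceeds the |
| |  * threshold, the hold_list head may be picked even though full-stripe |
| |  * writes are still pending, and bypass_count is pulled back down by the |
| |  * threshold. |
| |  */ |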
| 3519 | static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) |
| 3520 | { |
| 3521 | struct stripe_head *sh; |
| 3522 | |
| 3523 | pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", |
| 3524 | __func__, |
| 3525 | list_empty(&conf->handle_list) ? "empty" : "busy", |
| 3526 | list_empty(&conf->hold_list) ? "empty" : "busy", |
| 3527 | atomic_read(&conf->pending_full_writes), conf->bypass_count); |
| 3528 | |
| 3529 | if (!list_empty(&conf->handle_list)) { |
| 3530 | sh = list_entry(conf->handle_list.next, typeof(*sh), lru); |
| 3531 | |
| 3532 | if (list_empty(&conf->hold_list)) |
| 3533 | conf->bypass_count = 0; |
| 3534 | else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { |
| 3535 | if (conf->hold_list.next == conf->last_hold) |
| 3536 | conf->bypass_count++; |
| 3537 | else { |
| 3538 | conf->last_hold = conf->hold_list.next; |
| 3539 | conf->bypass_count -= conf->bypass_threshold; |
| 3540 | if (conf->bypass_count < 0) |
| 3541 | conf->bypass_count = 0; |
| 3542 | } |
| 3543 | } |
| 3544 | } else if (!list_empty(&conf->hold_list) && |
| 3545 | ((conf->bypass_threshold && |
| 3546 | conf->bypass_count > conf->bypass_threshold) || |
| 3547 | atomic_read(&conf->pending_full_writes) == 0)) { |
| 3548 | sh = list_entry(conf->hold_list.next, |
| 3549 | typeof(*sh), lru); |
| 3550 | conf->bypass_count -= conf->bypass_threshold; |
| 3551 | if (conf->bypass_count < 0) |
| 3552 | conf->bypass_count = 0; |
| 3553 | } else |
| 3554 | return NULL; |
| 3555 | |
| 3556 | list_del_init(&sh->lru); |
| 3557 | atomic_inc(&sh->count); |
| 3558 | BUG_ON(atomic_read(&sh->count) != 1); |
| 3559 | return sh; |
| 3560 | } |
Raz Ben-Jehuda(caro) | f679623 | 2006-12-10 02:20:46 -0800 | [diff] [blame] | 3561 | |
Jens Axboe | 165125e | 2007-07-24 09:28:11 +0200 | [diff] [blame] | 3562 | static int make_request(struct request_queue *q, struct bio * bi) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3563 | { |
| 3564 | mddev_t *mddev = q->queuedata; |
| 3565 | raid5_conf_t *conf = mddev_to_conf(mddev); |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3566 | int dd_idx; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3567 | sector_t new_sector; |
| 3568 | sector_t logical_sector, last_sector; |
| 3569 | struct stripe_head *sh; |
Jens Axboe | a362357 | 2005-11-01 09:26:16 +0100 | [diff] [blame] | 3570 | const int rw = bio_data_dir(bi); |
Tejun Heo | c995905 | 2008-08-25 19:47:21 +0900 | [diff] [blame] | 3571 | int cpu, remaining; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3572 | |
NeilBrown | e5dcdd8 | 2005-09-09 16:23:41 -0700 | [diff] [blame] | 3573 | if (unlikely(bio_barrier(bi))) { |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3574 | bio_endio(bi, -EOPNOTSUPP); |
NeilBrown | e5dcdd8 | 2005-09-09 16:23:41 -0700 | [diff] [blame] | 3575 | return 0; |
| 3576 | } |
| 3577 | |
NeilBrown | 3d310eb | 2005-06-21 17:17:26 -0700 | [diff] [blame] | 3578 | md_write_start(mddev, bi); |
NeilBrown | 06d91a5 | 2005-06-21 17:17:12 -0700 | [diff] [blame] | 3579 | |
Tejun Heo | 074a7ac | 2008-08-25 19:56:14 +0900 | [diff] [blame] | 3580 | cpu = part_stat_lock(); |
| 3581 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
| 3582 | part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], |
| 3583 | bio_sectors(bi)); |
| 3584 | part_stat_unlock(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3585 | |
NeilBrown | 802ba06 | 2006-12-13 00:34:13 -0800 | [diff] [blame] | 3586 | if (rw == READ && |
Raz Ben-Jehuda(caro) | 5248861 | 2006-12-10 02:20:48 -0800 | [diff] [blame] | 3587 | mddev->reshape_position == MaxSector && |
| 3588 | chunk_aligned_read(q,bi)) |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3589 | return 0; |
Raz Ben-Jehuda(caro) | 5248861 | 2006-12-10 02:20:48 -0800 | [diff] [blame] | 3590 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3591 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
| 3592 | last_sector = bi->bi_sector + (bi->bi_size>>9); |
| 3593 | bi->bi_next = NULL; |
| 3594 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ |
NeilBrown | 06d91a5 | 2005-06-21 17:17:12 -0700 | [diff] [blame] | 3595 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3596 | for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { |
| 3597 | DEFINE_WAIT(w); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3598 | int disks, data_disks; |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3599 | int previous; |
NeilBrown | b578d55 | 2006-03-27 01:18:12 -0800 | [diff] [blame] | 3600 | |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3601 | retry: |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3602 | previous = 0; |
NeilBrown | b578d55 | 2006-03-27 01:18:12 -0800 | [diff] [blame] | 3603 | prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3604 | if (likely(conf->reshape_progress == MaxSector)) |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3605 | disks = conf->raid_disks; |
| 3606 | else { |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3607 | /* spinlock is needed as reshape_progress may be |
NeilBrown | df8e7f76 | 2006-03-27 01:18:15 -0800 | [diff] [blame] | 3608 | * 64-bit on a 32-bit platform, and so it might be |
| 3609 | * possible to see a half-updated value. |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3610 | * Of course reshape_progress could change after |
NeilBrown | df8e7f76 | 2006-03-27 01:18:15 -0800 | [diff] [blame] | 3611 | * the lock is dropped, so once we get a reference |
| 3612 | * to the stripe that we think it is, we will have |
| 3613 | * to check again. |
| 3614 | */ |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3615 | spin_lock_irq(&conf->device_lock); |
| 3616 | disks = conf->raid_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3617 | if (mddev->delta_disks < 0 |
| 3618 | ? logical_sector < conf->reshape_progress |
| 3619 | : logical_sector >= conf->reshape_progress) { |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3620 | disks = conf->previous_raid_disks; |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3621 | previous = 1; |
| 3622 | } else { |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3623 | if (mddev->delta_disks < 0 |
| 3624 | ? logical_sector < conf->reshape_safe |
| 3625 | : logical_sector >= conf->reshape_safe) { |
NeilBrown | b578d55 | 2006-03-27 01:18:12 -0800 | [diff] [blame] | 3626 | spin_unlock_irq(&conf->device_lock); |
| 3627 | schedule(); |
| 3628 | goto retry; |
| 3629 | } |
| 3630 | } |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3631 | spin_unlock_irq(&conf->device_lock); |
| 3632 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3633 | data_disks = disks - conf->max_degraded; |
| 3634 | |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3635 | new_sector = raid5_compute_sector(conf, logical_sector, |
| 3636 | previous, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3637 | &dd_idx, NULL); |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 3638 | pr_debug("raid5: make_request, sector %llu logical %llu\n", |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3639 | (unsigned long long)new_sector, |
| 3640 | (unsigned long long)logical_sector); |
| 3641 | |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3642 | sh = get_active_stripe(conf, new_sector, previous, |
| 3643 | (bi->bi_rw&RWA_MASK)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3644 | if (sh) { |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3645 | if (unlikely(conf->reshape_progress != MaxSector)) { |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3646 | /* expansion might have moved on while waiting for a |
NeilBrown | df8e7f76 | 2006-03-27 01:18:15 -0800 | [diff] [blame] | 3647 | * stripe, so we must do the range check again. |
| 3648 | * Expansion could still move past after this |
| 3649 | * test, but as we are holding a reference to |
| 3650 | * 'sh', we know that if that happens, |
| 3651 | * STRIPE_EXPANDING will get set and the expansion |
| 3652 | * won't proceed until we finish with the stripe. |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3653 | */ |
| 3654 | int must_retry = 0; |
| 3655 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3656 | if ((mddev->delta_disks < 0 |
| 3657 | ? logical_sector >= conf->reshape_progress |
| 3658 | : logical_sector < conf->reshape_progress) |
NeilBrown | 86b42c7 | 2009-03-31 15:19:03 +1100 | [diff] [blame] | 3659 | && previous) |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3660 | /* mismatch, need to try again */ |
| 3661 | must_retry = 1; |
| 3662 | spin_unlock_irq(&conf->device_lock); |
| 3663 | if (must_retry) { |
| 3664 | release_stripe(sh); |
| 3665 | goto retry; |
| 3666 | } |
| 3667 | } |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 3668 | /* FIXME what if we get a false positive because these |
| 3669 | * are being updated. |
| 3670 | */ |
| 3671 | if (logical_sector >= mddev->suspend_lo && |
| 3672 | logical_sector < mddev->suspend_hi) { |
| 3673 | release_stripe(sh); |
| 3674 | schedule(); |
| 3675 | goto retry; |
| 3676 | } |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 3677 | |
| 3678 | if (test_bit(STRIPE_EXPANDING, &sh->state) || |
| 3679 | !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { |
| 3680 | /* Stripe is busy expanding or |
| 3681 | * add failed due to overlap. Flush everything |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3682 | * and wait a while |
| 3683 | */ |
| 3684 | raid5_unplug_device(mddev->queue); |
| 3685 | release_stripe(sh); |
| 3686 | schedule(); |
| 3687 | goto retry; |
| 3688 | } |
| 3689 | finish_wait(&conf->wait_for_overlap, &w); |
NeilBrown | 6ed3003 | 2008-02-06 01:40:00 -0800 | [diff] [blame] | 3690 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3691 | clear_bit(STRIPE_DELAYED, &sh->state); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3692 | release_stripe(sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3693 | } else { |
| 3694 | /* cannot get stripe for read-ahead, just give-up */ |
| 3695 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
| 3696 | finish_wait(&conf->wait_for_overlap, &w); |
| 3697 | break; |
| 3698 | } |
| 3699 | |
| 3700 | } |
| 3701 | spin_lock_irq(&conf->device_lock); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 3702 | remaining = raid5_dec_bi_phys_segments(bi); |
NeilBrown | f634475 | 2006-03-27 01:18:17 -0800 | [diff] [blame] | 3703 | spin_unlock_irq(&conf->device_lock); |
| 3704 | if (remaining == 0) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3705 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3706 | if ( rw == WRITE ) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3707 | md_write_end(mddev); |
NeilBrown | 6712ecf | 2007-09-27 12:47:43 +0200 | [diff] [blame] | 3708 | |
Neil Brown | 0e13fe23 | 2008-06-28 08:31:20 +1000 | [diff] [blame] | 3709 | bio_endio(bi, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3710 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3711 | return 0; |
| 3712 | } |
| 3713 | |
Dan Williams | b522adc | 2009-03-31 15:00:31 +1100 | [diff] [blame] | 3714 | static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); |
| 3715 | |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3716 | static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3717 | { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3718 | /* reshaping is quite different to recovery/resync so it is |
| 3719 | * handled quite separately ... here. |
| 3720 | * |
| 3721 | * On each call to sync_request, we gather one chunk worth of |
| 3722 | * destination stripes and flag them as expanding. |
| 3723 | * Then we find all the source stripes and request reads. |
| 3724 | * As the reads complete, handle_stripe will copy the data |
| 3725 | * into the destination stripe and release that stripe. |
| 3726 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3727 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
| 3728 | struct stripe_head *sh; |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 3729 | sector_t first_sector, last_sector; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3730 | int raid_disks = conf->previous_raid_disks; |
| 3731 | int data_disks = raid_disks - conf->max_degraded; |
| 3732 | int new_data_disks = conf->raid_disks - conf->max_degraded; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3733 | int i; |
| 3734 | int dd_idx; |
| 3735 | sector_t writepos, safepos, gap; |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 3736 | sector_t stripe_addr; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3737 | |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3738 | if (sector_nr == 0) { |
| 3739 | /* If restarting in the middle, skip the initial sectors */ |
| 3740 | if (mddev->delta_disks < 0 && |
| 3741 | conf->reshape_progress < raid5_size(mddev, 0, 0)) { |
| 3742 | sector_nr = raid5_size(mddev, 0, 0) |
| 3743 | - conf->reshape_progress; |
| 3744 | } else if (mddev->delta_disks > 0 && |
| 3745 | conf->reshape_progress > 0) |
| 3746 | sector_nr = conf->reshape_progress; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3747 | sector_div(sector_nr, new_data_disks); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3748 | if (sector_nr) { |
| 3749 | *skipped = 1; |
| 3750 | return sector_nr; |
| 3751 | } |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3752 | } |
| 3753 | |
| 3754 | /* we update the metadata when there is more than 3Meg |
| 3755 | * in the block range (that is rather arbitrary, should |
| 3756 | * probably be time based) or when the data about to be |
| 3757 | * copied would over-write the source of the data at |
| 3758 | * the front of the range. |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3759 | * i.e. the stripe one chunk beyond reshape_progress, mapped with the |
| 3760 | * new layout, lands after where reshape_safe maps with the old layout. |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3761 | */ |
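| | /* |
| |  * On the "3Meg" figure used below (an illustrative reading of the |
| |  * check): gap is measured in array data sectors, so comparing it |
| |  * against new_data_disks * 3000 * 2 lets each device progress by |
| |  * roughly 6000 sectors (~3MB) between superblock updates. |
| |  */ |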
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3762 | writepos = conf->reshape_progress; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3763 | sector_div(writepos, new_data_disks); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3764 | safepos = conf->reshape_safe; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3765 | sector_div(safepos, data_disks); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3766 | if (mddev->delta_disks < 0) { |
| 3767 | writepos -= conf->chunk_size/512; |
| 3768 | safepos += conf->chunk_size/512; |
| 3769 | gap = conf->reshape_safe - conf->reshape_progress; |
| 3770 | } else { |
| 3771 | writepos += conf->chunk_size/512; |
| 3772 | safepos -= conf->chunk_size/512; |
| 3773 | gap = conf->reshape_progress - conf->reshape_safe; |
| 3774 | } |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3775 | |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3776 | if ((mddev->delta_disks < 0 |
| 3777 | ? writepos < safepos |
| 3778 | : writepos > safepos) || |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3779 | gap > (new_data_disks)*3000*2 /*3Meg*/) { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3780 | /* Cannot proceed until we've updated the superblock... */ |
| 3781 | wait_event(conf->wait_for_overlap, |
| 3782 | atomic_read(&conf->reshape_stripes)==0); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3783 | mddev->reshape_position = conf->reshape_progress; |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 3784 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3785 | md_wakeup_thread(mddev->thread); |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 3786 | wait_event(mddev->sb_wait, mddev->flags == 0 || |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3787 | kthread_should_stop()); |
| 3788 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3789 | conf->reshape_safe = mddev->reshape_position; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3790 | spin_unlock_irq(&conf->device_lock); |
| 3791 | wake_up(&conf->wait_for_overlap); |
| 3792 | } |
| 3793 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 3794 | if (mddev->delta_disks < 0) { |
| 3795 | BUG_ON(conf->reshape_progress == 0); |
| 3796 | stripe_addr = writepos; |
| 3797 | BUG_ON((mddev->dev_sectors & |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 3798 | ~((sector_t)conf->chunk_size / 512 - 1)) |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 3799 | - (conf->chunk_size / 512) - stripe_addr |
| 3800 | != sector_nr); |
| 3801 | } else { |
| 3802 | BUG_ON(writepos != sector_nr + conf->chunk_size / 512); |
| 3803 | stripe_addr = sector_nr; |
| 3804 | } |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3805 | for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { |
| 3806 | int j; |
| 3807 | int skipped = 0; |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 3808 | sh = get_active_stripe(conf, stripe_addr+i, 0, 0); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3809 | set_bit(STRIPE_EXPANDING, &sh->state); |
| 3810 | atomic_inc(&conf->reshape_stripes); |
| 3811 | /* If any of this stripe is beyond the end of the old |
| 3812 | * array, then we need to zero those blocks |
| 3813 | */ |
| 3814 | for (j=sh->disks; j--;) { |
| 3815 | sector_t s; |
| 3816 | if (j == sh->pd_idx) |
| 3817 | continue; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3818 | if (conf->level == 6 && |
NeilBrown | d0dabf7 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3819 | j == sh->qd_idx) |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 3820 | continue; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 3821 | s = compute_blocknr(sh, j, 0); |
Dan Williams | b522adc | 2009-03-31 15:00:31 +1100 | [diff] [blame] | 3822 | if (s < raid5_size(mddev, 0, 0)) { |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3823 | skipped = 1; |
| 3824 | continue; |
| 3825 | } |
| 3826 | memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); |
| 3827 | set_bit(R5_Expanded, &sh->dev[j].flags); |
| 3828 | set_bit(R5_UPTODATE, &sh->dev[j].flags); |
| 3829 | } |
| 3830 | if (!skipped) { |
| 3831 | set_bit(STRIPE_EXPAND_READY, &sh->state); |
| 3832 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3833 | } |
| 3834 | release_stripe(sh); |
| 3835 | } |
| 3836 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3837 | if (mddev->delta_disks < 0) |
| 3838 | conf->reshape_progress -= i * new_data_disks; |
| 3839 | else |
| 3840 | conf->reshape_progress += i * new_data_disks; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3841 | spin_unlock_irq(&conf->device_lock); |
| 3842 | /* Ok, those stripe are ready. We can start scheduling |
| 3843 | * reads on the source stripes. |
| 3844 | * The source stripes are determined by mapping the first and last |
| 3845 | * block on the destination stripes. |
| 3846 | */ |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3847 | first_sector = |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 3848 | raid5_compute_sector(conf, stripe_addr*(new_data_disks), |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3849 | 1, &dd_idx, NULL); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3850 | last_sector = |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 3851 | raid5_compute_sector(conf, ((stripe_addr+conf->chunk_size/512) |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3852 | *(new_data_disks) - 1), |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3853 | 1, &dd_idx, NULL); |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 3854 | if (last_sector >= mddev->dev_sectors) |
| 3855 | last_sector = mddev->dev_sectors - 1; |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3856 | while (first_sector <= last_sector) { |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3857 | sh = get_active_stripe(conf, first_sector, 1, 0); |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3858 | set_bit(STRIPE_EXPAND_SOURCE, &sh->state); |
| 3859 | set_bit(STRIPE_HANDLE, &sh->state); |
| 3860 | release_stripe(sh); |
| 3861 | first_sector += STRIPE_SECTORS; |
| 3862 | } |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 3863 | /* If this takes us to the resync_max point where we have to pause, |
| 3864 | * then we need to write out the superblock. |
| 3865 | */ |
| 3866 | sector_nr += conf->chunk_size>>9; |
| 3867 | if (sector_nr >= mddev->resync_max) { |
| 3868 | /* Cannot proceed until we've updated the superblock... */ |
| 3869 | wait_event(conf->wait_for_overlap, |
| 3870 | atomic_read(&conf->reshape_stripes) == 0); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3871 | mddev->reshape_position = conf->reshape_progress; |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 3872 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 3873 | md_wakeup_thread(mddev->thread); |
| 3874 | wait_event(mddev->sb_wait, |
| 3875 | !test_bit(MD_CHANGE_DEVS, &mddev->flags) |
| 3876 | || kthread_should_stop()); |
| 3877 | spin_lock_irq(&conf->device_lock); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 3878 | conf->reshape_safe = mddev->reshape_position; |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 3879 | spin_unlock_irq(&conf->device_lock); |
| 3880 | wake_up(&conf->wait_for_overlap); |
| 3881 | } |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3882 | return conf->chunk_size>>9; |
| 3883 | } |
| 3884 | |
| 3885 | /* FIXME go_faster isn't used */ |
| 3886 | static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) |
| 3887 | { |
| 3888 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
| 3889 | struct stripe_head *sh; |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 3890 | sector_t max_sector = mddev->dev_sectors; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3891 | int sync_blocks; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3892 | int still_degraded = 0; |
| 3893 | int i; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3894 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3895 | if (sector_nr >= max_sector) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3896 | /* just being told to finish up .. nothing much to do */ |
| 3897 | unplug_slaves(mddev); |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 3898 | |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 3899 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { |
| 3900 | end_reshape(conf); |
| 3901 | return 0; |
| 3902 | } |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3903 | |
| 3904 | if (mddev->curr_resync < max_sector) /* aborted */ |
| 3905 | bitmap_end_sync(mddev->bitmap, mddev->curr_resync, |
| 3906 | &sync_blocks, 1); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3907 | else /* completed sync */ |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3908 | conf->fullsync = 0; |
| 3909 | bitmap_close_sync(mddev->bitmap); |
| 3910 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3911 | return 0; |
| 3912 | } |
NeilBrown | ccfcc3c | 2006-03-27 01:18:09 -0800 | [diff] [blame] | 3913 | |
NeilBrown | 52c0329 | 2006-06-26 00:27:43 -0700 | [diff] [blame] | 3914 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
| 3915 | return reshape_request(mddev, sector_nr, skipped); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 3916 | |
NeilBrown | c620727 | 2008-02-06 01:39:52 -0800 | [diff] [blame] | 3917 | /* No need to check resync_max as we never do more than one |
| 3918 | * stripe, and as resync_max will always be on a chunk boundary, |
| 3919 | * if the check in md_do_sync didn't fire, there is no chance |
| 3920 | * of overstepping resync_max here |
| 3921 | */ |
| 3922 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3923 | /* if there are too many failed drives and we are trying |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3924 | * to resync, then assert that we are finished, because there is |
| 3925 | * nothing we can do. |
| 3926 | */ |
NeilBrown | 3285edf | 2006-06-26 00:27:55 -0700 | [diff] [blame] | 3927 | if (mddev->degraded >= conf->max_degraded && |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3928 | test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 3929 | sector_t rv = mddev->dev_sectors - sector_nr; |
NeilBrown | 57afd89 | 2005-06-21 17:17:13 -0700 | [diff] [blame] | 3930 | *skipped = 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3931 | return rv; |
| 3932 | } |
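/* i.e. with more failed devices than parity can cover a resync cannot
 * reconstruct anything, so the whole remaining range is reported as
 * skipped and the sync effectively completes immediately.
 */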
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3933 | if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && |
NeilBrown | 3855ad9 | 2005-11-08 21:39:38 -0800 | [diff] [blame] | 3934 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 3935 | !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { |
| 3936 | /* we can skip this block, and probably more */ |
| 3937 | sync_blocks /= STRIPE_SECTORS; |
| 3938 | *skipped = 1; |
| 3939 | return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ |
| 3940 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3941 | |
NeilBrown | b47490c | 2008-02-06 01:39:50 -0800 | [diff] [blame] | 3942 | |
| 3943 | bitmap_cond_end_sync(mddev->bitmap, sector_nr); |
| 3944 | |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3945 | sh = get_active_stripe(conf, sector_nr, 0, 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3946 | if (sh == NULL) { |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3947 | sh = get_active_stripe(conf, sector_nr, 0, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3948 | /* make sure we don't swamp the stripe cache if someone else |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3949 | * is trying to get access |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3950 | */ |
Nishanth Aravamudan | 66c006a | 2005-11-07 01:01:17 -0800 | [diff] [blame] | 3951 | schedule_timeout_uninterruptible(1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3952 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 3953 | /* Need to check if the array will still be degraded after recovery/resync. |
| 3954 | * We don't need to check the 'failed' flag as when that gets set, |
| 3955 | * recovery aborts. |
| 3956 | */ |
| 3957 | for (i=0; i<mddev->raid_disks; i++) |
| 3958 | if (conf->disks[i].rdev == NULL) |
| 3959 | still_degraded = 1; |
| 3960 | |
| 3961 | bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); |
| 3962 | |
| 3963 | spin_lock(&sh->lock); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3964 | set_bit(STRIPE_SYNCING, &sh->state); |
| 3965 | clear_bit(STRIPE_INSYNC, &sh->state); |
| 3966 | spin_unlock(&sh->lock); |
| 3967 | |
Dan Williams | df10cfb | 2008-07-28 23:10:39 -0700 | [diff] [blame] | 3968 | /* wait for any blocked device to be handled */ |
| 3969 | while(unlikely(!handle_stripe(sh, NULL))) |
| 3970 | ; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 3971 | release_stripe(sh); |
| 3972 | |
| 3973 | return STRIPE_SECTORS; |
| 3974 | } |
| 3975 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3976 | static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) |
| 3977 | { |
| 3978 | /* We may not be able to submit a whole bio at once as there |
| 3979 | * may not be enough stripe_heads available. |
| 3980 | * We cannot pre-allocate enough stripe_heads as we may need |
| 3981 | * more than exist in the cache (if we allow ever-larger chunks). |
| 3982 | * So we do one stripe head at a time and record in |
| 3983 | * ->bi_hw_segments how many have been done. |
| 3984 | * |
| 3985 | * We *know* that this entire raid_bio is in one chunk, so |
| 3986 | * there will be only one 'dd_idx', and we need only one call to raid5_compute_sector. |
| 3987 | */ |
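/* If a stripe_head cannot be obtained below, the progress so far (scnt)
 * is stashed in ->bi_hw_segments and the bio is parked on
 * conf->retry_read_aligned so a later pass can resume at the same stripe.
 */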
| 3988 | struct stripe_head *sh; |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3989 | int dd_idx; |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3990 | sector_t sector, logical_sector, last_sector; |
| 3991 | int scnt = 0; |
| 3992 | int remaining; |
| 3993 | int handled = 0; |
| 3994 | |
| 3995 | logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
NeilBrown | 112bf89 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3996 | sector = raid5_compute_sector(conf, logical_sector, |
NeilBrown | 911d4ee | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 3997 | 0, &dd_idx, NULL); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 3998 | last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); |
| 3999 | |
| 4000 | for (; logical_sector < last_sector; |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 4001 | logical_sector += STRIPE_SECTORS, |
| 4002 | sector += STRIPE_SECTORS, |
| 4003 | scnt++) { |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4004 | |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4005 | if (scnt < raid5_bi_hw_segments(raid_bio)) |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4006 | /* already done this stripe */ |
| 4007 | continue; |
| 4008 | |
NeilBrown | b5663ba | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4009 | sh = get_active_stripe(conf, sector, 0, 1); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4010 | |
| 4011 | if (!sh) { |
| 4012 | /* failed to get a stripe - must wait */ |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4013 | raid5_set_bi_hw_segments(raid_bio, scnt); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4014 | conf->retry_read_aligned = raid_bio; |
| 4015 | return handled; |
| 4016 | } |
| 4017 | |
| 4018 | set_bit(R5_ReadError, &sh->dev[dd_idx].flags); |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 4019 | if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { |
| 4020 | release_stripe(sh); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4021 | raid5_set_bi_hw_segments(raid_bio, scnt); |
Neil Brown | 387bb17 | 2007-02-08 14:20:29 -0800 | [diff] [blame] | 4022 | conf->retry_read_aligned = raid_bio; |
| 4023 | return handled; |
| 4024 | } |
| 4025 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4026 | handle_stripe(sh, NULL); |
| 4027 | release_stripe(sh); |
| 4028 | handled++; |
| 4029 | } |
| 4030 | spin_lock_irq(&conf->device_lock); |
Jens Axboe | 960e739 | 2008-08-15 10:41:18 +0200 | [diff] [blame] | 4031 | remaining = raid5_dec_bi_phys_segments(raid_bio); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4032 | spin_unlock_irq(&conf->device_lock); |
Neil Brown | 0e13fe23 | 2008-06-28 08:31:20 +1000 | [diff] [blame] | 4033 | if (remaining == 0) |
| 4034 | bio_endio(raid_bio, 0); |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4035 | if (atomic_dec_and_test(&conf->active_aligned_reads)) |
| 4036 | wake_up(&conf->wait_for_stripe); |
| 4037 | return handled; |
| 4038 | } |
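/* A return of 0 means no stripe could be handled this time; raid5d uses
 * that to stop draining the retry list for now.
 */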
| 4039 | |
| 4040 | |
| 4041 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4042 | /* |
| 4043 | * This is our raid5 kernel thread. |
| 4044 | * |
| 4045 | * We scan the hash table for stripes which can be handled now. |
| 4046 | * During the scan, completed stripes are saved for us by the interrupt |
| 4047 | * handler, so that they will not have to wait for our next wakeup. |
| 4048 | */ |
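/* The loop below runs with conf->device_lock held; the lock is dropped
 * around bitmap_unplug(), retry_aligned_read() and handle_stripe() so the
 * sleeping and IO-issuing work happens unlocked.
 */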
NeilBrown | 6ed3003 | 2008-02-06 01:40:00 -0800 | [diff] [blame] | 4049 | static void raid5d(mddev_t *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4050 | { |
| 4051 | struct stripe_head *sh; |
| 4052 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 4053 | int handled; |
| 4054 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4055 | pr_debug("+++ raid5d active\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4056 | |
| 4057 | md_check_recovery(mddev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4058 | |
| 4059 | handled = 0; |
| 4060 | spin_lock_irq(&conf->device_lock); |
| 4061 | while (1) { |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4062 | struct bio *bio; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4063 | |
NeilBrown | ae3c20c | 2006-07-10 04:44:17 -0700 | [diff] [blame] | 4064 | if (conf->seq_flush != conf->seq_write) { |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4065 | int seq = conf->seq_flush; |
NeilBrown | 700e432 | 2005-11-28 13:44:10 -0800 | [diff] [blame] | 4066 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4067 | bitmap_unplug(mddev->bitmap); |
NeilBrown | 700e432 | 2005-11-28 13:44:10 -0800 | [diff] [blame] | 4068 | spin_lock_irq(&conf->device_lock); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4069 | conf->seq_write = seq; |
| 4070 | activate_bit_delay(conf); |
| 4071 | } |
| 4072 | |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4073 | while ((bio = remove_bio_from_retry(conf))) { |
| 4074 | int ok; |
| 4075 | spin_unlock_irq(&conf->device_lock); |
| 4076 | ok = retry_aligned_read(conf, bio); |
| 4077 | spin_lock_irq(&conf->device_lock); |
| 4078 | if (!ok) |
| 4079 | break; |
| 4080 | handled++; |
| 4081 | } |
| 4082 | |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4083 | sh = __get_priority_stripe(conf); |
| 4084 | |
Dan Williams | c9f21aa | 2008-07-23 12:05:51 -0700 | [diff] [blame] | 4085 | if (!sh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4086 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4087 | spin_unlock_irq(&conf->device_lock); |
| 4088 | |
| 4089 | handled++; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4090 | handle_stripe(sh, conf->spare_page); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4091 | release_stripe(sh); |
| 4092 | |
| 4093 | spin_lock_irq(&conf->device_lock); |
| 4094 | } |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4095 | pr_debug("%d stripes handled\n", handled); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4096 | |
| 4097 | spin_unlock_irq(&conf->device_lock); |
| 4098 | |
Dan Williams | c9f21aa | 2008-07-23 12:05:51 -0700 | [diff] [blame] | 4099 | async_tx_issue_pending_all(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4100 | unplug_slaves(mddev); |
| 4101 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4102 | pr_debug("--- raid5d inactive\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4103 | } |
| 4104 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4105 | static ssize_t |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4106 | raid5_show_stripe_cache_size(mddev_t *mddev, char *page) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4107 | { |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4108 | raid5_conf_t *conf = mddev_to_conf(mddev); |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4109 | if (conf) |
| 4110 | return sprintf(page, "%d\n", conf->max_nr_stripes); |
| 4111 | else |
| 4112 | return 0; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4113 | } |
| 4114 | |
| 4115 | static ssize_t |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4116 | raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4117 | { |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4118 | raid5_conf_t *conf = mddev_to_conf(mddev); |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4119 | unsigned long new; |
Dan Williams | b5470dc | 2008-06-27 21:44:04 -0700 | [diff] [blame] | 4120 | int err; |
| 4121 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4122 | if (len >= PAGE_SIZE) |
| 4123 | return -EINVAL; |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4124 | if (!conf) |
| 4125 | return -ENODEV; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4126 | |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4127 | if (strict_strtoul(page, 10, &new)) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4128 | return -EINVAL; |
| 4129 | if (new <= 16 || new > 32768) |
| 4130 | return -EINVAL; |
| 4131 | while (new < conf->max_nr_stripes) { |
| 4132 | if (drop_one_stripe(conf)) |
| 4133 | conf->max_nr_stripes--; |
| 4134 | else |
| 4135 | break; |
| 4136 | } |
Dan Williams | b5470dc | 2008-06-27 21:44:04 -0700 | [diff] [blame] | 4137 | err = md_allow_write(mddev); |
| 4138 | if (err) |
| 4139 | return err; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4140 | while (new > conf->max_nr_stripes) { |
| 4141 | if (grow_one_stripe(conf)) |
| 4142 | conf->max_nr_stripes++; |
| 4143 | else break; |
| 4144 | } |
| 4145 | return len; |
| 4146 | } |
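/* These handlers back the 'stripe_cache_size' md sysfs attribute
 * (typically /sys/block/mdX/md/stripe_cache_size).  Each stripe costs
 * roughly one page plus a struct bio per member device (see the 'memory'
 * estimate in setup_conf() below), so large values can consume tens of
 * megabytes on wide arrays.
 */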
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4147 | |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4148 | static struct md_sysfs_entry |
| 4149 | raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, |
| 4150 | raid5_show_stripe_cache_size, |
| 4151 | raid5_store_stripe_cache_size); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4152 | |
| 4153 | static ssize_t |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4154 | raid5_show_preread_threshold(mddev_t *mddev, char *page) |
| 4155 | { |
| 4156 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 4157 | if (conf) |
| 4158 | return sprintf(page, "%d\n", conf->bypass_threshold); |
| 4159 | else |
| 4160 | return 0; |
| 4161 | } |
| 4162 | |
| 4163 | static ssize_t |
| 4164 | raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) |
| 4165 | { |
| 4166 | raid5_conf_t *conf = mddev_to_conf(mddev); |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4167 | unsigned long new; |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4168 | if (len >= PAGE_SIZE) |
| 4169 | return -EINVAL; |
| 4170 | if (!conf) |
| 4171 | return -ENODEV; |
| 4172 | |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4173 | if (strict_strtoul(page, 10, &new)) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4174 | return -EINVAL; |
Dan Williams | 4ef197d8 | 2008-04-28 02:15:54 -0700 | [diff] [blame] | 4175 | if (new > conf->max_nr_stripes) |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4176 | return -EINVAL; |
| 4177 | conf->bypass_threshold = new; |
| 4178 | return len; |
| 4179 | } |
| 4180 | |
| 4181 | static struct md_sysfs_entry |
| 4182 | raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, |
| 4183 | S_IRUGO | S_IWUSR, |
| 4184 | raid5_show_preread_threshold, |
| 4185 | raid5_store_preread_threshold); |
| 4186 | |
| 4187 | static ssize_t |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4188 | stripe_cache_active_show(mddev_t *mddev, char *page) |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4189 | { |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4190 | raid5_conf_t *conf = mddev_to_conf(mddev); |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4191 | if (conf) |
| 4192 | return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); |
| 4193 | else |
| 4194 | return 0; |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4195 | } |
| 4196 | |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4197 | static struct md_sysfs_entry |
| 4198 | raid5_stripecache_active = __ATTR_RO(stripe_cache_active); |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4199 | |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4200 | static struct attribute *raid5_attrs[] = { |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4201 | &raid5_stripecache_size.attr, |
| 4202 | &raid5_stripecache_active.attr, |
Dan Williams | 8b3e6cd | 2008-04-28 02:15:53 -0700 | [diff] [blame] | 4203 | &raid5_preread_bypass_threshold.attr, |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4204 | NULL, |
| 4205 | }; |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4206 | static struct attribute_group raid5_attrs_group = { |
| 4207 | .name = NULL, |
| 4208 | .attrs = raid5_attrs, |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4209 | }; |
| 4210 | |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4211 | static sector_t |
| 4212 | raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks) |
| 4213 | { |
| 4214 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 4215 | |
| 4216 | if (!sectors) |
| 4217 | sectors = mddev->dev_sectors; |
NeilBrown | 7ec0547 | 2009-03-31 15:10:36 +1100 | [diff] [blame] | 4218 | if (!raid_disks) { |
| 4219 | /* size is defined by the smallest of previous and new size */ |
| 4220 | if (conf->raid_disks < conf->previous_raid_disks) |
| 4221 | raid_disks = conf->raid_disks; |
| 4222 | else |
| 4223 | raid_disks = conf->previous_raid_disks; |
| 4224 | } |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4225 | |
| 4226 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 4227 | sectors &= ~((sector_t)mddev->new_chunk/512 - 1); |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4228 | return sectors * (raid_disks - conf->max_degraded); |
| 4229 | } |
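/* Example (assuming chunk_size == new_chunk == 64k, i.e. 128 sectors):
 * five 1000-sector devices each round down to 896 sectors, and with
 * max_degraded == 1 the array provides 896 * (5 - 1) = 3584 sectors.
 */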
| 4230 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4231 | static raid5_conf_t *setup_conf(mddev_t *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4232 | { |
| 4233 | raid5_conf_t *conf; |
| 4234 | int raid_disk, memory; |
| 4235 | mdk_rdev_t *rdev; |
| 4236 | struct disk_info *disk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4237 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4238 | if (mddev->new_level != 5 |
| 4239 | && mddev->new_level != 4 |
| 4240 | && mddev->new_level != 6) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4241 | printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4242 | mdname(mddev), mddev->new_level); |
| 4243 | return ERR_PTR(-EIO); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4244 | } |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4245 | if ((mddev->new_level == 5 |
| 4246 | && !algorithm_valid_raid5(mddev->new_layout)) || |
| 4247 | (mddev->new_level == 6 |
| 4248 | && !algorithm_valid_raid6(mddev->new_layout))) { |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4249 | printk(KERN_ERR "raid5: %s: layout %d not supported\n", |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4250 | mdname(mddev), mddev->new_layout); |
| 4251 | return ERR_PTR(-EIO); |
| 4252 | } |
| 4253 | if (mddev->new_level == 6 && mddev->raid_disks < 4) { |
| 4254 | printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", |
| 4255 | mdname(mddev), mddev->raid_disks); |
| 4256 | return ERR_PTR(-EINVAL); |
NeilBrown | 99c0fb5 | 2009-03-31 14:39:38 +1100 | [diff] [blame] | 4257 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4258 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4259 | if (!mddev->new_chunk || mddev->new_chunk % PAGE_SIZE) { |
| 4260 | printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", |
| 4261 | mddev->new_chunk, mdname(mddev)); |
| 4262 | return ERR_PTR(-EINVAL); |
NeilBrown | 4bbf377 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 4263 | } |
| 4264 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4265 | conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL); |
| 4266 | if (conf == NULL) |
| 4267 | goto abort; |
| 4268 | |
| 4269 | conf->raid_disks = mddev->raid_disks; |
| 4270 | if (mddev->reshape_position == MaxSector) |
| 4271 | conf->previous_raid_disks = mddev->raid_disks; |
| 4272 | else |
| 4273 | conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; |
| 4274 | |
| 4275 | conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), |
| 4276 | GFP_KERNEL); |
| 4277 | if (!conf->disks) |
| 4278 | goto abort; |
| 4279 | |
| 4280 | conf->mddev = mddev; |
| 4281 | |
| 4282 | if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) |
| 4283 | goto abort; |
| 4284 | |
| 4285 | if (mddev->new_level == 6) { |
| 4286 | conf->spare_page = alloc_page(GFP_KERNEL); |
| 4287 | if (!conf->spare_page) |
| 4288 | goto abort; |
| 4289 | } |
| 4290 | spin_lock_init(&conf->device_lock); |
| 4291 | init_waitqueue_head(&conf->wait_for_stripe); |
| 4292 | init_waitqueue_head(&conf->wait_for_overlap); |
| 4293 | INIT_LIST_HEAD(&conf->handle_list); |
| 4294 | INIT_LIST_HEAD(&conf->hold_list); |
| 4295 | INIT_LIST_HEAD(&conf->delayed_list); |
| 4296 | INIT_LIST_HEAD(&conf->bitmap_list); |
| 4297 | INIT_LIST_HEAD(&conf->inactive_list); |
| 4298 | atomic_set(&conf->active_stripes, 0); |
| 4299 | atomic_set(&conf->preread_active_stripes, 0); |
| 4300 | atomic_set(&conf->active_aligned_reads, 0); |
| 4301 | conf->bypass_threshold = BYPASS_THRESHOLD; |
| 4302 | |
| 4303 | pr_debug("raid5: run(%s) called.\n", mdname(mddev)); |
| 4304 | |
| 4305 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
| 4306 | raid_disk = rdev->raid_disk; |
| 4307 | if (raid_disk >= conf->raid_disks |
| 4308 | || raid_disk < 0) |
| 4309 | continue; |
| 4310 | disk = conf->disks + raid_disk; |
| 4311 | |
| 4312 | disk->rdev = rdev; |
| 4313 | |
| 4314 | if (test_bit(In_sync, &rdev->flags)) { |
| 4315 | char b[BDEVNAME_SIZE]; |
| 4316 | printk(KERN_INFO "raid5: device %s operational as raid" |
| 4317 | " disk %d\n", bdevname(rdev->bdev,b), |
| 4318 | raid_disk); |
| 4319 | } else |
| 4320 | /* Cannot rely on bitmap to complete recovery */ |
| 4321 | conf->fullsync = 1; |
| 4322 | } |
| 4323 | |
| 4324 | conf->chunk_size = mddev->new_chunk; |
| 4325 | conf->level = mddev->new_level; |
| 4326 | if (conf->level == 6) |
| 4327 | conf->max_degraded = 2; |
| 4328 | else |
| 4329 | conf->max_degraded = 1; |
| 4330 | conf->algorithm = mddev->new_layout; |
| 4331 | conf->max_nr_stripes = NR_STRIPES; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4332 | conf->reshape_progress = mddev->reshape_position; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 4333 | if (conf->reshape_progress != MaxSector) |
| 4334 | conf->prev_chunk = mddev->chunk_size; |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4335 | |
| 4336 | memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + |
| 4337 | conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; |
| 4338 | if (grow_stripes(conf, conf->max_nr_stripes)) { |
| 4339 | printk(KERN_ERR |
| 4340 | "raid5: couldn't allocate %dkB for buffers\n", memory); |
| 4341 | goto abort; |
| 4342 | } else |
| 4343 | printk(KERN_INFO "raid5: allocated %dkB for %s\n", |
| 4344 | memory, mdname(mddev)); |
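/* The estimate above is dominated by the page attached to each device of
 * each stripe: with the default 256 stripes and 4 member devices that is
 * 256 * 4 * 4k = 4MB of cache pages (assuming 4k pages), plus bio and
 * stripe_head overhead.
 */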
| 4345 | |
| 4346 | conf->thread = md_register_thread(raid5d, mddev, "%s_raid5"); |
| 4347 | if (!conf->thread) { |
| 4348 | printk(KERN_ERR |
| 4349 | "raid5: couldn't allocate thread for %s\n", |
| 4350 | mdname(mddev)); |
| 4351 | goto abort; |
| 4352 | } |
| 4353 | |
| 4354 | return conf; |
| 4355 | |
| 4356 | abort: |
| 4357 | if (conf) { |
| 4358 | shrink_stripes(conf); |
| 4359 | safe_put_page(conf->spare_page); |
| 4360 | kfree(conf->disks); |
| 4361 | kfree(conf->stripe_hashtbl); |
| 4362 | kfree(conf); |
| 4363 | return ERR_PTR(-EIO); |
| 4364 | } else |
| 4365 | return ERR_PTR(-ENOMEM); |
| 4366 | } |
| 4367 | |
| 4368 | static int run(mddev_t *mddev) |
| 4369 | { |
| 4370 | raid5_conf_t *conf; |
| 4371 | int working_disks = 0; |
| 4372 | mdk_rdev_t *rdev; |
| 4373 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4374 | if (mddev->reshape_position != MaxSector) { |
| 4375 | /* Check that we can continue the reshape. |
| 4376 | * Currently only the number of disks can change; it must |
| 4377 | * increase, and we must be past the point where |
| 4378 | * a stripe over-writes itself |
| 4379 | */ |
| 4380 | sector_t here_new, here_old; |
| 4381 | int old_disks; |
Andre Noll | 18b0033 | 2009-03-31 15:00:56 +1100 | [diff] [blame] | 4382 | int max_degraded = (mddev->level == 6 ? 2 : 1); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4383 | |
| 4384 | if (mddev->new_level != mddev->level || |
| 4385 | mddev->new_layout != mddev->layout || |
| 4386 | mddev->new_chunk != mddev->chunk_size) { |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4387 | printk(KERN_ERR "raid5: %s: unsupported reshape " |
| 4388 | "required - aborting.\n", |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4389 | mdname(mddev)); |
| 4390 | return -EINVAL; |
| 4391 | } |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4392 | old_disks = mddev->raid_disks - mddev->delta_disks; |
| 4393 | /* reshape_position must be on a new-stripe boundary, and one |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4394 | * further up in new geometry must map after here in old |
| 4395 | * geometry. |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4396 | */ |
| 4397 | here_new = mddev->reshape_position; |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 4398 | if (sector_div(here_new, (mddev->new_chunk>>9)* |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4399 | (mddev->raid_disks - max_degraded))) { |
| 4400 | printk(KERN_ERR "raid5: reshape_position not " |
| 4401 | "on a stripe boundary\n"); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4402 | return -EINVAL; |
| 4403 | } |
| 4404 | /* here_new is the stripe we will write to */ |
| 4405 | here_old = mddev->reshape_position; |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4406 | sector_div(here_old, (mddev->chunk_size>>9)* |
| 4407 | (old_disks-max_degraded)); |
| 4408 | /* here_old is the first stripe that we might need to read |
| 4409 | * from */ |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4410 | if (here_new >= here_old) { |
| 4411 | /* Reading from the same stripe as writing to - bad */ |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4412 | printk(KERN_ERR "raid5: reshape_position too early for " |
| 4413 | "auto-recovery - aborting.\n"); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4414 | return -EINVAL; |
| 4415 | } |
| 4416 | printk(KERN_INFO "raid5: reshape will continue\n"); |
| 4417 | /* OK, we should be able to continue; */ |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4418 | } else { |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4419 | BUG_ON(mddev->level != mddev->new_level); |
| 4420 | BUG_ON(mddev->layout != mddev->new_layout); |
| 4421 | BUG_ON(mddev->chunk_size != mddev->new_chunk); |
| 4422 | BUG_ON(mddev->delta_disks != 0); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4423 | } |
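/* Worked example for the check above: growing a 4-disk raid5 with 64k
 * chunks to 5 disks gives an old data-stripe of 3 * 128 = 384 sectors and
 * a new one of 4 * 128 = 512 sectors.  reshape_position must be a
 * multiple of 512, and only once position/512 < position/384 (1536
 * sectors or more here) is the stripe being written guaranteed not to
 * overlap stripes that still need to be read.
 */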
| 4424 | |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4425 | if (mddev->private == NULL) |
| 4426 | conf = setup_conf(mddev); |
| 4427 | else |
| 4428 | conf = mddev->private; |
| 4429 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4430 | if (IS_ERR(conf)) |
| 4431 | return PTR_ERR(conf); |
NeilBrown | 9ffae0c | 2006-01-06 00:20:32 -0800 | [diff] [blame] | 4432 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4433 | mddev->thread = conf->thread; |
| 4434 | conf->thread = NULL; |
| 4435 | mddev->private = conf; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4436 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4437 | /* |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4438 | * 0 for a fully functional array, 1 or 2 for a degraded array. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4439 | */ |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4440 | list_for_each_entry(rdev, &mddev->disks, same_set) |
| 4441 | if (rdev->raid_disk >= 0 && |
| 4442 | test_bit(In_sync, &rdev->flags)) |
| 4443 | working_disks++; |
| 4444 | |
NeilBrown | 02c2de8 | 2006-10-03 01:15:47 -0700 | [diff] [blame] | 4445 | mddev->degraded = conf->raid_disks - working_disks; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4446 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4447 | if (mddev->degraded > conf->max_degraded) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4448 | printk(KERN_ERR "raid5: not enough operational devices for %s" |
| 4449 | " (%d/%d failed)\n", |
NeilBrown | 02c2de8 | 2006-10-03 01:15:47 -0700 | [diff] [blame] | 4450 | mdname(mddev), mddev->degraded, conf->raid_disks); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4451 | goto abort; |
| 4452 | } |
| 4453 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4454 | /* device size must be a multiple of chunk size */ |
| 4455 | mddev->dev_sectors &= ~(mddev->chunk_size / 512 - 1); |
| 4456 | mddev->resync_max_sectors = mddev->dev_sectors; |
| 4457 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4458 | if (mddev->degraded > 0 && |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4459 | mddev->recovery_cp != MaxSector) { |
NeilBrown | 6ff8d8ec | 2006-01-06 00:20:15 -0800 | [diff] [blame] | 4460 | if (mddev->ok_start_degraded) |
| 4461 | printk(KERN_WARNING |
| 4462 | "raid5: starting dirty degraded array: %s" |
| 4463 | "- data corruption possible.\n", |
| 4464 | mdname(mddev)); |
| 4465 | else { |
| 4466 | printk(KERN_ERR |
| 4467 | "raid5: cannot start dirty degraded array for %s\n", |
| 4468 | mdname(mddev)); |
| 4469 | goto abort; |
| 4470 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4471 | } |
| 4472 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4473 | if (mddev->degraded == 0) |
| 4474 | printk("raid5: raid level %d set %s active with %d out of %d" |
| 4475 | " devices, algorithm %d\n", conf->level, mdname(mddev), |
| 4476 | mddev->raid_disks-mddev->degraded, mddev->raid_disks, |
| 4477 | conf->algorithm); |
| 4478 | else |
| 4479 | printk(KERN_ALERT "raid5: raid level %d set %s active with %d" |
| 4480 | " out of %d devices, algorithm %d\n", conf->level, |
| 4481 | mdname(mddev), mddev->raid_disks - mddev->degraded, |
| 4482 | mddev->raid_disks, conf->algorithm); |
| 4483 | |
| 4484 | print_raid5_conf(conf); |
| 4485 | |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4486 | if (conf->reshape_progress != MaxSector) { |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4487 | printk("...ok start reshape thread\n"); |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4488 | conf->reshape_safe = conf->reshape_progress; |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4489 | atomic_set(&conf->reshape_stripes, 0); |
| 4490 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 4491 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 4492 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
| 4493 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 4494 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
| 4495 | "%s_reshape"); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4496 | } |
| 4497 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4498 | /* read-ahead size must cover two whole stripes, which is |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4499 | * 2 * (datadisks) * chunksize, where 'datadisks' is raid_disks - max_degraded |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4500 | */ |
| 4501 | { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4502 | int data_disks = conf->previous_raid_disks - conf->max_degraded; |
| 4503 | int stripe = data_disks * |
NeilBrown | 8932c2e | 2006-06-26 00:27:36 -0700 | [diff] [blame] | 4504 | (mddev->chunk_size / PAGE_SIZE); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4505 | if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
| 4506 | mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
| 4507 | } |
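/* e.g. a 4-disk raid5 with 64k chunks: data_disks = 3, stripe = 3 * 16
 * pages (assuming 4k pages), so ra_pages is raised to at least 96 pages,
 * i.e. 384k of read-ahead.
 */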
| 4508 | |
| 4509 | /* Ok, everything is just fine now */ |
NeilBrown | 5e55e2f | 2007-03-26 21:32:14 -0800 | [diff] [blame] | 4510 | if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) |
| 4511 | printk(KERN_WARNING |
| 4512 | "raid5: failed to create sysfs attributes for %s\n", |
| 4513 | mdname(mddev)); |
NeilBrown | 7a5febe | 2005-05-16 21:53:16 -0700 | [diff] [blame] | 4514 | |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4515 | mddev->queue->queue_lock = &conf->device_lock; |
| 4516 | |
NeilBrown | 7a5febe | 2005-05-16 21:53:16 -0700 | [diff] [blame] | 4517 | mddev->queue->unplug_fn = raid5_unplug_device; |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 4518 | mddev->queue->backing_dev_info.congested_data = mddev; |
NeilBrown | 041ae52 | 2007-03-26 21:32:14 -0800 | [diff] [blame] | 4519 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
NeilBrown | f022b2f | 2006-10-03 01:15:56 -0700 | [diff] [blame] | 4520 | |
Dan Williams | 1f40362 | 2009-03-31 14:59:03 +1100 | [diff] [blame] | 4521 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
NeilBrown | 7a5febe | 2005-05-16 21:53:16 -0700 | [diff] [blame] | 4522 | |
Raz Ben-Jehuda(caro) | 23032a0 | 2006-12-10 02:20:45 -0800 | [diff] [blame] | 4523 | blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); |
| 4524 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4525 | return 0; |
| 4526 | abort: |
NeilBrown | e0cf8f0 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4527 | md_unregister_thread(mddev->thread); |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4528 | mddev->thread = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4529 | if (conf) { |
NeilBrown | 91adb56 | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 4530 | shrink_stripes(conf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4531 | print_raid5_conf(conf); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4532 | safe_put_page(conf->spare_page); |
NeilBrown | b55e6bf | 2006-03-27 01:18:06 -0800 | [diff] [blame] | 4533 | kfree(conf->disks); |
NeilBrown | fccddba | 2006-01-06 00:20:33 -0800 | [diff] [blame] | 4534 | kfree(conf->stripe_hashtbl); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4535 | kfree(conf); |
| 4536 | } |
| 4537 | mddev->private = NULL; |
| 4538 | printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); |
| 4539 | return -EIO; |
| 4540 | } |
| 4541 | |
| 4542 | |
| 4543 | |
NeilBrown | 3f294f4 | 2005-11-08 21:39:25 -0800 | [diff] [blame] | 4544 | static int stop(mddev_t *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4545 | { |
| 4546 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
| 4547 | |
| 4548 | md_unregister_thread(mddev->thread); |
| 4549 | mddev->thread = NULL; |
| 4550 | shrink_stripes(conf); |
NeilBrown | fccddba | 2006-01-06 00:20:33 -0800 | [diff] [blame] | 4551 | kfree(conf->stripe_hashtbl); |
NeilBrown | 041ae52 | 2007-03-26 21:32:14 -0800 | [diff] [blame] | 4552 | mddev->queue->backing_dev_info.congested_fn = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4553 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
NeilBrown | 007583c | 2005-11-08 21:39:30 -0800 | [diff] [blame] | 4554 | sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); |
NeilBrown | b55e6bf | 2006-03-27 01:18:06 -0800 | [diff] [blame] | 4555 | kfree(conf->disks); |
NeilBrown | 96de1e6 | 2005-11-08 21:39:39 -0800 | [diff] [blame] | 4556 | kfree(conf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4557 | mddev->private = NULL; |
| 4558 | return 0; |
| 4559 | } |
| 4560 | |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4561 | #ifdef DEBUG |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 4562 | static void print_sh(struct seq_file *seq, struct stripe_head *sh) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4563 | { |
| 4564 | int i; |
| 4565 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4566 | seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", |
| 4567 | (unsigned long long)sh->sector, sh->pd_idx, sh->state); |
| 4568 | seq_printf(seq, "sh %llu, count %d.\n", |
| 4569 | (unsigned long long)sh->sector, atomic_read(&sh->count)); |
| 4570 | seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); |
NeilBrown | 7ecaa1e | 2006-03-27 01:18:08 -0800 | [diff] [blame] | 4571 | for (i = 0; i < sh->disks; i++) { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4572 | seq_printf(seq, "(cache%d: %p %ld) ", |
| 4573 | i, sh->dev[i].page, sh->dev[i].flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4574 | } |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4575 | seq_printf(seq, "\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4576 | } |
| 4577 | |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 4578 | static void printall(struct seq_file *seq, raid5_conf_t *conf) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4579 | { |
| 4580 | struct stripe_head *sh; |
NeilBrown | fccddba | 2006-01-06 00:20:33 -0800 | [diff] [blame] | 4581 | struct hlist_node *hn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4582 | int i; |
| 4583 | |
| 4584 | spin_lock_irq(&conf->device_lock); |
| 4585 | for (i = 0; i < NR_HASH; i++) { |
NeilBrown | fccddba | 2006-01-06 00:20:33 -0800 | [diff] [blame] | 4586 | hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4587 | if (sh->raid_conf != conf) |
| 4588 | continue; |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4589 | print_sh(seq, sh); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4590 | } |
| 4591 | } |
| 4592 | spin_unlock_irq(&conf->device_lock); |
| 4593 | } |
| 4594 | #endif |
| 4595 | |
NeilBrown | d710e13 | 2008-10-13 11:55:12 +1100 | [diff] [blame] | 4596 | static void status(struct seq_file *seq, mddev_t *mddev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4597 | { |
| 4598 | raid5_conf_t *conf = (raid5_conf_t *) mddev->private; |
| 4599 | int i; |
| 4600 | |
| 4601 | seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); |
NeilBrown | 02c2de8 | 2006-10-03 01:15:47 -0700 | [diff] [blame] | 4602 | seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4603 | for (i = 0; i < conf->raid_disks; i++) |
| 4604 | seq_printf (seq, "%s", |
| 4605 | conf->disks[i].rdev && |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 4606 | test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4607 | seq_printf (seq, "]"); |
Dan Williams | 45b4233 | 2007-07-09 11:56:43 -0700 | [diff] [blame] | 4608 | #ifdef DEBUG |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4609 | seq_printf (seq, "\n"); |
| 4610 | printall(seq, conf); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4611 | #endif |
| 4612 | } |
| 4613 | |
| 4614 | static void print_raid5_conf (raid5_conf_t *conf) |
| 4615 | { |
| 4616 | int i; |
| 4617 | struct disk_info *tmp; |
| 4618 | |
| 4619 | printk("RAID5 conf printout:\n"); |
| 4620 | if (!conf) { |
| 4621 | printk("(conf==NULL)\n"); |
| 4622 | return; |
| 4623 | } |
NeilBrown | 02c2de8 | 2006-10-03 01:15:47 -0700 | [diff] [blame] | 4624 | printk(" --- rd:%d wd:%d\n", conf->raid_disks, |
| 4625 | conf->raid_disks - conf->mddev->degraded); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4626 | |
| 4627 | for (i = 0; i < conf->raid_disks; i++) { |
| 4628 | char b[BDEVNAME_SIZE]; |
| 4629 | tmp = conf->disks + i; |
| 4630 | if (tmp->rdev) |
| 4631 | printk(" disk %d, o:%d, dev:%s\n", |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 4632 | i, !test_bit(Faulty, &tmp->rdev->flags), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4633 | bdevname(tmp->rdev->bdev,b)); |
| 4634 | } |
| 4635 | } |
| 4636 | |
| 4637 | static int raid5_spare_active(mddev_t *mddev) |
| 4638 | { |
| 4639 | int i; |
| 4640 | raid5_conf_t *conf = mddev->private; |
| 4641 | struct disk_info *tmp; |
| 4642 | |
| 4643 | for (i = 0; i < conf->raid_disks; i++) { |
| 4644 | tmp = conf->disks + i; |
| 4645 | if (tmp->rdev |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 4646 | && !test_bit(Faulty, &tmp->rdev->flags) |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 4647 | && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { |
| 4648 | unsigned long flags; |
| 4649 | spin_lock_irqsave(&conf->device_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4650 | mddev->degraded--; |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 4651 | spin_unlock_irqrestore(&conf->device_lock, flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4652 | } |
| 4653 | } |
| 4654 | print_raid5_conf(conf); |
| 4655 | return 0; |
| 4656 | } |
| 4657 | |
| 4658 | static int raid5_remove_disk(mddev_t *mddev, int number) |
| 4659 | { |
| 4660 | raid5_conf_t *conf = mddev->private; |
| 4661 | int err = 0; |
| 4662 | mdk_rdev_t *rdev; |
| 4663 | struct disk_info *p = conf->disks + number; |
| 4664 | |
| 4665 | print_raid5_conf(conf); |
| 4666 | rdev = p->rdev; |
| 4667 | if (rdev) { |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4668 | if (number >= conf->raid_disks && |
| 4669 | conf->reshape_progress == MaxSector) |
| 4670 | clear_bit(In_sync, &rdev->flags); |
| 4671 | |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 4672 | if (test_bit(In_sync, &rdev->flags) || |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4673 | atomic_read(&rdev->nr_pending)) { |
| 4674 | err = -EBUSY; |
| 4675 | goto abort; |
| 4676 | } |
NeilBrown | dfc7064 | 2008-05-23 13:04:39 -0700 | [diff] [blame] | 4677 | /* Only remove non-faulty devices if recovery |
| 4678 | * isn't possible. |
| 4679 | */ |
| 4680 | if (!test_bit(Faulty, &rdev->flags) && |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4681 | mddev->degraded <= conf->max_degraded && |
| 4682 | number < conf->raid_disks) { |
NeilBrown | dfc7064 | 2008-05-23 13:04:39 -0700 | [diff] [blame] | 4683 | err = -EBUSY; |
| 4684 | goto abort; |
| 4685 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4686 | p->rdev = NULL; |
Paul E. McKenney | fbd568a3e | 2005-05-01 08:59:04 -0700 | [diff] [blame] | 4687 | synchronize_rcu(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4688 | if (atomic_read(&rdev->nr_pending)) { |
| 4689 | /* lost the race, try later */ |
| 4690 | err = -EBUSY; |
| 4691 | p->rdev = rdev; |
| 4692 | } |
| 4693 | } |
| 4694 | abort: |
| 4695 | |
| 4696 | print_raid5_conf(conf); |
| 4697 | return err; |
| 4698 | } |
| 4699 | |
| 4700 | static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) |
| 4701 | { |
| 4702 | raid5_conf_t *conf = mddev->private; |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 4703 | int err = -EEXIST; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4704 | int disk; |
| 4705 | struct disk_info *p; |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 4706 | int first = 0; |
| 4707 | int last = conf->raid_disks - 1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4708 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4709 | if (mddev->degraded > conf->max_degraded) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4710 | /* no point adding a device */ |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 4711 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4712 | |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 4713 | if (rdev->raid_disk >= 0) |
| 4714 | first = last = rdev->raid_disk; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4715 | |
| 4716 | /* |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4717 | * find the disk ... but prefer rdev->saved_raid_disk |
| 4718 | * if possible. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4719 | */ |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4720 | if (rdev->saved_raid_disk >= 0 && |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 4721 | rdev->saved_raid_disk >= first && |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4722 | conf->disks[rdev->saved_raid_disk].rdev == NULL) |
| 4723 | disk = rdev->saved_raid_disk; |
| 4724 | else |
Neil Brown | 6c2fce2 | 2008-06-28 08:31:31 +1000 | [diff] [blame] | 4725 | disk = first; |
| 4726 | for ( ; disk <= last ; disk++) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4727 | if ((p=conf->disks + disk)->rdev == NULL) { |
NeilBrown | b2d444d | 2005-11-08 21:39:31 -0800 | [diff] [blame] | 4728 | clear_bit(In_sync, &rdev->flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4729 | rdev->raid_disk = disk; |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 4730 | err = 0; |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4731 | if (rdev->saved_raid_disk != disk) |
| 4732 | conf->fullsync = 1; |
Suzanne Wood | d6065f7 | 2005-11-08 21:39:27 -0800 | [diff] [blame] | 4733 | rcu_assign_pointer(p->rdev, rdev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4734 | break; |
| 4735 | } |
| 4736 | print_raid5_conf(conf); |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 4737 | return err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4738 | } |
| 4739 | |
| 4740 | static int raid5_resize(mddev_t *mddev, sector_t sectors) |
| 4741 | { |
| 4742 | /* no resync is happening, and there is enough space |
| 4743 | * on all devices, so we can resize. |
| 4744 | * We need to make sure resync covers any new space. |
| 4745 | * If the array is shrinking we should possibly wait until |
| 4746 | * any io in the removed space completes, but it hardly seems |
| 4747 | * worth it. |
| 4748 | */ |
| 4749 | sectors &= ~((sector_t)mddev->chunk_size/512 - 1); |
Dan Williams | 1f40362 | 2009-03-31 14:59:03 +1100 | [diff] [blame] | 4750 | md_set_array_sectors(mddev, raid5_size(mddev, sectors, |
| 4751 | mddev->raid_disks)); |
Dan Williams | b522adc | 2009-03-31 15:00:31 +1100 | [diff] [blame] | 4752 | if (mddev->array_sectors > |
| 4753 | raid5_size(mddev, sectors, mddev->raid_disks)) |
| 4754 | return -EINVAL; |
Andre Noll | f233ea5 | 2008-07-21 17:05:22 +1000 | [diff] [blame] | 4755 | set_capacity(mddev->gendisk, mddev->array_sectors); |
Linus Torvalds | 44ce6294 | 2007-05-09 18:51:36 -0700 | [diff] [blame] | 4756 | mddev->changed = 1; |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 4757 | if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) { |
| 4758 | mddev->recovery_cp = mddev->dev_sectors; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4759 | set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); |
| 4760 | } |
Andre Noll | 58c0fed | 2009-03-31 14:33:13 +1100 | [diff] [blame] | 4761 | mddev->dev_sectors = sectors; |
NeilBrown | 4b5c7ae | 2005-07-27 11:43:28 -0700 | [diff] [blame] | 4762 | mddev->resync_max_sectors = sectors; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 4763 | return 0; |
| 4764 | } |
| 4765 | |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4766 | #ifdef CONFIG_MD_RAID5_RESHAPE |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4767 | static int raid5_check_reshape(mddev_t *mddev) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4768 | { |
| 4769 | raid5_conf_t *conf = mddev_to_conf(mddev); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4770 | |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4771 | if (mddev->delta_disks == 0) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4772 | return 0; /* nothing to do */ |
NeilBrown | dba034e | 2008-08-05 15:54:13 +1000 | [diff] [blame] | 4773 | if (mddev->bitmap) |
| 4774 | /* Cannot grow a bitmap yet */ |
| 4775 | return -EBUSY; |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4776 | if (mddev->degraded > conf->max_degraded) |
| 4777 | return -EINVAL; |
| 4778 | if (mddev->delta_disks < 0) { |
| 4779 | /* We might be able to shrink, but the devices must |
| 4780 | * be made bigger first. |
| 4781 | * For raid6, 4 is the minimum number of devices. |
| 4782 | * Otherwise 2 is the minimum. |
| 4783 | */ |
| 4784 | int min = 2; |
| 4785 | if (mddev->level == 6) |
| 4786 | min = 4; |
| 4787 | if (mddev->raid_disks + mddev->delta_disks < min) |
| 4788 | return -EINVAL; |
| 4789 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4790 | |
| 4791 | /* Can only proceed if there are plenty of stripe_heads. |
| 4792 | * We need a minimum of one full stripe, and for sensible progress |
| 4793 | * it is best to have about 4 times that. |
| 4794 | * If we require 4 times, then the default 256 4K stripe_heads will |
| 4795 | * allow for chunk sizes up to 256K, which is probably OK. |
| 4796 | * If the chunk size is greater, user-space should request more |
| 4797 | * stripe_heads first. |
| 4798 | */ |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4799 | if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || |
| 4800 | (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4801 | printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", |
NeilBrown | 784052e | 2009-03-31 15:19:07 +1100 | [diff] [blame^] | 4802 | (max(mddev->chunk_size, mddev->new_chunk) |
| 4803 | / STRIPE_SIZE)*4); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4804 | return -ENOSPC; |
| 4805 | } |
| 4806 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4807 | return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4808 | } |
| 4809 | |
| 4810 | static int raid5_start_reshape(mddev_t *mddev) |
| 4811 | { |
| 4812 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 4813 | mdk_rdev_t *rdev; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4814 | int spares = 0; |
| 4815 | int added_devices = 0; |
NeilBrown | c04be0a | 2006-10-03 01:15:53 -0700 | [diff] [blame] | 4816 | unsigned long flags; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4817 | |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4818 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4819 | return -EBUSY; |
| 4820 | |
Cheng Renquan | 159ec1f | 2009-01-09 08:31:08 +1100 | [diff] [blame] | 4821 | list_for_each_entry(rdev, &mddev->disks, same_set) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4822 | if (rdev->raid_disk < 0 && |
| 4823 | !test_bit(Faulty, &rdev->flags)) |
| 4824 | spares++; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4825 | |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 4826 | if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4827 | /* Not enough devices even to make a degraded array |
| 4828 | * of that size |
| 4829 | */ |
| 4830 | return -EINVAL; |
| 4831 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4832 | /* Refuse to reduce size of the array. Any reductions in |
| 4833 | * array size must be through explicit setting of array_size |
| 4834 | * attribute. |
| 4835 | */ |
| 4836 | if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) |
| 4837 | < mddev->array_sectors) { |
| 4838 | printk(KERN_ERR "md: %s: array size must be reduced " |
| 4839 | "before number of disks\n", mdname(mddev)); |
| 4840 | return -EINVAL; |
| 4841 | } |
| 4842 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4843 | atomic_set(&conf->reshape_stripes, 0); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4844 | spin_lock_irq(&conf->device_lock); |
| 4845 | conf->previous_raid_disks = conf->raid_disks; |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4846 | conf->raid_disks += mddev->delta_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4847 | if (mddev->delta_disks < 0) |
| 4848 | conf->reshape_progress = raid5_size(mddev, 0, 0); |
| 4849 | else |
| 4850 | conf->reshape_progress = 0; |
| 4851 | conf->reshape_safe = conf->reshape_progress; |
NeilBrown | 86b42c7 | 2009-03-31 15:19:03 +1100 | [diff] [blame] | 4852 | conf->generation++; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4853 | spin_unlock_irq(&conf->device_lock); |
| 4854 | |
| 4855 | /* Add some new drives, as many as will fit. |
| 4856 | * We know there are enough to make the newly sized array work. |
| 4857 | */ |
Cheng Renquan | 159ec1f | 2009-01-09 08:31:08 +1100 | [diff] [blame] | 4858 | list_for_each_entry(rdev, &mddev->disks, same_set) |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4859 | if (rdev->raid_disk < 0 && |
| 4860 | !test_bit(Faulty, &rdev->flags)) { |
Neil Brown | 199050e | 2008-06-28 08:31:33 +1000 | [diff] [blame] | 4861 | if (raid5_add_disk(mddev, rdev) == 0) { |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4862 | char nm[20]; |
| 4863 | set_bit(In_sync, &rdev->flags); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4864 | added_devices++; |
NeilBrown | 5fd6c1d | 2006-06-26 00:27:40 -0700 | [diff] [blame] | 4865 | rdev->recovery_offset = 0; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4866 | sprintf(nm, "rd%d", rdev->raid_disk); |
NeilBrown | 5e55e2f | 2007-03-26 21:32:14 -0800 | [diff] [blame] | 4867 | if (sysfs_create_link(&mddev->kobj, |
| 4868 | &rdev->kobj, nm)) |
| 4869 | printk(KERN_WARNING |
| 4870 | "raid5: failed to create " |
| 4871 | " link %s for %s\n", |
| 4872 | nm, mdname(mddev)); |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4873 | } else |
| 4874 | break; |
| 4875 | } |
| 4876 | |
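/* Newly created slots that no spare could be found for above are
 * accounted as degraded; e.g. (illustrative) growing by two disks
 * with only one usable spare leaves mddev->degraded == 1.
 */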
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4877 | if (mddev->delta_disks > 0) { |
| 4878 | spin_lock_irqsave(&conf->device_lock, flags); |
| 4879 | mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) |
| 4880 | - added_devices; |
| 4881 | spin_unlock_irqrestore(&conf->device_lock, flags); |
| 4882 | } |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 4883 | mddev->raid_disks = conf->raid_disks; |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4884 | mddev->reshape_position = conf->reshape_progress; |
NeilBrown | 850b2b4 | 2006-10-03 01:15:46 -0700 | [diff] [blame] | 4885 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4886 | |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4887 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
| 4888 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
| 4889 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
| 4890 | set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); |
| 4891 | mddev->sync_thread = md_register_thread(md_do_sync, mddev, |
| 4892 | "%s_reshape"); |
| 4893 | if (!mddev->sync_thread) { |
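/* The reshape thread could not be started: undo the geometry
 * change recorded above and report failure to the caller.
 */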
| 4894 | mddev->recovery = 0; |
| 4895 | spin_lock_irq(&conf->device_lock); |
| 4896 | mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4897 | conf->reshape_progress = MaxSector; |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4898 | spin_unlock_irq(&conf->device_lock); |
| 4899 | return -EAGAIN; |
| 4900 | } |
| 4901 | md_wakeup_thread(mddev->sync_thread); |
| 4902 | md_new_event(mddev); |
| 4903 | return 0; |
| 4904 | } |
| 4905 | #endif |
| 4906 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4907 | /* This is called from the reshape thread and should make any |
| 4908 | * changes needed in 'conf' |
| 4909 | */ |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4910 | static void end_reshape(raid5_conf_t *conf) |
| 4911 | { |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4912 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4913 | if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 4914 | |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4915 | spin_lock_irq(&conf->device_lock); |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4916 | conf->previous_raid_disks = conf->raid_disks; |
NeilBrown | fef9c61 | 2009-03-31 15:16:46 +1100 | [diff] [blame] | 4917 | conf->reshape_progress = MaxSector; |
NeilBrown | f670557 | 2006-03-27 01:18:11 -0800 | [diff] [blame] | 4918 | spin_unlock_irq(&conf->device_lock); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4919 | |
| 4920 | /* read-ahead size must cover two whole stripes, which is |
| 4921 | * 2 * (number of data disks) * chunksize |
| 4922 | */ |
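/* For example (assuming 4KiB pages): a 6-drive raid6 with 64KiB
 * chunks has 4 data disks, so stripe = 4 * 16 = 64 pages and the
 * read-ahead window is raised to at least 128 pages (512KiB).
 */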
| 4923 | { |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4924 | int data_disks = conf->raid_disks - conf->max_degraded; |
| 4925 | int stripe = data_disks * (conf->chunk_size |
| 4926 | / PAGE_SIZE); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 4927 | if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) |
| 4928 | conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; |
| 4929 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4930 | } |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 4931 | } |
| 4932 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4933 | /* This is called from the raid5d thread with mddev_lock held. |
| 4934 | * It makes config changes to the device. |
| 4935 | */ |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4936 | static void raid5_finish_reshape(mddev_t *mddev) |
| 4937 | { |
| 4938 | struct block_device *bdev; |
| 4939 | |
| 4940 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { |
| 4941 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4942 | if (mddev->delta_disks > 0) { |
| 4943 | md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); |
| 4944 | set_capacity(mddev->gendisk, mddev->array_sectors); |
| 4945 | mddev->changed = 1; |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4946 | |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4947 | bdev = bdget_disk(mddev->gendisk, 0); |
| 4948 | if (bdev) { |
| 4949 | mutex_lock(&bdev->bd_inode->i_mutex); |
| 4950 | i_size_write(bdev->bd_inode, |
| 4951 | (loff_t)mddev->array_sectors << 9); |
| 4952 | mutex_unlock(&bdev->bd_inode->i_mutex); |
| 4953 | bdput(bdev); |
| 4954 | } |
| 4955 | } else { |
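			/* Shrinking: recount degraded against the new, smaller
			 * geometry, then hot-remove the devices that now lie
			 * beyond the end of the array
			 * (slots raid_disks .. old raid_disks - 1).
			 */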
| 4956 | int d; |
| 4957 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 4958 | mddev->degraded = conf->raid_disks; |
| 4959 | for (d = 0; d < conf->raid_disks ; d++) |
| 4960 | if (conf->disks[d].rdev && |
| 4961 | test_bit(In_sync, |
| 4962 | &conf->disks[d].rdev->flags)) |
| 4963 | mddev->degraded--; |
| 4964 | for (d = conf->raid_disks ; |
| 4965 | d < conf->raid_disks - mddev->delta_disks; |
| 4966 | d++) |
| 4967 | raid5_remove_disk(mddev, d); |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4968 | } |
NeilBrown | ec32a2b | 2009-03-31 15:17:38 +1100 | [diff] [blame] | 4969 | mddev->reshape_position = MaxSector; |
| 4970 | mddev->delta_disks = 0; |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 4971 | } |
| 4972 | } |
| 4973 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4974 | static void raid5_quiesce(mddev_t *mddev, int state) |
| 4975 | { |
| 4976 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 4977 | |
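/* state 1 drains all in-flight stripes and blocks new writes,
 * state 0 lifts that block again, and state 2 is a lighter resume
 * used around suspend that only wakes waiters blocked on
 * overlapping requests.
 */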
| 4978 | switch (state) { |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 4979 | case 2: /* resume for a suspend */ |
| 4980 | wake_up(&conf->wait_for_overlap); |
| 4981 | break; |
| 4982 | |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4983 | case 1: /* stop all writes */ |
| 4984 | spin_lock_irq(&conf->device_lock); |
| 4985 | conf->quiesce = 1; |
| 4986 | wait_event_lock_irq(conf->wait_for_stripe, |
Raz Ben-Jehuda(caro) | 46031f9 | 2006-12-10 02:20:47 -0800 | [diff] [blame] | 4987 | atomic_read(&conf->active_stripes) == 0 && |
| 4988 | atomic_read(&conf->active_aligned_reads) == 0, |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4989 | conf->device_lock, /* nothing */); |
| 4990 | spin_unlock_irq(&conf->device_lock); |
| 4991 | break; |
| 4992 | |
| 4993 | case 0: /* re-enable writes */ |
| 4994 | spin_lock_irq(&conf->device_lock); |
| 4995 | conf->quiesce = 0; |
| 4996 | wake_up(&conf->wait_for_stripe); |
NeilBrown | e464eaf | 2006-03-27 01:18:14 -0800 | [diff] [blame] | 4997 | wake_up(&conf->wait_for_overlap); |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 4998 | spin_unlock_irq(&conf->device_lock); |
| 4999 | break; |
| 5000 | } |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5001 | } |
NeilBrown | b15c2e5 | 2006-01-06 00:20:16 -0800 | [diff] [blame] | 5002 | |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5003 | |
| 5004 | static void *raid5_takeover_raid1(mddev_t *mddev) |
| 5005 | { |
| 5006 | int chunksect; |
| 5007 | |
| 5008 | if (mddev->raid_disks != 2 || |
| 5009 | mddev->degraded > 1) |
| 5010 | return ERR_PTR(-EINVAL); |
| 5011 | |
| 5012 | /* Should check if there are write-behind devices? */ |
| 5013 | |
| 5014 | chunksect = 64*2; /* 64K by default */ |
| 5015 | |
| 5016 | /* The array must be an exact multiple of chunksize */ |
| 5017 | while (chunksect && (mddev->array_sectors & (chunksect-1))) |
| 5018 | chunksect >>= 1; |
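	/* Illustrative example: a 1000-sector array is not a multiple of
	 * 128 or 64 sectors, but is a multiple of 8, so chunksect ends
	 * up at 8 (a 4KiB chunk).
	 */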
| 5019 | |
| 5020 | if ((chunksect<<9) < STRIPE_SIZE) |
| 5021 | /* array size does not allow a suitable chunk size */ |
| 5022 | return ERR_PTR(-EINVAL); |
| 5023 | |
| 5024 | mddev->new_level = 5; |
| 5025 | mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; |
| 5026 | mddev->new_chunk = chunksect << 9; |
| 5027 | |
| 5028 | return setup_conf(mddev); |
| 5029 | } |
| 5030 | |
NeilBrown | fc9739c | 2009-03-31 14:57:20 +1100 | [diff] [blame] | 5031 | static void *raid5_takeover_raid6(mddev_t *mddev) |
| 5032 | { |
| 5033 | int new_layout; |
| 5034 | |
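	/* The *_6 layouts place the Q syndrome on a dedicated last
	 * device, so such a raid6 is laid out exactly like the
	 * corresponding raid5 plus one trailing Q disk; dropping that
	 * disk (delta_disks = -1) converts the array without moving
	 * any data.
	 */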
| 5035 | switch (mddev->layout) { |
| 5036 | case ALGORITHM_LEFT_ASYMMETRIC_6: |
| 5037 | new_layout = ALGORITHM_LEFT_ASYMMETRIC; |
| 5038 | break; |
| 5039 | case ALGORITHM_RIGHT_ASYMMETRIC_6: |
| 5040 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC; |
| 5041 | break; |
| 5042 | case ALGORITHM_LEFT_SYMMETRIC_6: |
| 5043 | new_layout = ALGORITHM_LEFT_SYMMETRIC; |
| 5044 | break; |
| 5045 | case ALGORITHM_RIGHT_SYMMETRIC_6: |
| 5046 | new_layout = ALGORITHM_RIGHT_SYMMETRIC; |
| 5047 | break; |
| 5048 | case ALGORITHM_PARITY_0_6: |
| 5049 | new_layout = ALGORITHM_PARITY_0; |
| 5050 | break; |
| 5051 | case ALGORITHM_PARITY_N: |
| 5052 | new_layout = ALGORITHM_PARITY_N; |
| 5053 | break; |
| 5054 | default: |
| 5055 | return ERR_PTR(-EINVAL); |
| 5056 | } |
| 5057 | mddev->new_level = 5; |
| 5058 | mddev->new_layout = new_layout; |
| 5059 | mddev->delta_disks = -1; |
| 5060 | mddev->raid_disks -= 1; |
| 5061 | return setup_conf(mddev); |
| 5062 | } |
| 5063 | |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5064 | |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5065 | static int raid5_reconfig(mddev_t *mddev, int new_layout, int new_chunk) |
| 5066 | { |
| 5067 | /* Currently the layout and chunk size can only be changed |
| 5068 | * for a 2-drive raid array, as in that case no data shuffling |
| 5069 | * is required. |
| 5070 | * Later we might validate these and set new_* so a reshape |
| 5071 | * can complete the change. |
| 5072 | */ |
| 5073 | raid5_conf_t *conf = mddev_to_conf(mddev); |
| 5074 | |
| 5075 | if (new_layout >= 0 && !algorithm_valid_raid5(new_layout)) |
| 5076 | return -EINVAL; |
| 5077 | if (new_chunk > 0) { |
| 5078 | if (new_chunk & (new_chunk-1)) |
| 5079 | /* not a power of 2 */ |
| 5080 | return -EINVAL; |
| 5081 | if (new_chunk < PAGE_SIZE) |
| 5082 | return -EINVAL; |
| 5083 | if (mddev->array_sectors & ((new_chunk>>9)-1)) |
| 5084 | /* not factor of array size */ |
| 5085 | return -EINVAL; |
| 5086 | } |
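	/* Illustrative example (assuming 4KiB pages): new_chunk = 65536
	 * is a power of two, is not smaller than PAGE_SIZE, and spans
	 * 128 sectors, so it is accepted only if the array size is a
	 * multiple of 128 sectors.
	 */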
| 5087 | |
| 5088 | /* They look valid */ |
| 5089 | |
| 5090 | if (mddev->raid_disks != 2) |
| 5091 | return -EINVAL; |
| 5092 | |
| 5093 | if (new_layout >= 0) { |
| 5094 | conf->algorithm = new_layout; |
| 5095 | mddev->layout = mddev->new_layout = new_layout; |
| 5096 | } |
| 5097 | if (new_chunk > 0) { |
| 5098 | conf->chunk_size = new_chunk; |
| 5099 | mddev->chunk_size = mddev->new_chunk = new_chunk; |
| 5100 | } |
| 5101 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
| 5102 | md_wakeup_thread(mddev->thread); |
| 5103 | return 0; |
| 5104 | } |
| 5105 | |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5106 | static void *raid5_takeover(mddev_t *mddev) |
| 5107 | { |
| 5108 | /* raid5 can take over: |
| 5109 | * raid0 - if all devices are the same - make it a raid4 layout |
| 5110 | * raid1 - if there are two drives. We need to know the chunk size |
| 5111 | * raid4 - trivial - just use a raid4 layout. |
| 5112 | * raid6 - Providing it is a *_6 layout |
| 5113 | * |
| 5114 | * For now raid0 is not handled; raid1, raid4 and raid6 are converted below. |
| 5115 | */ |
| 5116 | |
| 5117 | if (mddev->level == 1) |
| 5118 | return raid5_takeover_raid1(mddev); |
NeilBrown | e9d4758 | 2009-03-31 14:57:09 +1100 | [diff] [blame] | 5119 | if (mddev->level == 4) { |
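		/* raid4 keeps all parity on the last device, which is
		 * exactly what ALGORITHM_PARITY_N describes, so the
		 * takeover needs no data movement.
		 */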
| 5120 | mddev->new_layout = ALGORITHM_PARITY_N; |
| 5121 | mddev->new_level = 5; |
| 5122 | return setup_conf(mddev); |
| 5123 | } |
NeilBrown | fc9739c | 2009-03-31 14:57:20 +1100 | [diff] [blame] | 5124 | if (mddev->level == 6) |
| 5125 | return raid5_takeover_raid6(mddev); |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5126 | |
| 5127 | return ERR_PTR(-EINVAL); |
| 5128 | } |
| 5129 | |
| 5130 | |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5131 | static struct mdk_personality raid5_personality; |
| 5132 | |
| 5133 | static void *raid6_takeover(mddev_t *mddev) |
| 5134 | { |
| 5135 | /* Currently can only take over a raid5. We map the |
| 5136 | * raid5 layout to the equivalent raid6 layout |
| 5137 | * with the Q block on an extra device at the end. |
| 5138 | */ |
| 5139 | int new_layout; |
| 5140 | |
| 5141 | if (mddev->pers != &raid5_personality) |
| 5142 | return ERR_PTR(-EINVAL); |
| 5143 | if (mddev->degraded > 1) |
| 5144 | return ERR_PTR(-EINVAL); |
| 5145 | if (mddev->raid_disks > 253) |
| 5146 | return ERR_PTR(-EINVAL); |
| 5147 | if (mddev->raid_disks < 3) |
| 5148 | return ERR_PTR(-EINVAL); |
| 5149 | |
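	/* Every raid5 layout has a raid6 twin (the *_6 variants) that
	 * simply appends a dedicated Q drive, so existing data and
	 * parity stay in place and only the new Q blocks need to be
	 * computed.
	 */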
| 5150 | switch (mddev->layout) { |
| 5151 | case ALGORITHM_LEFT_ASYMMETRIC: |
| 5152 | new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; |
| 5153 | break; |
| 5154 | case ALGORITHM_RIGHT_ASYMMETRIC: |
| 5155 | new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; |
| 5156 | break; |
| 5157 | case ALGORITHM_LEFT_SYMMETRIC: |
| 5158 | new_layout = ALGORITHM_LEFT_SYMMETRIC_6; |
| 5159 | break; |
| 5160 | case ALGORITHM_RIGHT_SYMMETRIC: |
| 5161 | new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; |
| 5162 | break; |
| 5163 | case ALGORITHM_PARITY_0: |
| 5164 | new_layout = ALGORITHM_PARITY_0_6; |
| 5165 | break; |
| 5166 | case ALGORITHM_PARITY_N: |
| 5167 | new_layout = ALGORITHM_PARITY_N; |
| 5168 | break; |
| 5169 | default: |
| 5170 | return ERR_PTR(-EINVAL); |
| 5171 | } |
| 5172 | mddev->new_level = 6; |
| 5173 | mddev->new_layout = new_layout; |
| 5174 | mddev->delta_disks = 1; |
| 5175 | mddev->raid_disks += 1; |
| 5176 | return setup_conf(mddev); |
| 5177 | } |
| 5178 | |
| 5179 | |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5180 | static struct mdk_personality raid6_personality = |
| 5181 | { |
| 5182 | .name = "raid6", |
| 5183 | .level = 6, |
| 5184 | .owner = THIS_MODULE, |
| 5185 | .make_request = make_request, |
| 5186 | .run = run, |
| 5187 | .stop = stop, |
| 5188 | .status = status, |
| 5189 | .error_handler = error, |
| 5190 | .hot_add_disk = raid5_add_disk, |
| 5191 | .hot_remove_disk= raid5_remove_disk, |
| 5192 | .spare_active = raid5_spare_active, |
| 5193 | .sync_request = sync_request, |
| 5194 | .resize = raid5_resize, |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 5195 | .size = raid5_size, |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 5196 | #ifdef CONFIG_MD_RAID5_RESHAPE |
| 5197 | .check_reshape = raid5_check_reshape, |
| 5198 | .start_reshape = raid5_start_reshape, |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5199 | .finish_reshape = raid5_finish_reshape, |
NeilBrown | f416885 | 2007-02-28 20:11:53 -0800 | [diff] [blame] | 5200 | #endif |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5201 | .quiesce = raid5_quiesce, |
NeilBrown | 245f46c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5202 | .takeover = raid6_takeover, |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5203 | }; |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5204 | static struct mdk_personality raid5_personality = |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5205 | { |
| 5206 | .name = "raid5", |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5207 | .level = 5, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5208 | .owner = THIS_MODULE, |
| 5209 | .make_request = make_request, |
| 5210 | .run = run, |
| 5211 | .stop = stop, |
| 5212 | .status = status, |
| 5213 | .error_handler = error, |
| 5214 | .hot_add_disk = raid5_add_disk, |
| 5215 | .hot_remove_disk= raid5_remove_disk, |
| 5216 | .spare_active = raid5_spare_active, |
| 5217 | .sync_request = sync_request, |
| 5218 | .resize = raid5_resize, |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 5219 | .size = raid5_size, |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5220 | #ifdef CONFIG_MD_RAID5_RESHAPE |
NeilBrown | 63c70c4 | 2006-03-27 01:18:13 -0800 | [diff] [blame] | 5221 | .check_reshape = raid5_check_reshape, |
| 5222 | .start_reshape = raid5_start_reshape, |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5223 | .finish_reshape = raid5_finish_reshape, |
NeilBrown | 2926955 | 2006-03-27 01:18:10 -0800 | [diff] [blame] | 5224 | #endif |
NeilBrown | 7262668 | 2005-09-09 16:23:54 -0700 | [diff] [blame] | 5225 | .quiesce = raid5_quiesce, |
NeilBrown | d562b0c | 2009-03-31 14:39:39 +1100 | [diff] [blame] | 5226 | .takeover = raid5_takeover, |
NeilBrown | b354603 | 2009-03-31 14:56:41 +1100 | [diff] [blame] | 5227 | .reconfig = raid5_reconfig, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5228 | }; |
| 5229 | |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5230 | static struct mdk_personality raid4_personality = |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5231 | { |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5232 | .name = "raid4", |
| 5233 | .level = 4, |
| 5234 | .owner = THIS_MODULE, |
| 5235 | .make_request = make_request, |
| 5236 | .run = run, |
| 5237 | .stop = stop, |
| 5238 | .status = status, |
| 5239 | .error_handler = error, |
| 5240 | .hot_add_disk = raid5_add_disk, |
| 5241 | .hot_remove_disk= raid5_remove_disk, |
| 5242 | .spare_active = raid5_spare_active, |
| 5243 | .sync_request = sync_request, |
| 5244 | .resize = raid5_resize, |
Dan Williams | 80c3a6c | 2009-03-17 18:10:40 -0700 | [diff] [blame] | 5245 | .size = raid5_size, |
NeilBrown | 3d37890 | 2007-03-26 21:32:13 -0800 | [diff] [blame] | 5246 | #ifdef CONFIG_MD_RAID5_RESHAPE |
| 5247 | .check_reshape = raid5_check_reshape, |
| 5248 | .start_reshape = raid5_start_reshape, |
NeilBrown | cea9c22 | 2009-03-31 15:15:05 +1100 | [diff] [blame] | 5249 | .finish_reshape = raid5_finish_reshape, |
NeilBrown | 3d37890 | 2007-03-26 21:32:13 -0800 | [diff] [blame] | 5250 | #endif |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5251 | .quiesce = raid5_quiesce, |
| 5252 | }; |
| 5253 | |
| 5254 | static int __init raid5_init(void) |
| 5255 | { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5256 | register_md_personality(&raid6_personality); |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5257 | register_md_personality(&raid5_personality); |
| 5258 | register_md_personality(&raid4_personality); |
| 5259 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5260 | } |
| 5261 | |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5262 | static void raid5_exit(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5263 | { |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5264 | unregister_md_personality(&raid6_personality); |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5265 | unregister_md_personality(&raid5_personality); |
| 5266 | unregister_md_personality(&raid4_personality); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 5267 | } |
| 5268 | |
| 5269 | module_init(raid5_init); |
| 5270 | module_exit(raid5_exit); |
| 5271 | MODULE_LICENSE("GPL"); |
| 5272 | MODULE_ALIAS("md-personality-4"); /* RAID5 */ |
NeilBrown | d9d166c | 2006-01-06 00:20:51 -0800 | [diff] [blame] | 5273 | MODULE_ALIAS("md-raid5"); |
| 5274 | MODULE_ALIAS("md-raid4"); |
NeilBrown | 2604b70 | 2006-01-06 00:20:36 -0800 | [diff] [blame] | 5275 | MODULE_ALIAS("md-level-5"); |
| 5276 | MODULE_ALIAS("md-level-4"); |
NeilBrown | 16a53ec | 2006-06-26 00:27:38 -0700 | [diff] [blame] | 5277 | MODULE_ALIAS("md-personality-8"); /* RAID6 */ |
| 5278 | MODULE_ALIAS("md-raid6"); |
| 5279 | MODULE_ALIAS("md-level-6"); |
| 5280 | |
| 5281 | /* This used to be two separate modules, they were: */ |
| 5282 | MODULE_ALIAS("raid5"); |
| 5283 | MODULE_ALIAS("raid6"); |