/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include "md.h"
#include "raid1.h"
#include "raid5.h"
#include "raid10.h"
#include "bitmap.h"

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */

/*
 * Minimum sectors of free reshape space per raid device
 */
#define MIN_FREE_RESHAPE_SPACE to_sector(4*4096)
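/*
 * Illustrative note (not in the original source): to_sector() converts a
 * byte count to 512-byte sectors, so MIN_FREE_RESHAPE_SPACE evaluates to
 * (4 * 4096) >> 9 == 32 sectors (16 KiB) of out-of-place reshape headroom
 * required per raid device.
 */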

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10 /* rdev flag */

struct raid_dev {
	/*
	 * Two DM devices, one to hold metadata and one to hold the
	 * actual data/parity. The reason for this is to not confuse
	 * ti->len and give more flexibility in altering size and
	 * characteristics.
	 *
	 * While it is possible for this device to be associated
	 * with a different physical device than the data_dev, it
	 * is intended for it to be the same.
	 *    |--------- Physical Device ---------|
	 *    |- meta_dev -|------ data_dev ------|
	 */
	struct dm_dev *meta_dev;
	struct dm_dev *data_dev;
	struct md_rdev rdev;
};

/*
 * Bits for establishing rs->ctr_flags
 *
 * The 1/2 annotations on the definitions below give the number of table
 * line arguments the option consumes:
 * 1 = option without a value (e.g. 'nosync')
 * 2 = option with a value (e.g. 'rebuild 1')
 */
#define __CTR_FLAG_SYNC			0  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_NOSYNC		1  /* 1 */ /* Not with raid0! */
#define __CTR_FLAG_REBUILD		2  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_DAEMON_SLEEP		3  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MIN_RECOVERY_RATE	4  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_RECOVERY_RATE	5  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_MAX_WRITE_BEHIND	6  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_WRITE_MOSTLY		7  /* 2 */ /* Only with raid1! */
#define __CTR_FLAG_STRIPE_CACHE		8  /* 2 */ /* Only with raid4/5/6! */
#define __CTR_FLAG_REGION_SIZE		9  /* 2 */ /* Not with raid0! */
#define __CTR_FLAG_RAID10_COPIES	10 /* 2 */ /* Only with raid10 */
#define __CTR_FLAG_RAID10_FORMAT	11 /* 2 */ /* Only with raid10 */
/* New for v1.9.0 */
#define __CTR_FLAG_DELTA_DISKS		12 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_DATA_OFFSET		13 /* 2 */ /* Only with reshapable raid4/5/6/10! */
#define __CTR_FLAG_RAID10_USE_NEAR_SETS 14 /* 2 */ /* Only with raid10! */

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC			(1 << __CTR_FLAG_SYNC)
#define CTR_FLAG_NOSYNC			(1 << __CTR_FLAG_NOSYNC)
#define CTR_FLAG_REBUILD		(1 << __CTR_FLAG_REBUILD)
#define CTR_FLAG_DAEMON_SLEEP		(1 << __CTR_FLAG_DAEMON_SLEEP)
#define CTR_FLAG_MIN_RECOVERY_RATE	(1 << __CTR_FLAG_MIN_RECOVERY_RATE)
#define CTR_FLAG_MAX_RECOVERY_RATE	(1 << __CTR_FLAG_MAX_RECOVERY_RATE)
#define CTR_FLAG_MAX_WRITE_BEHIND	(1 << __CTR_FLAG_MAX_WRITE_BEHIND)
#define CTR_FLAG_WRITE_MOSTLY		(1 << __CTR_FLAG_WRITE_MOSTLY)
#define CTR_FLAG_STRIPE_CACHE		(1 << __CTR_FLAG_STRIPE_CACHE)
#define CTR_FLAG_REGION_SIZE		(1 << __CTR_FLAG_REGION_SIZE)
#define CTR_FLAG_RAID10_COPIES		(1 << __CTR_FLAG_RAID10_COPIES)
#define CTR_FLAG_RAID10_FORMAT		(1 << __CTR_FLAG_RAID10_FORMAT)
#define CTR_FLAG_DELTA_DISKS		(1 << __CTR_FLAG_DELTA_DISKS)
#define CTR_FLAG_DATA_OFFSET		(1 << __CTR_FLAG_DATA_OFFSET)
#define CTR_FLAG_RAID10_USE_NEAR_SETS	(1 << __CTR_FLAG_RAID10_USE_NEAR_SETS)
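/*
 * Worked example (illustrative, not from the original source): a raid1
 * table line carrying the options 'nosync' and 'max_write_behind 256'
 * yields ctr_flags == CTR_FLAG_NOSYNC | CTR_FLAG_MAX_WRITE_BEHIND,
 * i.e. (1 << 1) | (1 << 6) == 0x42.
 */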

/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 * per raid level.
 */
/* Define all 'sync' related flags */
#define CTR_FLAGS_ANY_SYNC		(CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS	(CTR_FLAGS_ANY_SYNC | \
					 CTR_FLAG_RAID10_USE_NEAR_SETS)

/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
				  CTR_FLAG_WRITE_MOSTLY | \
				  CTR_FLAG_DAEMON_SLEEP | \
				  CTR_FLAG_MIN_RECOVERY_RATE | \
				  CTR_FLAG_MAX_RECOVERY_RATE | \
				  CTR_FLAG_MAX_WRITE_BEHIND | \
				  CTR_FLAG_STRIPE_CACHE | \
				  CTR_FLAG_REGION_SIZE | \
				  CTR_FLAG_RAID10_COPIES | \
				  CTR_FLAG_RAID10_FORMAT | \
				  CTR_FLAG_DELTA_DISKS | \
				  CTR_FLAG_DATA_OFFSET)

/* Valid options definitions per raid level... */

/* "raid0" only accepts a data offset */
#define RAID0_VALID_FLAGS	(CTR_FLAG_DATA_OFFSET)

/* "raid1" does not accept stripe cache, delta_disks or any raid10 options */
#define RAID1_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_WRITE_MOSTLY | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DATA_OFFSET)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_RAID10_COPIES | \
				 CTR_FLAG_RAID10_FORMAT | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET | \
				 CTR_FLAG_RAID10_USE_NEAR_SETS)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it cannot be guaranteed
 * that both parity and Q-syndrome have been written properly for
 * any given write
 */
#define RAID45_VALID_FLAGS	(CTR_FLAGS_ANY_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)

#define RAID6_VALID_FLAGS	(CTR_FLAG_SYNC | \
				 CTR_FLAG_REBUILD | \
				 CTR_FLAG_DAEMON_SLEEP | \
				 CTR_FLAG_MIN_RECOVERY_RATE | \
				 CTR_FLAG_MAX_RECOVERY_RATE | \
				 CTR_FLAG_MAX_WRITE_BEHIND | \
				 CTR_FLAG_STRIPE_CACHE | \
				 CTR_FLAG_REGION_SIZE | \
				 CTR_FLAG_DELTA_DISKS | \
				 CTR_FLAG_DATA_OFFSET)
/* ...valid options definitions per raid level */

/*
 * Flags for rs->runtime_flags field
 * (RT_FLAG prefix meaning "runtime flag")
 *
 * These are all internal and used to define runtime state,
 * e.g. to prevent another resume from preresume processing
 * the raid set all over again.
 */
#define RT_FLAG_RS_PRERESUMED		0
#define RT_FLAG_RS_RESUMED		1
#define RT_FLAG_RS_BITMAP_LOADED	2
#define RT_FLAG_UPDATE_SBS		3
#define RT_FLAG_RESHAPE_RS		4
#define RT_FLAG_KEEP_RS_FROZEN		5

/* Array elements of 64 bit needed for rebuild/write_mostly bits */
#define DISKS_ARRAY_ELEMS ((MAX_RAID_DEVICES + (sizeof(uint64_t) * 8 - 1)) / sizeof(uint64_t) / 8)
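/*
 * Worked example (illustrative, not from the original source): with
 * MAX_RAID_DEVICES == 253 and 64-bit words this evaluates to
 * (253 + 63) / 8 / 8 == 4 array elements, enough for one bit per
 * possible raid device.
 */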

/*
 * raid set level, layout and chunk sectors backup/restore
 */
struct rs_layout {
	int new_level;
	int new_layout;
	int new_chunk_sectors;
};

struct raid_set {
	struct dm_target *ti;

	uint32_t bitmap_loaded;
	uint32_t stripe_cache_entries;
	unsigned long ctr_flags;
	unsigned long runtime_flags;

	uint64_t rebuild_disks[DISKS_ARRAY_ELEMS];

	int raid_disks;
	int delta_disks;
	int data_offset;
	int raid10_copies;
	int requested_bitmap_chunk_sectors;

	struct mddev md;
	struct raid_type *raid_type;
	struct dm_target_callbacks callbacks;

	struct raid_dev dev[0];
};

static void rs_config_backup(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	l->new_level = mddev->new_level;
	l->new_layout = mddev->new_layout;
	l->new_chunk_sectors = mddev->new_chunk_sectors;
}

static void rs_config_restore(struct raid_set *rs, struct rs_layout *l)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = l->new_level;
	mddev->new_layout = l->new_layout;
	mddev->new_chunk_sectors = l->new_chunk_sectors;
}
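/*
 * Illustrative usage pattern (an assumption, not spelled out in this
 * part of the source): callers save the pending level/layout/chunk
 * state before speculatively mutating the mddev and put it back
 * afterwards:
 *
 *	struct rs_layout rs_layout;
 *
 *	rs_config_backup(rs, &rs_layout);
 *	// ... probe/modify mddev->new_level, ->new_layout, ...
 *	rs_config_restore(rs, &rs_layout);
 */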

/* raid10 algorithms (i.e. formats) */
#define ALGORITHM_RAID10_DEFAULT	0
#define ALGORITHM_RAID10_NEAR		1
#define ALGORITHM_RAID10_OFFSET		2
#define ALGORITHM_RAID10_FAR		3

/* Supported raid types and properties. */
static struct raid_type {
	const char *name;		/* RAID algorithm. */
	const char *descr;		/* Descriptor text for logging. */
	const unsigned parity_devs;	/* # of parity devices. */
	const unsigned minimal_devs;	/* minimal # of devices in set. */
	const unsigned level;		/* RAID level. */
	const unsigned algorithm;	/* RAID algorithm. */
} raid_types[] = {
	{"raid0", "raid0 (striping)", 0, 2, 0, 0 /* NONE */},
	{"raid1", "raid1 (mirroring)", 0, 2, 1, 0 /* NONE */},
	{"raid10_far", "raid10 far (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_FAR},
	{"raid10_offset", "raid10 offset (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_OFFSET},
	{"raid10_near", "raid10 near (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_NEAR},
	{"raid10", "raid10 (striped mirrors)", 0, 2, 10, ALGORITHM_RAID10_DEFAULT},
	{"raid4", "raid4 (dedicated last parity disk)", 1, 2, 4, ALGORITHM_PARITY_N}, /* raid4 layout = raid5_n */
	{"raid5_n", "raid5 (dedicated last parity disk)", 1, 2, 5, ALGORITHM_PARITY_N},
	{"raid5_ls", "raid5 (left symmetric)", 1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
	{"raid5_rs", "raid5 (right symmetric)", 1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
	{"raid5_la", "raid5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
	{"raid5_ra", "raid5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
	{"raid6_zr", "raid6 (zero restart)", 2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
	{"raid6_nr", "raid6 (N restart)", 2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
	{"raid6_nc", "raid6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE},
	{"raid6_n_6", "raid6 (dedicated parity/Q n/6)", 2, 4, 6, ALGORITHM_PARITY_N_6},
	{"raid6_ls_6", "raid6 (left symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_SYMMETRIC_6},
	{"raid6_rs_6", "raid6 (right symmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_SYMMETRIC_6},
	{"raid6_la_6", "raid6 (left asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_LEFT_ASYMMETRIC_6},
	{"raid6_ra_6", "raid6 (right asymmetric dedicated Q 6)", 2, 4, 6, ALGORITHM_RIGHT_ASYMMETRIC_6}
};

/* True, if @v is in inclusive range [@min, @max] */
static bool __within_range(long v, long min, long max)
{
	return v >= min && v <= max;
}

/* All table line arguments are defined here */
static struct arg_name_flag {
	const unsigned long flag;
	const char *name;
} __arg_name_flags[] = {
	{ CTR_FLAG_SYNC, "sync"},
	{ CTR_FLAG_NOSYNC, "nosync"},
	{ CTR_FLAG_REBUILD, "rebuild"},
	{ CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
	{ CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
	{ CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
	{ CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
	{ CTR_FLAG_WRITE_MOSTLY, "writemostly"},
	{ CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
	{ CTR_FLAG_REGION_SIZE, "region_size"},
	{ CTR_FLAG_RAID10_COPIES, "raid10_copies"},
	{ CTR_FLAG_RAID10_FORMAT, "raid10_format"},
	{ CTR_FLAG_DATA_OFFSET, "data_offset"},
	{ CTR_FLAG_DELTA_DISKS, "delta_disks"},
	{ CTR_FLAG_RAID10_USE_NEAR_SETS, "raid10_use_near_sets"},
};

/* Return argument name string for given @flag */
static const char *dm_raid_arg_name_by_flag(const uint32_t flag)
{
	if (hweight32(flag) == 1) {
		struct arg_name_flag *anf = __arg_name_flags + ARRAY_SIZE(__arg_name_flags);

		while (anf-- > __arg_name_flags)
			if (flag & anf->flag)
				return anf->name;

	} else
		DMERR("%s called with more than one flag!", __func__);

	return NULL;
}
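/*
 * Illustrative usage (not from the original source):
 * dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD) returns "rebuild", while
 * passing a mask with two bits set, e.g. CTR_FLAGS_ANY_SYNC, logs an
 * error and returns NULL because hweight32() sees more than one bit.
 */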

/*
 * bool helpers to test for various raid levels of a raid set,
 * i.e. its level as reported by the superblock rather than
 * the requested raid_type passed to the constructor.
 */
/* Return true, if raid set in @rs is raid0 */
static bool rs_is_raid0(struct raid_set *rs)
{
	return !rs->md.level;
}

/* Return true, if raid set in @rs is raid1 */
static bool rs_is_raid1(struct raid_set *rs)
{
	return rs->md.level == 1;
}

/* Return true, if raid set in @rs is raid10 */
static bool rs_is_raid10(struct raid_set *rs)
{
	return rs->md.level == 10;
}

/* Return true, if raid set in @rs is level 4, 5 or 6 */
static bool rs_is_raid456(struct raid_set *rs)
{
	return __within_range(rs->md.level, 4, 6);
}

/* Return true, if raid set in @rs is reshapable */
static unsigned int __is_raid10_far(int layout);
static bool rs_is_reshapable(struct raid_set *rs)
{
	return rs_is_raid456(rs) ||
	       (rs_is_raid10(rs) && !__is_raid10_far(rs->md.new_layout));
}

/* Return true, if raid set in @rs is recovering */
static bool rs_is_recovering(struct raid_set *rs)
{
	smp_rmb();
	return rs->md.recovery_cp != MaxSector;
}

/* Return true, if raid set in @rs is reshaping */
static bool rs_is_reshaping(struct raid_set *rs)
{
	smp_rmb();
	return rs->md.reshape_position != MaxSector;
}

/*
 * bool helpers to test for various raid levels of a raid type
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
	return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
	return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
	return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
	return rt->level == 6;
}

/* Return true, if raid type in @rt is raid4/5/6 */
static bool rt_is_raid456(struct raid_type *rt)
{
	return __within_range(rt->level, 4, 6);
}
/* END: raid level bools */

/* Return valid ctr flags for the raid level of @rs */
static unsigned long __valid_flags(struct raid_set *rs)
{
	if (rt_is_raid0(rs->raid_type))
		return RAID0_VALID_FLAGS;
	else if (rt_is_raid1(rs->raid_type))
		return RAID1_VALID_FLAGS;
	else if (rt_is_raid10(rs->raid_type))
		return RAID10_VALID_FLAGS;
	else if (rt_is_raid45(rs->raid_type))
		return RAID45_VALID_FLAGS;
	else if (rt_is_raid6(rs->raid_type))
		return RAID6_VALID_FLAGS;

	return ~0;
}

/*
 * Check for valid flags set on @rs
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_valid_flags(struct raid_set *rs)
{
	if (rs->ctr_flags & ~__valid_flags(rs)) {
		rs->ti->error = "Invalid flags combination";
		return -EINVAL;
	}

	return 0;
}

/* MD raid10 bit definitions and helpers */
#define RAID10_OFFSET			(1 << 16) /* stripes with data copies are adjacent on devices */
#define RAID10_BROCKEN_USE_FAR_SETS	(1 << 17) /* Broken in raid10.c: use sets instead of whole stripe rotation */
#define RAID10_USE_FAR_SETS		(1 << 18) /* Use sets instead of whole stripe rotation */
#define RAID10_FAR_COPIES_SHIFT		8	  /* raid10 # far copies shift (2nd byte of layout) */

/* Return md raid10 near copies for @layout */
static unsigned int __raid10_near_copies(int layout)
{
	return layout & 0xFF;
}

/* Return md raid10 far copies for @layout */
static unsigned int __raid10_far_copies(int layout)
{
	return __raid10_near_copies(layout >> RAID10_FAR_COPIES_SHIFT);
}

/* Return true if md raid10 offset for @layout */
static unsigned int __is_raid10_offset(int layout)
{
	return layout & RAID10_OFFSET;
}

/* Return true if md raid10 near for @layout */
static unsigned int __is_raid10_near(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_near_copies(layout) > 1;
}

/* Return true if md raid10 far for @layout */
static unsigned int __is_raid10_far(int layout)
{
	return !__is_raid10_offset(layout) && __raid10_far_copies(layout) > 1;
}

/* Return md raid10 layout string for @layout */
static const char *raid10_md_layout_to_format(int layout)
{
	/*
	 * Bit 16 stands for "offset"
	 * (i.e. adjacent stripes hold copies)
	 *
	 * Refer to MD's raid10.c for details
	 */
	if (__is_raid10_offset(layout))
		return "offset";

	if (__raid10_near_copies(layout) > 1)
		return "near";

	WARN_ON(__raid10_far_copies(layout) < 2);

	return "far";
}
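/*
 * Worked decode example (illustrative, not from the original source):
 * layout 0x102 has near copies == 0x102 & 0xFF == 2, far copies ==
 * (0x102 >> 8) & 0xFF == 1 and no offset bit, so it maps to "near".
 */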

/* Return md raid10 algorithm for @name */
static const int raid10_name_to_format(const char *name)
{
	if (!strcasecmp(name, "near"))
		return ALGORITHM_RAID10_NEAR;
	else if (!strcasecmp(name, "offset"))
		return ALGORITHM_RAID10_OFFSET;
	else if (!strcasecmp(name, "far"))
		return ALGORITHM_RAID10_FAR;

	return -EINVAL;
}

/* Return md raid10 copies for @layout */
static unsigned int raid10_md_layout_to_copies(int layout)
{
	return __raid10_near_copies(layout) > 1 ?
	       __raid10_near_copies(layout) : __raid10_far_copies(layout);
}

/* Return md raid10 layout id for @algorithm and number of @copies */
static int raid10_format_to_md_layout(struct raid_set *rs,
				      unsigned int algorithm,
				      unsigned int copies)
{
	unsigned int n = 1, f = 1, r = 0;

	/*
	 * MD resilience flaw:
	 *
	 * enabling use_far_sets for far/offset formats causes copies
	 * to be colocated on the same devs together with their origins!
	 *
	 * -> disable it for now in the definition above
	 */
	if (algorithm == ALGORITHM_RAID10_DEFAULT ||
	    algorithm == ALGORITHM_RAID10_NEAR)
		n = copies;

	else if (algorithm == ALGORITHM_RAID10_OFFSET) {
		f = copies;
		r = RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else if (algorithm == ALGORITHM_RAID10_FAR) {
		f = copies;
		r = !RAID10_OFFSET;
		if (!test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags))
			r |= RAID10_USE_FAR_SETS;

	} else
		return -EINVAL;

	return r | (f << RAID10_FAR_COPIES_SHIFT) | n;
}
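/*
 * Worked encode example (illustrative, not from the original source):
 * "near" with 2 copies gives n = 2, f = 1, r = 0, i.e. layout
 * 0 | (1 << 8) | 2 == 0x102; "offset" with 2 copies (near sets not
 * requested) gives layout RAID10_OFFSET | RAID10_USE_FAR_SETS |
 * (2 << 8) | 1 == 0x50201.
 */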
/* END: MD raid10 bit definitions and helpers */

/* Check for any of the raid10 algorithms */
static int __got_raid10(struct raid_type *rtp, const int layout)
{
	if (rtp->level == 10) {
		switch (rtp->algorithm) {
		case ALGORITHM_RAID10_DEFAULT:
		case ALGORITHM_RAID10_NEAR:
			return __is_raid10_near(layout);
		case ALGORITHM_RAID10_OFFSET:
			return __is_raid10_offset(layout);
		case ALGORITHM_RAID10_FAR:
			return __is_raid10_far(layout);
		default:
			break;
		}
	}

	return 0;
}

/* Return raid_type for @name */
static struct raid_type *get_raid_type(const char *name)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types)
		if (!strcasecmp(rtp->name, name))
			return rtp;

	return NULL;
}

/* Return raid_type derived from @level and @layout */
static struct raid_type *get_raid_type_by_ll(const int level, const int layout)
{
	struct raid_type *rtp = raid_types + ARRAY_SIZE(raid_types);

	while (rtp-- > raid_types) {
		/* RAID10 special checks based on @layout flags/properties */
		if (rtp->level == level &&
		    (__got_raid10(rtp, layout) || rtp->algorithm == layout))
			return rtp;
	}

	return NULL;
}

/*
 * Conditionally change bdev capacity of @rs
 * in case of a disk add/remove reshape
 */
static void rs_set_capacity(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	/* Make sure we access the most recent mddev properties */
	smp_rmb();
	if (rs->ti->len != mddev->array_sectors && !rs_is_reshaping(rs)) {
		struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

		set_capacity(gendisk, mddev->array_sectors);
		revalidate_disk(gendisk);
	}
}

/*
 * Set the mddev properties in @rs to the current
 * ones retrieved from the freshest superblock
 */
static void rs_set_cur(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->new_level = mddev->level;
	mddev->new_layout = mddev->layout;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
}

/*
 * Set the mddev properties in @rs to the new
 * ones requested by the ctr
 */
static void rs_set_new(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;

	mddev->level = mddev->new_level;
	mddev->layout = mddev->new_layout;
	mddev->chunk_sectors = mddev->new_chunk_sectors;
	mddev->raid_disks = rs->raid_disks;
	mddev->delta_disks = 0;
}

static struct raid_set *raid_set_alloc(struct dm_target *ti, struct raid_type *raid_type,
				       unsigned raid_devs)
{
	unsigned i;
	struct raid_set *rs;

	if (raid_devs <= raid_type->parity_devs) {
		ti->error = "Insufficient number of devices";
		return ERR_PTR(-EINVAL);
	}

	rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
	if (!rs) {
		ti->error = "Cannot allocate raid context";
		return ERR_PTR(-ENOMEM);
	}

	mddev_init(&rs->md);

	rs->raid_disks = raid_devs;
	rs->delta_disks = 0;

	rs->ti = ti;
	rs->raid_type = raid_type;
	rs->stripe_cache_entries = 256;
	rs->md.raid_disks = raid_devs;
	rs->md.level = raid_type->level;
	rs->md.new_level = rs->md.level;
	rs->md.layout = raid_type->algorithm;
	rs->md.new_layout = rs->md.layout;
	rs->md.delta_disks = 0;
	rs->md.recovery_cp = rs_is_raid0(rs) ? MaxSector : 0;

	for (i = 0; i < raid_devs; i++)
		md_rdev_init(&rs->dev[i].rdev);

	/*
	 * Remaining items to be initialized by further RAID params:
	 *  rs->md.persistent
	 *  rs->md.external
	 *  rs->md.chunk_sectors
	 *  rs->md.new_chunk_sectors
	 *  rs->md.dev_sectors
	 */

	return rs;
}
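/*
 * Illustrative note (not from the original source): rs->dev[] is a
 * flexible array member, so the single kzalloc() above sizes the
 * context and the per-device slots together; e.g. raid_devs == 4
 * allocates sizeof(struct raid_set) + 4 * sizeof(struct raid_dev)
 * bytes, making rs->dev[0..3] valid without any further allocation.
 */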

static void raid_set_free(struct raid_set *rs)
{
	int i;

	for (i = 0; i < rs->md.raid_disks; i++) {
		if (rs->dev[i].meta_dev)
			dm_put_device(rs->ti, rs->dev[i].meta_dev);
		md_rdev_clear(&rs->dev[i].rdev);
		if (rs->dev[i].data_dev)
			dm_put_device(rs->ti, rs->dev[i].data_dev);
	}

	kfree(rs);
}

/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use raid_set_free() to unwind the operations.
 */
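/*
 * Illustrative example (device names are hypothetical, not from the
 * original source): the device section of a table line such as
 *
 *	... 2 /dev/sda1 /dev/sda2 /dev/sdb1 /dev/sdb2
 *
 * describes a two-device set with explicit metadata devices, while
 *
 *	... 2 - /dev/sda2 - /dev/sdb2
 *
 * describes the same data devices without metadata devices.
 */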
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
	int i;
	int rebuild = 0;
	int metadata_available = 0;
	int r = 0;
	const char *arg;

	/* Skip over the number of raid devices argument to get to the dev pairs */
	arg = dm_shift_arg(as);
	if (!arg)
		return -EINVAL;

	for (i = 0; i < rs->md.raid_disks; i++) {
		rs->dev[i].rdev.raid_disk = i;

		rs->dev[i].meta_dev = NULL;
		rs->dev[i].data_dev = NULL;

		/*
		 * There are no offsets, since there is a separate device
		 * for data and metadata.
		 */
		rs->dev[i].rdev.data_offset = 0;
		rs->dev[i].rdev.mddev = &rs->md;

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (strcmp(arg, "-")) {
			r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
					  &rs->dev[i].meta_dev);
			if (r) {
				rs->ti->error = "RAID metadata device lookup failure";
				return r;
			}

			rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
			if (!rs->dev[i].rdev.sb_page) {
				rs->ti->error = "Failed to allocate superblock page";
				return -ENOMEM;
			}
		}

		arg = dm_shift_arg(as);
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "-")) {
			if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
			    (!rs->dev[i].rdev.recovery_offset)) {
				rs->ti->error = "Drive designated for rebuild not specified";
				return -EINVAL;
			}

			if (rs->dev[i].meta_dev) {
				rs->ti->error = "No data device supplied with metadata device";
				return -EINVAL;
			}

			continue;
		}

		r = dm_get_device(rs->ti, arg, dm_table_get_mode(rs->ti->table),
				  &rs->dev[i].data_dev);
		if (r) {
			rs->ti->error = "RAID device lookup failure";
			return r;
		}

		if (rs->dev[i].meta_dev) {
			metadata_available = 1;
			rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
		}
		rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
		list_add_tail(&rs->dev[i].rdev.same_set, &rs->md.disks);
		if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
			rebuild++;
	}

	if (metadata_available) {
		rs->md.external = 0;
		rs->md.persistent = 1;
		rs->md.major_version = 2;
	} else if (rebuild && !rs->md.recovery_cp) {
		/*
		 * Without metadata, we will not be able to tell if the array
		 * is in-sync or not - we must assume it is not. Therefore,
		 * it is impossible to rebuild a drive.
		 *
		 * Even if there is metadata, the on-disk information may
		 * indicate that the array is not in-sync and it will then
		 * fail at that time.
		 *
		 * User could specify 'nosync' option if desperate.
		 */
		rs->ti->error = "Unable to rebuild drive while array is not in-sync";
		return -EINVAL;
	}

	return 0;
}

/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors.  If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
	unsigned long min_region_size = rs->ti->len / (1 << 21);

	if (!region_size) {
		/*
		 * Choose a reasonable default. All figures in sectors.
		 */
		if (min_region_size > (1 << 13)) {
			/* If not a power of 2, make it the next power of 2 */
			region_size = roundup_pow_of_two(min_region_size);
			DMINFO("Choosing default region size of %lu sectors",
			       region_size);
		} else {
			DMINFO("Choosing default region size of 4MiB");
			region_size = 1 << 13; /* sectors */
		}
	} else {
		/*
		 * Validate user-supplied value.
		 */
		if (region_size > rs->ti->len) {
			rs->ti->error = "Supplied region size is too large";
			return -EINVAL;
		}

		if (region_size < min_region_size) {
			DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
			      region_size, min_region_size);
			rs->ti->error = "Supplied region size is too small";
			return -EINVAL;
		}

		if (!is_power_of_2(region_size)) {
			rs->ti->error = "Region size is not a power of 2";
			return -EINVAL;
		}

		if (region_size < rs->md.chunk_sectors) {
			rs->ti->error = "Region size is smaller than the chunk size";
			return -EINVAL;
		}
	}

	/*
	 * Convert sectors to bytes.
	 */
	rs->md.bitmap_info.chunksize = (region_size << 9);

	return 0;
}
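/*
 * Worked example (illustrative, not from the original source): for a
 * 1 TiB target, ti->len == 2^31 sectors, so min_region_size == 2^10.
 * That is below the 2^13-sector (4 MiB) default, which is therefore
 * used, giving 2^31 / 2^13 == 2^18 bitmap regions - comfortably under
 * the 2^21 limit required by the MD bitmap.
 */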
| 904 | |
| 905 | /* |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 906 | * validate_raid_redundancy |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 907 | * @rs |
| 908 | * |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 909 | * Determine if there are enough devices in the array that haven't |
| 910 | * failed (or are being rebuilt) to form a usable array. |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 911 | * |
| 912 | * Returns: 0 on success, -EINVAL on failure. |
| 913 | */ |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 914 | static int validate_raid_redundancy(struct raid_set *rs) |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 915 | { |
| 916 | unsigned i, rebuild_cnt = 0; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 917 | unsigned rebuilds_per_group = 0, copies; |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 918 | unsigned group_size, last_group_start; |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 919 | |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 920 | for (i = 0; i < rs->md.raid_disks; i++) |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 921 | if (!test_bit(In_sync, &rs->dev[i].rdev.flags) || |
| 922 | !rs->dev[i].rdev.sb_page) |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 923 | rebuild_cnt++; |
| 924 | |
| 925 | switch (rs->raid_type->level) { |
| 926 | case 1: |
| 927 | if (rebuild_cnt >= rs->md.raid_disks) |
| 928 | goto too_many; |
| 929 | break; |
| 930 | case 4: |
| 931 | case 5: |
| 932 | case 6: |
| 933 | if (rebuild_cnt > rs->raid_type->parity_devs) |
| 934 | goto too_many; |
| 935 | break; |
| 936 | case 10: |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 937 | copies = raid10_md_layout_to_copies(rs->md.new_layout); |
Jonathan Brassow | 4ec1e36 | 2012-10-11 13:40:24 +1100 | [diff] [blame] | 938 | if (rebuild_cnt < copies) |
| 939 | break; |
| 940 | |
| 941 | /* |
| 942 | * It is possible to have a higher rebuild count for RAID10, |
| 943 | * as long as the failed devices occur in different mirror |
| 944 | * groups (i.e. different stripes). |
| 945 | * |
Jonathan Brassow | 4ec1e36 | 2012-10-11 13:40:24 +1100 | [diff] [blame] | 946 | * When checking "near" format, make sure no adjacent devices |
| 947 | * have failed beyond what can be handled. In addition to the |
| 948 | * simple case where the number of devices is a multiple of the |
| 949 | * number of copies, we must also handle cases where the number |
| 950 | * of devices is not a multiple of the number of copies. |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 951 | * E.g. dev1 dev2 dev3 dev4 dev5 |
| 952 | * A A B B C |
| 953 | * C D D E E |
Jonathan Brassow | 4ec1e36 | 2012-10-11 13:40:24 +1100 | [diff] [blame] | 954 | */ |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 955 | if (__is_raid10_near(rs->md.new_layout)) { |
| 956 | for (i = 0; i < rs->raid_disks; i++) { |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 957 | if (!(i % copies)) |
| 958 | rebuilds_per_group = 0; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 959 | if ((!rs->dev[i].rdev.sb_page || |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 960 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) && |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 961 | (++rebuilds_per_group >= copies)) |
| 962 | goto too_many; |
| 963 | } |
| 964 | break; |
| 965 | } |
| 966 | |
| 967 | /* |
| 968 | * When checking "far" and "offset" formats, we need to ensure |
| 969 | * that the device that holds its copy is not also dead or |
| 970 | * being rebuilt. (Note that "far" and "offset" formats only |
| 971 | * support two copies right now. These formats also only ever |
| 972 | * use the 'use_far_sets' variant.) |
| 973 | * |
| 974 | * This check is somewhat complicated by the need to account |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 975 | * for arrays that are not a multiple of (far) copies. This |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 976 | * results in the need to treat the last (potentially larger) |
| 977 | * set differently. |
| 978 | */ |
| 979 | group_size = (rs->md.raid_disks / copies); |
| 980 | last_group_start = (rs->md.raid_disks / group_size) - 1; |
| 981 | last_group_start *= group_size; |
| 982 | for (i = 0; i < rs->md.raid_disks; i++) { |
| 983 | if (!(i % copies) && !(i > last_group_start)) |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 984 | rebuilds_per_group = 0; |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 985 | if ((!rs->dev[i].rdev.sb_page || |
| 986 | !test_bit(In_sync, &rs->dev[i].rdev.flags)) && |
Jonathan Brassow | 4ec1e36 | 2012-10-11 13:40:24 +1100 | [diff] [blame] | 987 | (++rebuilds_per_group >= copies)) |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 988 | goto too_many; |
Jonathan Brassow | 4ec1e36 | 2012-10-11 13:40:24 +1100 | [diff] [blame] | 989 | } |
| 990 | break; |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 991 | default: |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 992 | if (rebuild_cnt) |
| 993 | return -EINVAL; |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 994 | } |
| 995 | |
| 996 | return 0; |
| 997 | |
| 998 | too_many: |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 999 | return -EINVAL; |
| 1000 | } |
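/*
 * Editor's illustration, not part of the driver: a minimal stand-alone C
 * sketch of the raid10 "near" redundancy check above.  The function name
 * and the failed[] state array are hypothetical.  With copies = 2 across
 * 5 devices (layout A A B B C / C D D E E), indexes group as {0,1},
 * {2,3}, {4}; failures are only fatal when a whole group is lost.
 */
static int example_near_redundancy(const int failed[], int nr_devs, int copies)
{
	int i, rebuilds_per_group = 0;

	for (i = 0; i < nr_devs; i++) {
		if (!(i % copies))
			rebuilds_per_group = 0;	/* a new mirror group starts here */
		if (failed[i] && ++rebuilds_per_group >= copies)
			return -1;	/* all copies within one group failed */
	}

	return 0;	/* e.g. {1,0,0,1,0} passes, {1,1,0,0,0} does not */
}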
| 1001 | |
| 1002 | /* |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1003 | * Possible arguments are... |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1004 | * <chunk_size> [optional_args] |
| 1005 | * |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1006 | * Argument definitions |
| 1007 | * <chunk_size> The number of sectors per disk that |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1008 | * will form the "stripe" |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1009 | * [[no]sync] Force or prevent recovery of the |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1010 | * entire array |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1011 | * [rebuild <idx>] Rebuild the drive indicated by the index |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1012 | * [daemon_sleep <ms>] Time between bitmap daemon work to |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1013 | * clear bits |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1014 | * [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization |
| 1015 | * [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization |
Jonathan Brassow | 46bed2b | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1016 | * [write_mostly <idx>] Indicate a write mostly drive via index |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1017 | * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) |
| 1018 | * [stripe_cache <sectors>] Stripe cache size for higher RAIDs |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1019 | * [region_size <sectors>] Defines granularity of bitmap |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 1020 | * |
| 1021 | * RAID10-only options: |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1022 | * [raid10_copies <# copies>] Number of copies. (Default: 2) |
Jonathan Brassow | fe5d2f4 | 2013-02-21 13:28:10 +1100 | [diff] [blame] | 1023 | * [raid10_format <near|far|offset>] Layout algorithm. (Default: near) |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1024 | */ |
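/*
 * Editor's note: a hedged example of how the arguments documented above
 * appear in a device-mapper table line (device numbers are hypothetical;
 * cf. the example tables in Documentation/device-mapper/dm-raid.txt):
 *
 *	0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
 *
 * i.e. <start> <len> raid <raid_type> <#raid_params> <chunk_size>
 * [optional args] <#raid_devs> <meta_dev1> <data_dev1> ..., where "-"
 * means "no metadata device" for that slot.
 */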
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 1025 | static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as, |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1026 | unsigned num_raid_params) |
| 1027 | { |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1028 | int value, raid10_format = ALGORITHM_RAID10_DEFAULT; |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 1029 | unsigned raid10_copies = 2; |
Jonathan Brassow | eb64912 | 2012-10-11 13:40:09 +1100 | [diff] [blame] | 1030 | unsigned i; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1031 | unsigned region_size = 0; |
Mike Snitzer | 542f903 | 2012-07-27 15:08:00 +0100 | [diff] [blame] | 1032 | sector_t max_io_len; |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 1033 | const char *arg, *key; |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1034 | struct raid_dev *rd; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1035 | struct raid_type *rt = rs->raid_type; |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 1036 | |
| 1037 | arg = dm_shift_arg(as); |
| 1038 | num_raid_params--; /* Account for chunk_size argument */ |
| 1039 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1040 | if (kstrtoint(arg, 10, &value) < 0) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1041 | rs->ti->error = "Bad numerical argument given for chunk_size"; |
| 1042 | return -EINVAL; |
| 1043 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1044 | |
| 1045 | /* |
| 1046 | * First, parse the in-order required arguments |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1047 | * "chunk_size" is the only argument of this type. |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1048 | */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1049 | if (rt_is_raid1(rt)) { |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1050 | if (value) |
| 1051 | DMERR("Ignoring chunk size parameter for RAID 1"); |
| 1052 | value = 0; |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1053 | } else if (!is_power_of_2(value)) { |
| 1054 | rs->ti->error = "Chunk size must be a power of 2"; |
| 1055 | return -EINVAL; |
| 1056 | } else if (value < 8) { |
| 1057 | rs->ti->error = "Chunk size value is too small"; |
| 1058 | return -EINVAL; |
| 1059 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1060 | |
| 1061 | rs->md.new_chunk_sectors = rs->md.chunk_sectors = value; |
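/* Editor's note: chunk_size is given in sectors, so e.g. "128" here means a 64 KiB chunk */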
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1062 | |
| 1063 | /* |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1064 | * We set each individual device as In_sync with a completed |
| 1065 | * 'recovery_offset'. If there has been a device failure or |
| 1066 | * replacement then one of the following cases applies: |
| 1067 | * |
| 1068 | * 1) User specifies 'rebuild'. |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1069 | * - Device is reset when param is read. |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1070 | * 2) A new device is supplied. |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1071 | * - No matching superblock found, resets device. |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1072 | * 3) Device failure was transient and returns on reload. |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1073 | * - Failure noticed, resets device for bitmap replay. |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1074 | * 4) Device hadn't completed recovery after previous failure. |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1075 | * - Superblock is read and overrides recovery_offset. |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1076 | * |
| 1077 | * What is found in the superblocks of the devices is always |
| 1078 | * authoritative, unless 'rebuild' or '[no]sync' was specified. |
| 1079 | */ |
| 1080 | for (i = 0; i < rs->md.raid_disks; i++) { |
| 1081 | set_bit(In_sync, &rs->dev[i].rdev.flags); |
| 1082 | rs->dev[i].rdev.recovery_offset = MaxSector; |
| 1083 | } |
| 1084 | |
| 1085 | /* |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1086 | * Second, parse the unordered optional arguments |
| 1087 | */ |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1088 | for (i = 0; i < num_raid_params; i++) { |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1089 | key = dm_shift_arg(as); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1090 | if (!key) { |
| 1091 | rs->ti->error = "Not enough raid parameters given"; |
| 1092 | return -EINVAL; |
| 1093 | } |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 1094 | |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1095 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1096 | if (test_and_set_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1097 | rs->ti->error = "Only one 'nosync' argument allowed"; |
| 1098 | return -EINVAL; |
| 1099 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1100 | rs->md.recovery_cp = MaxSector; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1101 | continue; |
| 1102 | } |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1103 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_SYNC))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1104 | if (test_and_set_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1105 | rs->ti->error = "Only one 'sync' argument allowed"; |
| 1106 | return -EINVAL; |
| 1107 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1108 | rs->md.recovery_cp = 0; |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1109 | continue; |
| 1110 | } |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1111 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_USE_NEAR_SETS))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1112 | if (test_and_set_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1113 | rs->ti->error = "Only one 'raid10_use_near_sets' argument allowed";
| 1114 | return -EINVAL; |
| 1115 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1116 | continue; |
| 1117 | } |
| 1118 | |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 1119 | arg = dm_shift_arg(as); |
| 1120 | i++; /* Account for the argument pairs */ |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1121 | if (!arg) { |
| 1122 | rs->ti->error = "Wrong number of raid parameters given"; |
| 1123 | return -EINVAL; |
| 1124 | } |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 1125 | |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1126 | /* |
| 1127 | * Parameters that take a string value are checked here. |
| 1128 | */ |
| 1129 | |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1130 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1131 | if (test_and_set_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1132 | rs->ti->error = "Only one 'raid10_format' argument pair allowed"; |
| 1133 | return -EINVAL; |
| 1134 | } |
| 1135 | if (!rt_is_raid10(rt)) { |
| 1136 | rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; |
| 1137 | return -EINVAL; |
| 1138 | } |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1139 | raid10_format = raid10_name_to_format(arg); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1140 | if (raid10_format < 0) { |
| 1141 | rs->ti->error = "Invalid 'raid10_format' value given"; |
| 1142 | return raid10_format; |
| 1143 | } |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 1144 | continue; |
| 1145 | } |
| 1146 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1147 | if (kstrtoint(arg, 10, &value) < 0) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1148 | rs->ti->error = "Bad numerical argument given in raid params"; |
| 1149 | return -EINVAL; |
| 1150 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1151 | |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1152 | if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD))) { |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1153 | /* |
| 1154 | * "rebuild" is being passed in by userspace to provide |
| 1155 | * indexes of replaced devices and to set up additional |
| 1156 | * devices on raid level takeover. |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 1157 | */ |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1158 | if (!__within_range(value, 0, rs->raid_disks - 1)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1159 | rs->ti->error = "Invalid rebuild index given"; |
| 1160 | return -EINVAL; |
| 1161 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1162 | |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1163 | if (test_and_set_bit(value, (void *) rs->rebuild_disks)) { |
| 1164 | rs->ti->error = "rebuild for this index already given"; |
| 1165 | return -EINVAL; |
| 1166 | } |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1167 | |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1168 | rd = rs->dev + value; |
| 1169 | clear_bit(In_sync, &rd->rdev.flags); |
| 1170 | clear_bit(Faulty, &rd->rdev.flags); |
| 1171 | rd->rdev.recovery_offset = 0; |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1172 | set_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags); |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1173 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY))) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1174 | if (!rt_is_raid1(rt)) { |
| 1175 | rs->ti->error = "write_mostly option is only valid for RAID1"; |
| 1176 | return -EINVAL; |
| 1177 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1178 | |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1179 | if (!__within_range(value, 0, rs->md.raid_disks - 1)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1180 | rs->ti->error = "Invalid write_mostly index given"; |
| 1181 | return -EINVAL; |
| 1182 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1183 | |
Jonathan Brassow | 46bed2b | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1184 | set_bit(WriteMostly, &rs->dev[value].rdev.flags); |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1185 | set_bit(__CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags); |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1186 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1187 | if (!rt_is_raid1(rt)) { |
| 1188 | rs->ti->error = "max_write_behind option is only valid for RAID1"; |
| 1189 | return -EINVAL; |
| 1190 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1191 | |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1192 | if (test_and_set_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1193 | rs->ti->error = "Only one max_write_behind argument pair allowed"; |
| 1194 | return -EINVAL; |
| 1195 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1196 | |
| 1197 | /* |
| 1198 | * In device-mapper, we specify things in sectors, but |
| 1199 | * MD records this value in kB |
| 1200 | */ |
| 1201 | value /= 2; |
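/* Editor's note: e.g. a 16384-sector (8 MiB) argument becomes 8192 (kB) here */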
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1202 | if (value > COUNTER_MAX) { |
| 1203 | rs->ti->error = "Max write-behind limit out of range"; |
| 1204 | return -EINVAL; |
| 1205 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1206 | |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1207 | rs->md.bitmap_info.max_write_behind = value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1208 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1209 | if (test_and_set_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1210 | rs->ti->error = "Only one daemon_sleep argument pair allowed"; |
| 1211 | return -EINVAL; |
| 1212 | } |
| 1213 | if (!value || (value > MAX_SCHEDULE_TIMEOUT)) { |
| 1214 | rs->ti->error = "daemon sleep period out of range"; |
| 1215 | return -EINVAL; |
| 1216 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1217 | rs->md.bitmap_info.daemon_sleep = value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1218 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET))) { |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1219 | /* Userspace passes new data_offset after having extended the data image LV */
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1220 | if (test_and_set_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1221 | rs->ti->error = "Only one data_offset argument pair allowed"; |
| 1222 | return -EINVAL; |
| 1223 | } |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1224 | /* Ensure sensible data offset */ |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1225 | if (value < 0) { |
| 1226 | rs->ti->error = "Bogus data_offset value"; |
| 1227 | return -EINVAL; |
| 1228 | } |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1229 | rs->data_offset = value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1230 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS))) { |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1231 | /* Define the +/-# of disks to add to/remove from the given raid set */ |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1232 | if (test_and_set_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1233 | rs->ti->error = "Only one delta_disks argument pair allowed"; |
| 1234 | return -EINVAL; |
| 1235 | } |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1236 | /* Ensure MAX_RAID_DEVICES and raid type minimal_devs! */ |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1237 | if (!__within_range(abs(value), 1, MAX_RAID_DEVICES - rt->minimal_devs)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1238 | rs->ti->error = "Too many delta_disks requested";
| 1239 | return -EINVAL; |
| 1240 | } |
Heinz Mauelshagen | 4763e54 | 2016-05-19 18:49:31 +0200 | [diff] [blame] | 1241 | |
| 1242 | rs->delta_disks = value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1243 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1244 | if (test_and_set_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1245 | rs->ti->error = "Only one stripe_cache argument pair allowed"; |
| 1246 | return -EINVAL; |
| 1247 | } |
| 1248 | |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1249 | if (!rt_is_raid456(rt)) { |
| 1250 | rs->ti->error = "Inappropriate argument: stripe_cache"; |
| 1251 | return -EINVAL; |
| 1252 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1253 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1254 | rs->stripe_cache_entries = value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1255 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1256 | if (test_and_set_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1257 | rs->ti->error = "Only one min_recovery_rate argument pair allowed"; |
| 1258 | return -EINVAL; |
| 1259 | } |
| 1260 | if (value > INT_MAX) { |
| 1261 | rs->ti->error = "min_recovery_rate out of range"; |
| 1262 | return -EINVAL; |
| 1263 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1264 | rs->md.sync_speed_min = (int)value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1265 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1266 | if (test_and_set_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags)) {
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1267 | rs->ti->error = "Only one max_recovery_rate argument pair allowed"; |
| 1268 | return -EINVAL; |
| 1269 | } |
| 1270 | if (value > INT_MAX) { |
| 1271 | rs->ti->error = "max_recovery_rate out of range"; |
| 1272 | return -EINVAL; |
| 1273 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1274 | rs->md.sync_speed_max = (int)value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1275 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1276 | if (test_and_set_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1277 | rs->ti->error = "Only one region_size argument pair allowed"; |
| 1278 | return -EINVAL; |
| 1279 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1280 | |
Jonathan Brassow | c108456 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1281 | region_size = value; |
Heinz Mauelshagen | 4257e08 | 2016-06-14 01:46:03 +0200 | [diff] [blame] | 1282 | rs->requested_bitmap_chunk_sectors = value; |
Mike Snitzer | 3fa6cf3 | 2016-06-02 11:58:51 -0400 | [diff] [blame] | 1283 | } else if (!strcasecmp(key, dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES))) { |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1284 | if (test_and_set_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1285 | rs->ti->error = "Only one raid10_copies argument pair allowed"; |
| 1286 | return -EINVAL; |
| 1287 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1288 | |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1289 | if (!__within_range(value, 2, rs->md.raid_disks)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1290 | rs->ti->error = "Bad value for 'raid10_copies'"; |
| 1291 | return -EINVAL; |
| 1292 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1293 | |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 1294 | raid10_copies = value; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1295 | } else { |
| 1296 | DMERR("Unable to parse RAID parameter: %s", key); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1297 | rs->ti->error = "Unable to parse RAID parameter"; |
| 1298 | return -EINVAL; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1299 | } |
| 1300 | } |
| 1301 | |
Jonathan Brassow | c108456 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1302 | if (validate_region_size(rs, region_size)) |
| 1303 | return -EINVAL; |
| 1304 | |
| 1305 | if (rs->md.chunk_sectors) |
Mike Snitzer | 542f903 | 2012-07-27 15:08:00 +0100 | [diff] [blame] | 1306 | max_io_len = rs->md.chunk_sectors; |
Jonathan Brassow | c108456 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1307 | else |
Mike Snitzer | 542f903 | 2012-07-27 15:08:00 +0100 | [diff] [blame] | 1308 | max_io_len = region_size; |
Jonathan Brassow | c108456 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1309 | |
Mike Snitzer | 542f903 | 2012-07-27 15:08:00 +0100 | [diff] [blame] | 1310 | if (dm_set_target_max_io_len(rs->ti, max_io_len)) |
| 1311 | return -EINVAL; |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1312 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1313 | if (rt_is_raid10(rt)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1314 | if (raid10_copies > rs->md.raid_disks) { |
| 1315 | rs->ti->error = "Not enough devices to satisfy specification"; |
| 1316 | return -EINVAL; |
| 1317 | } |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 1318 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1319 | rs->md.new_layout = raid10_format_to_md_layout(rs, raid10_format, raid10_copies); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1320 | if (rs->md.new_layout < 0) { |
| 1321 | rs->ti->error = "Error getting raid10 format"; |
| 1322 | return rs->md.new_layout; |
| 1323 | } |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1324 | |
| 1325 | rt = get_raid_type_by_ll(10, rs->md.new_layout); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1326 | if (!rt) { |
| 1327 | rs->ti->error = "Failed to recognize new raid10 layout"; |
| 1328 | return -EINVAL; |
| 1329 | } |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1330 | |
| 1331 | if ((rt->algorithm == ALGORITHM_RAID10_DEFAULT || |
| 1332 | rt->algorithm == ALGORITHM_RAID10_NEAR) && |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1333 | test_bit(__CTR_FLAG_RAID10_USE_NEAR_SETS, &rs->ctr_flags)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1334 | rs->ti->error = "RAID10 format 'near' and 'raid10_use_near_sets' are incompatible"; |
| 1335 | return -EINVAL; |
| 1336 | } |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1337 | } |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 1338 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1339 | rs->raid10_copies = raid10_copies; |
Jonathan E Brassow | c039c33 | 2012-07-27 15:08:04 +0100 | [diff] [blame] | 1340 | |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1341 | /* Assume there are no metadata devices until the drives are parsed */ |
| 1342 | rs->md.persistent = 0; |
| 1343 | rs->md.external = 1; |
| 1344 | |
Heinz Mauelshagen | f090279 | 2016-05-19 18:49:27 +0200 | [diff] [blame] | 1345 | /* Check if any invalid ctr arguments have been passed in for the raid level */
Heinz Mauelshagen | a30cbc0 | 2016-06-09 16:42:16 +0200 | [diff] [blame] | 1346 | return rs_check_for_valid_flags(rs); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1347 | } |
| 1348 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1349 | /* Set raid4/5/6 cache size */ |
| 1350 | static int rs_set_raid456_stripe_cache(struct raid_set *rs) |
| 1351 | { |
| 1352 | int r; |
| 1353 | struct r5conf *conf; |
| 1354 | struct mddev *mddev = &rs->md; |
| 1355 | uint32_t min_stripes = max(mddev->chunk_sectors, mddev->new_chunk_sectors) / 2; |
| 1356 | uint32_t nr_stripes = rs->stripe_cache_entries; |
| 1357 | |
| 1358 | if (!rt_is_raid456(rs->raid_type)) { |
| 1359 | rs->ti->error = "Inappropriate raid level; cannot change stripe_cache size"; |
| 1360 | return -EINVAL; |
| 1361 | } |
| 1362 | |
| 1363 | if (nr_stripes < min_stripes) { |
| 1364 | DMINFO("Adjusting requested %u stripe cache entries to %u to suit stripe size", |
| 1365 | nr_stripes, min_stripes); |
| 1366 | nr_stripes = min_stripes; |
| 1367 | } |
| 1368 | |
| 1369 | conf = mddev->private; |
| 1370 | if (!conf) { |
| 1371 | rs->ti->error = "Cannot change stripe_cache size on inactive RAID set"; |
| 1372 | return -EINVAL; |
| 1373 | } |
| 1374 | |
| 1375 | /* Try setting number of stripes in raid456 stripe cache */ |
| 1376 | if (conf->min_nr_stripes != nr_stripes) { |
| 1377 | r = raid5_set_cache_size(mddev, nr_stripes); |
| 1378 | if (r) { |
| 1379 | rs->ti->error = "Failed to set raid4/5/6 stripe cache size"; |
| 1380 | return r; |
| 1381 | } |
| 1382 | |
| 1383 | DMINFO("%u stripe cache entries", nr_stripes); |
| 1384 | } |
| 1385 | |
| 1386 | return 0; |
| 1387 | } |
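/*
 * Editor's worked example (hypothetical values): with 512-sector (256 KiB)
 * chunks, min_stripes = 512 / 2 = 256, so a table line requesting
 * "stripe_cache 128" is adjusted up to 256 entries by the code above.
 */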
| 1388 | |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 1389 | /* Return # of data stripes as kept in mddev as of @rs (i.e. as of superblock) */ |
| 1390 | static unsigned int mddev_data_stripes(struct raid_set *rs) |
| 1391 | { |
| 1392 | return rs->md.raid_disks - rs->raid_type->parity_devs; |
| 1393 | } |
| 1394 | |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 1395 | /* Return # of data stripes of @rs (i.e. as of ctr) */ |
| 1396 | static unsigned int rs_data_stripes(struct raid_set *rs) |
| 1397 | { |
| 1398 | return rs->raid_disks - rs->raid_type->parity_devs; |
| 1399 | } |
| 1400 | |
| 1401 | /* Calculate the sectors per device and per array used for @rs */ |
| 1402 | static int rs_set_dev_and_array_sectors(struct raid_set *rs, bool use_mddev) |
| 1403 | { |
| 1404 | int delta_disks; |
| 1405 | unsigned int data_stripes; |
| 1406 | struct mddev *mddev = &rs->md; |
| 1407 | struct md_rdev *rdev; |
| 1408 | sector_t array_sectors = rs->ti->len, dev_sectors = rs->ti->len; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1409 | sector_t cur_dev_sectors = rs->dev[0].rdev.sectors; |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 1410 | |
| 1411 | if (use_mddev) { |
| 1412 | delta_disks = mddev->delta_disks; |
| 1413 | data_stripes = mddev_data_stripes(rs); |
| 1414 | } else { |
| 1415 | delta_disks = rs->delta_disks; |
| 1416 | data_stripes = rs_data_stripes(rs); |
| 1417 | } |
| 1418 | |
| 1419 | /* Special raid1 case w/o delta_disks support (yet) */ |
| 1420 | if (rt_is_raid1(rs->raid_type)) |
| 1421 | ; |
| 1422 | else if (rt_is_raid10(rs->raid_type)) { |
| 1423 | if (rs->raid10_copies < 2 || |
| 1424 | delta_disks < 0) { |
| 1425 | rs->ti->error = "Bogus raid10 data copies or delta disks"; |
| 1426 | return -EINVAL;
| 1427 | } |
| 1428 | |
| 1429 | dev_sectors *= rs->raid10_copies; |
| 1430 | if (sector_div(dev_sectors, data_stripes)) |
| 1431 | goto bad; |
| 1432 | |
| 1433 | array_sectors = (data_stripes + delta_disks) * dev_sectors; |
| 1434 | if (sector_div(array_sectors, rs->raid10_copies)) |
| 1435 | goto bad; |
| 1436 | |
| 1437 | } else if (sector_div(dev_sectors, data_stripes)) |
| 1438 | goto bad; |
| 1439 | |
| 1440 | else |
| 1441 | /* Striped layouts */ |
| 1442 | array_sectors = (data_stripes + delta_disks) * dev_sectors; |
| 1443 | |
| 1444 | rdev_for_each(rdev, mddev) |
| 1445 | rdev->sectors = dev_sectors; |
| 1446 | |
| 1447 | mddev->array_sectors = array_sectors; |
| 1448 | mddev->dev_sectors = dev_sectors; |
| 1449 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1450 | if (!rs_is_raid0(rs) && dev_sectors > cur_dev_sectors) |
| 1451 | mddev->recovery_cp = dev_sectors; |
| 1452 | |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 1453 | return 0; |
| 1454 | bad: |
| 1455 | rs->ti->error = "Target length not divisible by number of data devices"; |
| 1456 | return -EINVAL;
| 1457 | } |
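/*
 * Editor's worked example (hypothetical values): a raid5 set of 4 devices
 * has 3 data stripes.  For ti->len = 3145728 sectors, sector_div() leaves
 * dev_sectors = 1048576 per device and, with delta_disks = 0,
 * array_sectors = (3 + 0) * 1048576 = 3145728.  A non-zero division
 * remainder takes the "bad:" exit instead.
 */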
| 1458 | |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1459 | static void do_table_event(struct work_struct *ws) |
| 1460 | { |
| 1461 | struct raid_set *rs = container_of(ws, struct raid_set, md.event_work); |
| 1462 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1463 | rs_set_capacity(rs); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1464 | dm_table_event(rs->ti->table); |
| 1465 | } |
| 1466 | |
| 1467 | static int raid_is_congested(struct dm_target_callbacks *cb, int bits) |
| 1468 | { |
| 1469 | struct raid_set *rs = container_of(cb, struct raid_set, callbacks); |
| 1470 | |
NeilBrown | 5c675f8 | 2014-12-15 12:56:56 +1100 | [diff] [blame] | 1471 | return mddev_congested(&rs->md, bits); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1472 | } |
| 1473 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1474 | /* |
| 1475 | * Make sure a valid takeover (level switch) is being requested on @rs
| 1476 | * |
| 1477 | * Conversions of raid sets from one MD personality to another |
| 1478 | * have to conform to restrictions which are enforced here. |
| 1479 | * |
| 1480 | * Degradation is also checked for in rs_check_reshape() below.
| 1481 | */ |
| 1482 | static int rs_check_takeover(struct raid_set *rs) |
| 1483 | { |
| 1484 | struct mddev *mddev = &rs->md; |
| 1485 | unsigned int near_copies; |
| 1486 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1487 | smp_rmb(); |
| 1488 | if (rs->md.degraded) { |
| 1489 | rs->ti->error = "Can't takeover degraded raid set"; |
| 1490 | return -EPERM; |
| 1491 | } |
| 1492 | |
| 1493 | if (rs_is_reshaping(rs)) { |
| 1494 | rs->ti->error = "Can't takeover reshaping raid set"; |
| 1495 | return -EPERM; |
| 1496 | } |
| 1497 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1498 | switch (mddev->level) { |
| 1499 | case 0: |
| 1500 | /* raid0 -> raid1/5 with one disk */ |
| 1501 | if ((mddev->new_level == 1 || mddev->new_level == 5) && |
| 1502 | mddev->raid_disks == 1) |
| 1503 | return 0; |
| 1504 | |
| 1505 | /* raid0 -> raid10 */ |
| 1506 | if (mddev->new_level == 10 && |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1507 | !(rs->raid_disks % mddev->raid_disks)) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1508 | return 0; |
| 1509 | |
| 1510 | /* raid0 with multiple disks -> raid4/5/6 */ |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1511 | if (__within_range(mddev->new_level, 4, 6) && |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1512 | mddev->new_layout == ALGORITHM_PARITY_N && |
| 1513 | mddev->raid_disks > 1) |
| 1514 | return 0; |
| 1515 | |
| 1516 | break; |
| 1517 | |
| 1518 | case 10: |
| 1519 | /* Can't takeover raid10_offset! */ |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 1520 | if (__is_raid10_offset(mddev->layout)) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1521 | break; |
| 1522 | |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 1523 | near_copies = __raid10_near_copies(mddev->layout); |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1524 | |
| 1525 | /* raid10* -> raid0 */ |
| 1526 | if (mddev->new_level == 0) { |
| 1527 | /* Can takeover raid10_near with raid disks divisible by data copies! */
| 1528 | if (near_copies > 1 && |
| 1529 | !(mddev->raid_disks % near_copies)) { |
| 1530 | mddev->raid_disks /= near_copies; |
| 1531 | mddev->delta_disks = mddev->raid_disks; |
| 1532 | return 0; |
| 1533 | } |
| 1534 | |
| 1535 | /* Can takeover raid10_far */ |
| 1536 | if (near_copies == 1 && |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 1537 | __raid10_far_copies(mddev->layout) > 1) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1538 | return 0; |
| 1539 | |
| 1540 | break; |
| 1541 | } |
| 1542 | |
| 1543 | /* raid10_{near,far} -> raid1 */ |
| 1544 | if (mddev->new_level == 1 && |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 1545 | max(near_copies, __raid10_far_copies(mddev->layout)) == mddev->raid_disks) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1546 | return 0; |
| 1547 | |
| 1548 | /* raid10_{near,far} with 2 disks -> raid4/5 */ |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1549 | if (__within_range(mddev->new_level, 4, 5) && |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1550 | mddev->raid_disks == 2) |
| 1551 | return 0; |
| 1552 | break; |
| 1553 | |
| 1554 | case 1: |
| 1555 | /* raid1 with 2 disks -> raid4/5 */ |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1556 | if (__within_range(mddev->new_level, 4, 5) && |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1557 | mddev->raid_disks == 2) { |
| 1558 | mddev->degraded = 1; |
| 1559 | return 0; |
| 1560 | } |
| 1561 | |
| 1562 | /* raid1 -> raid0 */ |
| 1563 | if (mddev->new_level == 0 && |
| 1564 | mddev->raid_disks == 1) |
| 1565 | return 0; |
| 1566 | |
| 1567 | /* raid1 -> raid10 */ |
| 1568 | if (mddev->new_level == 10) |
| 1569 | return 0; |
| 1570 | |
| 1571 | break; |
| 1572 | |
| 1573 | case 4: |
| 1574 | /* raid4 -> raid0 */ |
| 1575 | if (mddev->new_level == 0) |
| 1576 | return 0; |
| 1577 | |
| 1578 | /* raid4 -> raid1/5 with 2 disks */ |
| 1579 | if ((mddev->new_level == 1 || mddev->new_level == 5) && |
| 1580 | mddev->raid_disks == 2) |
| 1581 | return 0; |
| 1582 | |
| 1583 | /* raid4 -> raid5/6 with parity N */ |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1584 | if (__within_range(mddev->new_level, 5, 6) && |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1585 | mddev->layout == ALGORITHM_PARITY_N) |
| 1586 | return 0; |
| 1587 | break; |
| 1588 | |
| 1589 | case 5: |
| 1590 | /* raid5 with parity N -> raid0 */ |
| 1591 | if (mddev->new_level == 0 && |
| 1592 | mddev->layout == ALGORITHM_PARITY_N) |
| 1593 | return 0; |
| 1594 | |
| 1595 | /* raid5 with parity N -> raid4 */ |
| 1596 | if (mddev->new_level == 4 && |
| 1597 | mddev->layout == ALGORITHM_PARITY_N) |
| 1598 | return 0; |
| 1599 | |
| 1600 | /* raid5 with 2 disks -> raid1/4/10 */ |
| 1601 | if ((mddev->new_level == 1 || mddev->new_level == 4 || mddev->new_level == 10) && |
| 1602 | mddev->raid_disks == 2) |
| 1603 | return 0; |
| 1604 | |
| 1605 | /* raid5 with parity N -> raid6 with parity N */ |
| 1606 | if (mddev->new_level == 6 && |
| 1607 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1608 | __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC_6, ALGORITHM_RIGHT_SYMMETRIC_6))) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1609 | return 0; |
| 1610 | break; |
| 1611 | |
| 1612 | case 6: |
| 1613 | /* raid6 with parity N -> raid0 */ |
| 1614 | if (mddev->new_level == 0 && |
| 1615 | mddev->layout == ALGORITHM_PARITY_N) |
| 1616 | return 0; |
| 1617 | |
| 1618 | /* raid6 with parity N -> raid4 */ |
| 1619 | if (mddev->new_level == 4 && |
| 1620 | mddev->layout == ALGORITHM_PARITY_N) |
| 1621 | return 0; |
| 1622 | |
| 1623 | /* raid6_*_n with parity N -> raid5_* */ |
| 1624 | if (mddev->new_level == 5 && |
| 1625 | ((mddev->layout == ALGORITHM_PARITY_N && mddev->new_layout == ALGORITHM_PARITY_N) || |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 1626 | __within_range(mddev->new_layout, ALGORITHM_LEFT_ASYMMETRIC, ALGORITHM_RIGHT_SYMMETRIC))) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1627 | return 0; |
| 1628 | |
| 1629 | default: |
| 1630 | break; |
| 1631 | } |
| 1632 | |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 1633 | rs->ti->error = "takeover not possible"; |
| 1634 | return -EINVAL; |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 1635 | } |
| 1636 | |
| 1637 | /* True if @rs requested to be taken over */ |
| 1638 | static bool rs_takeover_requested(struct raid_set *rs) |
| 1639 | { |
| 1640 | return rs->md.new_level != rs->md.level; |
| 1641 | } |
| 1642 | |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 1643 | /* True if @rs is requested to reshape by ctr */ |
| 1644 | static bool rs_reshape_requested(struct raid_set *rs) |
| 1645 | { |
| 1646 | struct mddev *mddev = &rs->md; |
| 1647 | |
| 1648 | if (!mddev->level) |
| 1649 | return false; |
| 1650 | |
| 1651 | return !__is_raid10_far(mddev->new_layout) && |
| 1652 | mddev->new_level == mddev->level && |
| 1653 | (mddev->new_layout != mddev->layout || |
| 1654 | mddev->new_chunk_sectors != mddev->chunk_sectors || |
| 1655 | rs->raid_disks + rs->delta_disks != mddev->raid_disks); |
| 1656 | } |
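/*
 * Editor's note: taken together, raid5 -> raid6 is a takeover (new_level
 * differs), whereas raid5 staying raid5 with a changed chunk size, layout
 * or disk count is a reshape; raid10_far layouts are excluded from
 * reshaping by the __is_raid10_far() test above.
 */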
| 1657 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1658 | /* Features */ |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1659 | #define FEATURE_FLAG_SUPPORTS_V190 0x1 /* Supports extended superblock */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1660 | |
| 1661 | /* State flags for sb->flags */ |
| 1662 | #define SB_FLAG_RESHAPE_ACTIVE 0x1 |
| 1663 | #define SB_FLAG_RESHAPE_BACKWARDS 0x2 |
| 1664 | |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 1665 | /* |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1666 | * This structure is never routinely used by userspace, unlike md superblocks. |
| 1667 | * Devices with this superblock should only ever be accessed via device-mapper. |
| 1668 | */ |
| 1669 | #define DM_RAID_MAGIC 0x64526D44 |
| 1670 | struct dm_raid_superblock { |
| 1671 | __le32 magic; /* "DmRd" */ |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1672 | __le32 compat_features; /* Used to indicate compatible features (like 1.9.0 ondisk metadata extension) */ |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1673 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1674 | __le32 num_devices; /* Number of devices in this raid set. (Max 64) */ |
| 1675 | __le32 array_position; /* The position of this drive in the raid set */ |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1676 | |
| 1677 | __le64 events; /* Incremented by md when superblock updated */ |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1678 | __le64 failed_devices; /* Pre 1.9.0 part of bit field of devices to */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1679 | /* indicate failures (see extension below) */ |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1680 | |
| 1681 | /* |
| 1682 | * This offset tracks the progress of the repair or replacement of |
| 1683 | * an individual drive. |
| 1684 | */ |
| 1685 | __le64 disk_recovery_offset; |
| 1686 | |
| 1687 | /* |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1688 | * This offset tracks the progress of the initial raid set |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1689 | * synchronisation/parity calculation. |
| 1690 | */ |
| 1691 | __le64 array_resync_offset; |
| 1692 | |
| 1693 | /* |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1694 | * raid characteristics |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1695 | */ |
| 1696 | __le32 level; |
| 1697 | __le32 layout; |
| 1698 | __le32 stripe_sectors; |
| 1699 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1700 | /******************************************************************** |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1701 | * BELOW FOLLOW V1.9.0 EXTENSIONS TO THE PRISTINE SUPERBLOCK FORMAT!!! |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1702 | * |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1703 | * FEATURE_FLAG_SUPPORTS_V190 in the compat_features member indicates that those exist
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1704 | */ |
| 1705 | |
| 1706 | __le32 flags; /* Flags defining array states for reshaping */ |
| 1707 | |
| 1708 | /* |
| 1709 | * This offset tracks the progress of a raid |
| 1710 | * set reshape in order to be able to restart it |
| 1711 | */ |
| 1712 | __le64 reshape_position; |
| 1713 | |
| 1714 | /* |
| 1715 | * These define the properties of the array in case of an interrupted reshape |
| 1716 | */ |
| 1717 | __le32 new_level; |
| 1718 | __le32 new_layout; |
| 1719 | __le32 new_stripe_sectors; |
| 1720 | __le32 delta_disks; |
| 1721 | |
| 1722 | __le64 array_sectors; /* Array size in sectors */ |
| 1723 | |
| 1724 | /* |
| 1725 | * Sector offsets to data on devices (reshaping). |
| 1726 | * Needed to support out of place reshaping, thus |
| 1727 | * not writing over any stripes whilst converting |
| 1728 | * them from old to new layout |
| 1729 | */ |
| 1730 | __le64 data_offset; |
| 1731 | __le64 new_data_offset; |
| 1732 | |
| 1733 | __le64 sectors; /* Used device size in sectors */ |
| 1734 | |
| 1735 | /* |
| 1736 | * Additional bit field of devices indicating failures to support
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1737 | * up to 256 devices with the 1.9.0 on-disk metadata format |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1738 | */ |
| 1739 | __le64 extended_failed_devices[DISKS_ARRAY_ELEMS - 1]; |
| 1740 | |
| 1741 | __le32 incompat_features; /* Used to indicate any incompatible features */ |
| 1742 | |
| 1743 | /* Always set rest up to logical block size to 0 when writing (see get_metadata_device() below). */ |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1744 | } __packed; |
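/*
 * Editor's sketch (illustration only, hypothetical helper): how a
 * failed-device bit spans sb->failed_devices plus the v1.9.0 extension,
 * mirroring sb_retrieve_failed_devices()/sb_update_failed_devices() below.
 * The combined field covers 64 + 3 * 64 = 256 devices.
 */
static inline bool example_device_failed(const uint64_t *failed_devices,
					 unsigned int i)
{
	/* failed_devices[0] holds sb->failed_devices, [1..] the extension */
	return failed_devices[i / 64] & (1ULL << (i % 64));
}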
| 1745 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 1746 | /* |
| 1747 | * Check for reshape constraints on raid set @rs: |
| 1748 | * |
| 1749 | * - reshape function non-existent |
| 1750 | * - degraded set |
| 1751 | * - ongoing recovery |
| 1752 | * - ongoing reshape |
| 1753 | * |
| 1754 | * Returns 0 if no constraint is violated, or -EPERM with
| 1755 | * rs->ti->error set to a descriptive message
| 1756 | */ |
| 1757 | static int rs_check_reshape(struct raid_set *rs) |
| 1758 | { |
| 1759 | struct mddev *mddev = &rs->md; |
| 1760 | |
| 1761 | smp_rmb(); /* Make sure we access recent reshape position */ |
| 1762 | |
| 1763 | if (!mddev->pers || !mddev->pers->check_reshape) |
| 1764 | rs->ti->error = "Reshape not supported"; |
| 1765 | else if (mddev->degraded) |
| 1766 | rs->ti->error = "Can't reshape degraded raid set"; |
| 1767 | else if (rs_is_recovering(rs)) |
| 1768 | rs->ti->error = "Convert request on recovering raid set prohibited"; |
| 1769 | else if (rs_is_reshaping(rs))
| 1770 | rs->ti->error = "raid set already reshaping!"; |
| 1771 | else if (!(rs_is_raid10(rs) || rs_is_raid456(rs))) |
| 1772 | rs->ti->error = "Reshaping only supported for raid4/5/6/10"; |
| 1773 | else |
| 1774 | return 0; |
| 1775 | |
| 1776 | return -EPERM; |
| 1777 | } |
| 1778 | |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 1779 | static int read_disk_sb(struct md_rdev *rdev, int size) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1780 | { |
| 1781 | BUG_ON(!rdev->sb_page); |
| 1782 | |
| 1783 | if (rdev->sb_loaded) |
| 1784 | return 0; |
| 1785 | |
Mike Christie | 796a5cf | 2016-06-05 14:32:07 -0500 | [diff] [blame] | 1786 | if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) { |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 1787 | DMERR("Failed to read superblock of device at position %d", |
| 1788 | rdev->raid_disk); |
Jonathan Brassow | c32fb9e | 2012-05-22 13:55:31 +1000 | [diff] [blame] | 1789 | md_error(rdev->mddev, rdev); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1790 | return -EINVAL; |
| 1791 | } |
| 1792 | |
| 1793 | rdev->sb_loaded = 1; |
| 1794 | |
| 1795 | return 0; |
| 1796 | } |
| 1797 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1798 | static void sb_retrieve_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
| 1799 | { |
| 1800 | failed_devices[0] = le64_to_cpu(sb->failed_devices); |
| 1801 | memset(failed_devices + 1, 0, sizeof(sb->extended_failed_devices)); |
| 1802 | |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1803 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1804 | int i = ARRAY_SIZE(sb->extended_failed_devices); |
| 1805 | |
| 1806 | while (i--) |
| 1807 | failed_devices[i+1] = le64_to_cpu(sb->extended_failed_devices[i]); |
| 1808 | } |
| 1809 | } |
| 1810 | |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1811 | static void sb_update_failed_devices(struct dm_raid_superblock *sb, uint64_t *failed_devices) |
| 1812 | { |
| 1813 | int i = ARRAY_SIZE(sb->extended_failed_devices); |
| 1814 | |
| 1815 | sb->failed_devices = cpu_to_le64(failed_devices[0]); |
| 1816 | while (i--) |
| 1817 | sb->extended_failed_devices[i] = cpu_to_le64(failed_devices[i+1]); |
| 1818 | } |
| 1819 | |
| 1820 | /* |
| 1821 | * Synchronize the superblock members with the raid set properties |
| 1822 | * |
| 1823 | * All superblock data is little endian. |
| 1824 | */ |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 1825 | static void super_sync(struct mddev *mddev, struct md_rdev *rdev) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1826 | { |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1827 | bool update_failed_devices = false; |
| 1828 | unsigned int i; |
| 1829 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1830 | struct dm_raid_superblock *sb; |
Jonathan Brassow | 81f382f | 2012-05-22 13:55:30 +1000 | [diff] [blame] | 1831 | struct raid_set *rs = container_of(mddev, struct raid_set, md); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1832 | |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1833 | /* No metadata device, no superblock */ |
| 1834 | if (!rdev->meta_bdev) |
| 1835 | return; |
| 1836 | |
| 1837 | BUG_ON(!rdev->sb_page); |
| 1838 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1839 | sb = page_address(rdev->sb_page); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1840 | |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1841 | sb_retrieve_failed_devices(sb, failed_devices); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1842 | |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1843 | for (i = 0; i < rs->raid_disks; i++) |
| 1844 | if (!rs->dev[i].data_dev || test_bit(Faulty, &rs->dev[i].rdev.flags)) { |
| 1845 | update_failed_devices = true; |
| 1846 | set_bit(i, (void *) failed_devices); |
| 1847 | } |
| 1848 | |
| 1849 | if (update_failed_devices) |
| 1850 | sb_update_failed_devices(sb, failed_devices); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1851 | |
| 1852 | sb->magic = cpu_to_le32(DM_RAID_MAGIC); |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1853 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1854 | |
| 1855 | sb->num_devices = cpu_to_le32(mddev->raid_disks); |
| 1856 | sb->array_position = cpu_to_le32(rdev->raid_disk); |
| 1857 | |
| 1858 | sb->events = cpu_to_le64(mddev->events); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1859 | |
| 1860 | sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset); |
| 1861 | sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp); |
| 1862 | |
| 1863 | sb->level = cpu_to_le32(mddev->level); |
| 1864 | sb->layout = cpu_to_le32(mddev->layout); |
| 1865 | sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors); |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1866 | |
| 1867 | sb->new_level = cpu_to_le32(mddev->new_level); |
| 1868 | sb->new_layout = cpu_to_le32(mddev->new_layout); |
| 1869 | sb->new_stripe_sectors = cpu_to_le32(mddev->new_chunk_sectors); |
| 1870 | |
| 1871 | sb->delta_disks = cpu_to_le32(mddev->delta_disks); |
| 1872 | |
| 1873 | smp_rmb(); /* Make sure we access most recent reshape position */ |
| 1874 | sb->reshape_position = cpu_to_le64(mddev->reshape_position); |
| 1875 | if (le64_to_cpu(sb->reshape_position) != MaxSector) { |
| 1876 | /* Flag ongoing reshape */ |
| 1877 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE); |
| 1878 | |
| 1879 | if (mddev->delta_disks < 0 || mddev->reshape_backwards) |
| 1880 | sb->flags |= cpu_to_le32(SB_FLAG_RESHAPE_BACKWARDS); |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1881 | } else { |
| 1882 | /* Clear reshape flags */ |
| 1883 | sb->flags &= ~(cpu_to_le32(SB_FLAG_RESHAPE_ACTIVE|SB_FLAG_RESHAPE_BACKWARDS)); |
| 1884 | } |
Heinz Mauelshagen | 7b34df7 | 2016-05-19 18:49:32 +0200 | [diff] [blame] | 1885 | |
| 1886 | sb->array_sectors = cpu_to_le64(mddev->array_sectors); |
| 1887 | sb->data_offset = cpu_to_le64(rdev->data_offset); |
| 1888 | sb->new_data_offset = cpu_to_le64(rdev->new_data_offset); |
| 1889 | sb->sectors = cpu_to_le64(rdev->sectors); |
| 1890 | |
| 1891 | /* Zero out the rest of the payload after the size of the superblock */ |
| 1892 | memset(sb + 1, 0, rdev->sb_size - sizeof(*sb)); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1893 | } |
| 1894 | |
| 1895 | /* |
| 1896 | * super_load |
| 1897 | * |
| 1898 | * This function creates a superblock if one is not found on the device |
| 1899 | * and will decide which superblock to use if there's a choice. |
| 1900 | * |
| 1901 | * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise |
| 1902 | */ |
NeilBrown | 3cb0300 | 2011-10-11 16:45:26 +1100 | [diff] [blame] | 1903 | static int super_load(struct md_rdev *rdev, struct md_rdev *refdev) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1904 | { |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 1905 | int r; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1906 | struct dm_raid_superblock *sb; |
| 1907 | struct dm_raid_superblock *refsb; |
| 1908 | uint64_t events_sb, events_refsb; |
| 1909 | |
| 1910 | rdev->sb_start = 0; |
Heinz Mauelshagen | 40d43c4 | 2014-10-17 13:38:50 +0200 | [diff] [blame] | 1911 | rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev); |
| 1912 | if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) { |
| 1913 | DMERR("superblock size of a logical block is no longer valid"); |
| 1914 | return -EINVAL; |
| 1915 | } |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1916 | |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 1917 | r = read_disk_sb(rdev, rdev->sb_size); |
| 1918 | if (r) |
| 1919 | return r; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1920 | |
| 1921 | sb = page_address(rdev->sb_page); |
Jonathan E Brassow | 3aa3b2b | 2012-03-07 19:09:47 +0000 | [diff] [blame] | 1922 | |
| 1923 | /* |
| 1924 | * Two cases that we want to write new superblocks and rebuild: |
| 1925 | * 1) New device (no matching magic number) |
| 1926 | * 2) Device specified for rebuild (!In_sync w/ offset == 0) |
| 1927 | */ |
| 1928 | if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) || |
| 1929 | (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) { |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1930 | super_sync(rdev->mddev, rdev); |
| 1931 | |
| 1932 | set_bit(FirstUse, &rdev->flags); |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 1933 | sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1934 | |
| 1935 | /* Force writing of superblocks to disk */ |
| 1936 | set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags); |
| 1937 | |
| 1938 | /* Any superblock is better than none, choose that if given */ |
| 1939 | return refdev ? 0 : 1; |
| 1940 | } |
| 1941 | |
| 1942 | if (!refdev) |
| 1943 | return 1; |
| 1944 | |
| 1945 | events_sb = le64_to_cpu(sb->events); |
| 1946 | |
| 1947 | refsb = page_address(refdev->sb_page); |
| 1948 | events_refsb = le64_to_cpu(refsb->events); |
| 1949 | |
| 1950 | return (events_sb > events_refsb) ? 1 : 0; |
| 1951 | } |
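/*
 * Minimal userspace sketch (not kernel code; names hypothetical) of the
 * freshness decision super_load() makes between two superblocks: the
 * higher event counter wins, and a tie keeps the reference device.
 */
#include <stdint.h>
#include <stdio.h>

/* 1 -> use rdev, 0 -> keep refdev; mirrors super_load()'s return value */
static int pick_freshest(uint64_t events_sb, uint64_t events_refsb)
{
	return events_sb > events_refsb ? 1 : 0;
}

int main(void)
{
	printf("%d\n", pick_freshest(5, 4));	/* 1: rdev is fresher */
	printf("%d\n", pick_freshest(4, 4));	/* 0: tie keeps refdev */
	return 0;
}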
| 1952 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1953 | static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1954 | { |
| 1955 | int role; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1956 | unsigned int d; |
| 1957 | struct mddev *mddev = &rs->md; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1958 | uint64_t events_sb; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1959 | uint64_t failed_devices[DISKS_ARRAY_ELEMS]; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1960 | struct dm_raid_superblock *sb; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1961 | uint32_t new_devs = 0, rebuild_and_new = 0, rebuilds = 0; |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 1962 | struct md_rdev *r; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1963 | struct dm_raid_superblock *sb2; |
| 1964 | |
| 1965 | sb = page_address(rdev->sb_page); |
| 1966 | events_sb = le64_to_cpu(sb->events); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1967 | |
| 1968 | /* |
| 1969 | * Initialise to 1 if this is a new superblock. |
| 1970 | */ |
| 1971 | mddev->events = events_sb ? : 1; |
| 1972 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1973 | mddev->reshape_position = MaxSector; |
| 1974 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1975 | /* |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1976 | * Reshaping is supported, e.g. reshape_position is valid |
| 1977 | * in superblock and superblock content is authoritative. |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 1978 | */ |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1979 | if (le32_to_cpu(sb->compat_features) & FEATURE_FLAG_SUPPORTS_V190) { |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1980 | /* Superblock is authoritative wrt given raid set layout! */ |
| 1981 | mddev->raid_disks = le32_to_cpu(sb->num_devices); |
| 1982 | mddev->level = le32_to_cpu(sb->level); |
| 1983 | mddev->layout = le32_to_cpu(sb->layout); |
| 1984 | mddev->chunk_sectors = le32_to_cpu(sb->stripe_sectors); |
| 1985 | mddev->new_level = le32_to_cpu(sb->new_level); |
| 1986 | mddev->new_layout = le32_to_cpu(sb->new_layout); |
| 1987 | mddev->new_chunk_sectors = le32_to_cpu(sb->new_stripe_sectors); |
| 1988 | mddev->delta_disks = le32_to_cpu(sb->delta_disks); |
| 1989 | mddev->array_sectors = le64_to_cpu(sb->array_sectors); |
| 1990 | |
| 1991 | /* raid was reshaping and got interrupted */ |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1992 | if (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_ACTIVE) { |
| 1993 | if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags)) { |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 1994 | DMERR("Reshape requested but raid set is still reshaping"); |
| 1995 | return -EINVAL; |
| 1996 | } |
| 1997 | |
| 1998 | if (mddev->delta_disks < 0 || |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 1999 | (!mddev->delta_disks && (le32_to_cpu(sb->flags) & SB_FLAG_RESHAPE_BACKWARDS))) |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2000 | mddev->reshape_backwards = 1; |
| 2001 | else |
| 2002 | mddev->reshape_backwards = 0; |
| 2003 | |
| 2004 | mddev->reshape_position = le64_to_cpu(sb->reshape_position); |
| 2005 | rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); |
| 2006 | } |
| 2007 | |
| 2008 | } else { |
| 2009 | /* |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 2010 | * No takeover/reshaping, because we don't have the extended v1.9.0 metadata |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2011 | */ |
| 2012 | if (le32_to_cpu(sb->level) != mddev->level) { |
| 2013 | DMERR("Reshaping/takeover raid sets not yet supported. (raid level/stripes/size change)"); |
| 2014 | return -EINVAL; |
| 2015 | } |
| 2016 | if (le32_to_cpu(sb->layout) != mddev->layout) { |
| 2017 | DMERR("Reshaping raid sets not yet supported. (raid layout change)"); |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2018 | DMERR(" 0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout); |
| 2019 | DMERR(" Old layout: %s w/ %d copies", |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2020 | raid10_md_layout_to_format(le32_to_cpu(sb->layout)), |
| 2021 | raid10_md_layout_to_copies(le32_to_cpu(sb->layout))); |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2022 | DMERR(" New layout: %s w/ %d copies", |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2023 | raid10_md_layout_to_format(mddev->layout), |
| 2024 | raid10_md_layout_to_copies(mddev->layout)); |
| 2025 | return -EINVAL; |
| 2026 | } |
| 2027 | if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) { |
| 2028 | DMERR("Reshaping raid sets not yet supported. (stripe sectors change)"); |
| 2029 | return -EINVAL; |
| 2030 | } |
| 2031 | |
| 2032 | /* We can only change the number of devices in raid1 with old (i.e. pre 1.0.7) metadata */ |
| 2033 | if (!rt_is_raid1(rs->raid_type) && |
| 2034 | (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) { |
| 2035 | DMERR("Reshaping raid sets not yet supported. (device count change from %u to %u)", |
| 2036 | 			      le32_to_cpu(sb->num_devices), mddev->raid_disks); |
| 2037 | return -EINVAL; |
| 2038 | } |
| 2039 | |
| 2040 | /* Table line is checked vs. authoritative superblock */ |
| 2041 | rs_set_new(rs); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2042 | } |
| 2043 | |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 2044 | if (!test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags)) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2045 | mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset); |
| 2046 | |
| 2047 | /* |
| 2048 | * During load, we set FirstUse if a new superblock was written. |
| 2049 | * There are two reasons we might not have a superblock: |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2050 | * 1) The raid set is brand new - in which case, all of the |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2051 | * devices must have their In_sync bit set. Also, |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2052 | * recovery_cp must be 0, unless forced. |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2053 | * 2) This is a new device being added to an old raid set |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2054 | * and the new device needs to be rebuilt - in which |
| 2055 | * case the In_sync bit will /not/ be set and |
| 2056 | * recovery_cp must be MaxSector. |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2057 | 	 * 3) One or more new devices are being added to an old |
| 2058 | * raid set during takeover to a higher raid level |
| 2059 | * to provide capacity for redundancy or during reshape |
| 2060 | * to add capacity to grow the raid set. |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2061 | */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2062 | d = 0; |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 2063 | rdev_for_each(r, mddev) { |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2064 | if (test_bit(FirstUse, &r->flags)) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2065 | new_devs++; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2066 | |
| 2067 | if (!test_bit(In_sync, &r->flags)) { |
| 2068 | DMINFO("Device %d specified for rebuild; clearing superblock", |
| 2069 | r->raid_disk); |
| 2070 | rebuilds++; |
| 2071 | |
| 2072 | if (test_bit(FirstUse, &r->flags)) |
| 2073 | rebuild_and_new++; |
| 2074 | } |
| 2075 | |
| 2076 | d++; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2077 | } |
| 2078 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2079 | if (new_devs == rs->raid_disks || !rebuilds) { |
| 2080 | /* Replace a broken device */ |
| 2081 | if (new_devs == 1 && !rs->delta_disks) |
| 2082 | ; |
| 2083 | if (new_devs == rs->raid_disks) { |
| 2084 | DMINFO("Superblocks created for new raid set"); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2085 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2086 | mddev->recovery_cp = 0; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2087 | } else if (new_devs != rebuilds && |
| 2088 | new_devs != rs->delta_disks) { |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2089 | DMERR("New device injected into existing raid set without " |
| 2090 | "'delta_disks' or 'rebuild' parameter specified"); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2091 | return -EINVAL; |
| 2092 | } |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2093 | } else if (new_devs && new_devs != rebuilds) { |
| 2094 | DMERR("%u 'rebuild' devices cannot be injected into" |
| 2095 | " a raid set with %u other first-time devices", |
| 2096 | rebuilds, new_devs); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2097 | return -EINVAL; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2098 | } else if (rebuilds) { |
| 2099 | if (rebuild_and_new && rebuilds != rebuild_and_new) { |
| 2100 | DMERR("new device%s provided without 'rebuild'", |
| 2101 | new_devs > 1 ? "s" : ""); |
| 2102 | return -EINVAL; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2103 | } else if (rs_is_recovering(rs)) { |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2104 | DMERR("'rebuild' specified while raid set is not in-sync (recovery_cp=%llu)", |
| 2105 | (unsigned long long) mddev->recovery_cp); |
| 2106 | return -EINVAL; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2107 | } else if (rs_is_reshaping(rs)) { |
| 2108 | DMERR("'rebuild' specified while raid set is being reshaped (reshape_position=%llu)", |
| 2109 | (unsigned long long) mddev->reshape_position); |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2110 | return -EINVAL; |
| 2111 | } |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2112 | } |
| 2113 | |
| 2114 | /* |
| 2115 | * Now we set the Faulty bit for those devices that are |
| 2116 | * recorded in the superblock as failed. |
| 2117 | */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2118 | sb_retrieve_failed_devices(sb, failed_devices); |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 2119 | rdev_for_each(r, mddev) { |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2120 | if (!r->sb_page) |
| 2121 | continue; |
| 2122 | sb2 = page_address(r->sb_page); |
| 2123 | sb2->failed_devices = 0; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2124 | memset(sb2->extended_failed_devices, 0, sizeof(sb2->extended_failed_devices)); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2125 | |
| 2126 | /* |
| 2127 | * Check for any device re-ordering. |
| 2128 | */ |
| 2129 | if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) { |
| 2130 | role = le32_to_cpu(sb2->array_position); |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2131 | if (role < 0) |
| 2132 | continue; |
| 2133 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2134 | if (role != r->raid_disk) { |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 2135 | if (__is_raid10_near(mddev->layout)) { |
| 2136 | if (mddev->raid_disks % __raid10_near_copies(mddev->layout) || |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2137 | rs->raid_disks % rs->raid10_copies) { |
| 2138 | rs->ti->error = |
| 2139 | "Cannot change raid10 near set to odd # of devices!"; |
| 2140 | return -EINVAL; |
| 2141 | } |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2142 | |
| 2143 | sb2->array_position = cpu_to_le32(r->raid_disk); |
| 2144 | |
| 2145 | } else if (!(rs_is_raid10(rs) && rt_is_raid0(rs->raid_type)) && |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2146 | !(rs_is_raid0(rs) && rt_is_raid10(rs->raid_type)) && |
| 2147 | !rt_is_raid1(rs->raid_type)) { |
| 2148 | rs->ti->error = "Cannot change device positions in raid set"; |
| 2149 | return -EINVAL; |
| 2150 | } |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2151 | |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2152 | DMINFO("raid device #%d now at position #%d", role, r->raid_disk); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2153 | } |
| 2154 | |
| 2155 | /* |
| 2156 | * Partial recovery is performed on |
| 2157 | 		 * failed devices returning to the array. |
| 2158 | */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2159 | if (test_bit(role, (void *) failed_devices)) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2160 | set_bit(Faulty, &r->flags); |
| 2161 | } |
| 2162 | } |
| 2163 | |
| 2164 | return 0; |
| 2165 | } |
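/*
 * Compact userspace sketch (types and the exact branch structure are
 * simplified; names hypothetical) of the device accounting the
 * validation above performs: every FirstUse device bumps new_devs,
 * every !In_sync device bumps rebuilds, and the combination decides
 * between "fresh set", "rebuild" and "error".
 */
#include <stdio.h>

static const char *classify(unsigned total, unsigned new_devs, unsigned rebuilds)
{
	if (new_devs == total)
		return "new raid set: create superblocks, full resync";
	if (new_devs && new_devs != rebuilds)
		return "error: new device without 'rebuild'/'delta_disks'";
	if (rebuilds)
		return "rebuild the requested devices";
	return "clean activation";
}

int main(void)
{
	printf("%s\n", classify(4, 4, 0));	/* brand new array */
	printf("%s\n", classify(4, 1, 0));	/* injected device -> error */
	printf("%s\n", classify(4, 1, 1));	/* replace a failed member */
	return 0;
}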
| 2166 | |
Heinz Mauelshagen | 0cf4503 | 2015-04-29 14:03:04 +0200 | [diff] [blame] | 2167 | static int super_validate(struct raid_set *rs, struct md_rdev *rdev) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2168 | { |
Heinz Mauelshagen | 0cf4503 | 2015-04-29 14:03:04 +0200 | [diff] [blame] | 2169 | struct mddev *mddev = &rs->md; |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2170 | struct dm_raid_superblock *sb; |
| 2171 | |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 2172 | if (rs_is_raid0(rs) || !rdev->sb_page) |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2173 | return 0; |
| 2174 | |
| 2175 | sb = page_address(rdev->sb_page); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2176 | |
| 2177 | /* |
| 2178 | * If mddev->events is not set, we know we have not yet initialized |
| 2179 | * the array. |
| 2180 | */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2181 | if (!mddev->events && super_init_validation(rs, rdev)) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2182 | return -EINVAL; |
| 2183 | |
Mike Snitzer | 9b6e542 | 2016-06-02 11:48:09 -0400 | [diff] [blame] | 2184 | if (le32_to_cpu(sb->compat_features) != FEATURE_FLAG_SUPPORTS_V190) { |
| 2185 | rs->ti->error = "Unable to assemble array: Unknown flag(s) in compatible feature flags"; |
| 2186 | return -EINVAL; |
| 2187 | } |
| 2188 | |
| 2189 | if (sb->incompat_features) { |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2190 | rs->ti->error = "Unable to assemble array: No incompatible feature flags supported yet"; |
Heinz Mauelshagen | 4c9971c | 2016-04-29 18:59:56 +0200 | [diff] [blame] | 2191 | return -EINVAL; |
| 2192 | } |
| 2193 | |
Heinz Mauelshagen | 0cf4503 | 2015-04-29 14:03:04 +0200 | [diff] [blame] | 2194 | /* Enable bitmap creation for RAID levels != 0 */ |
Heinz Mauelshagen | 676fa5a | 2016-05-19 18:49:29 +0200 | [diff] [blame] | 2195 | mddev->bitmap_info.offset = rt_is_raid0(rs->raid_type) ? 0 : to_sector(4096); |
Heinz Mauelshagen | 0cf4503 | 2015-04-29 14:03:04 +0200 | [diff] [blame] | 2196 | rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset; |
| 2197 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2198 | if (!test_and_clear_bit(FirstUse, &rdev->flags)) { |
| 2199 | /* Retrieve device size stored in superblock to be prepared for shrink */ |
| 2200 | rdev->sectors = le64_to_cpu(sb->sectors); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2201 | rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset); |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2202 | if (rdev->recovery_offset == MaxSector) |
| 2203 | set_bit(In_sync, &rdev->flags); |
| 2204 | /* |
| 2205 | * If no reshape in progress -> we're recovering single |
| 2206 | * disk(s) and have to set the device(s) to out-of-sync |
| 2207 | */ |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2208 | else if (!rs_is_reshaping(rs)) |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2209 | clear_bit(In_sync, &rdev->flags); /* Mandatory for recovery */ |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2210 | } |
| 2211 | |
| 2212 | /* |
| 2213 | * If a device comes back, set it as not In_sync and no longer faulty. |
| 2214 | */ |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2215 | if (test_and_clear_bit(Faulty, &rdev->flags)) { |
| 2216 | rdev->recovery_offset = 0; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2217 | clear_bit(In_sync, &rdev->flags); |
| 2218 | rdev->saved_raid_disk = rdev->raid_disk; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2219 | } |
| 2220 | |
Heinz Mauelshagen | 33e53f0 | 2016-05-19 18:49:30 +0200 | [diff] [blame] | 2221 | 	/* Reshape support -> restore respective data offsets */ |
| 2222 | rdev->data_offset = le64_to_cpu(sb->data_offset); |
| 2223 | rdev->new_data_offset = le64_to_cpu(sb->new_data_offset); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2224 | |
| 2225 | return 0; |
| 2226 | } |
| 2227 | |
| 2228 | /* |
| 2229 | * Analyse superblocks and select the freshest. |
| 2230 | */ |
| 2231 | static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) |
| 2232 | { |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2233 | int r; |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2234 | struct raid_dev *dev; |
Jonathan Brassow | a9ad852 | 2012-04-24 10:23:13 +1000 | [diff] [blame] | 2235 | struct md_rdev *rdev, *tmp, *freshest; |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 2236 | struct mddev *mddev = &rs->md; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2237 | |
| 2238 | freshest = NULL; |
Jonathan Brassow | a9ad852 | 2012-04-24 10:23:13 +1000 | [diff] [blame] | 2239 | rdev_for_each_safe(rdev, tmp, mddev) { |
Jonathan Brassow | 761becf | 2012-10-11 13:42:19 +1100 | [diff] [blame] | 2240 | /* |
Heinz Mauelshagen | c76d53f | 2015-04-29 14:03:00 +0200 | [diff] [blame] | 2241 | * Skipping super_load due to CTR_FLAG_SYNC will cause |
Jonathan Brassow | 761becf | 2012-10-11 13:42:19 +1100 | [diff] [blame] | 2242 | * the array to undergo initialization again as |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2243 | * though it were new. This is the intended effect |
Jonathan Brassow | 761becf | 2012-10-11 13:42:19 +1100 | [diff] [blame] | 2244 | * of the "sync" directive. |
| 2245 | * |
| 2246 | * When reshaping capability is added, we must ensure |
| 2247 | * that the "sync" directive is disallowed during the |
| 2248 | * reshape. |
| 2249 | */ |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 2250 | if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) |
Jonathan Brassow | 761becf | 2012-10-11 13:42:19 +1100 | [diff] [blame] | 2251 | continue; |
| 2252 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2253 | if (!rdev->meta_bdev) |
| 2254 | continue; |
| 2255 | |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2256 | r = super_load(rdev, freshest); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2257 | |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2258 | switch (r) { |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2259 | case 1: |
| 2260 | freshest = rdev; |
| 2261 | break; |
| 2262 | case 0: |
| 2263 | break; |
| 2264 | default: |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2265 | dev = container_of(rdev, struct raid_dev, rdev); |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2266 | if (dev->meta_dev) |
| 2267 | dm_put_device(ti, dev->meta_dev); |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2268 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2269 | dev->meta_dev = NULL; |
| 2270 | rdev->meta_bdev = NULL; |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2271 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2272 | if (rdev->sb_page) |
| 2273 | put_page(rdev->sb_page); |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2274 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2275 | rdev->sb_page = NULL; |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2276 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2277 | rdev->sb_loaded = 0; |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2278 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2279 | /* |
| 2280 | * We might be able to salvage the data device |
| 2281 | * even though the meta device has failed. For |
| 2282 | * now, we behave as though '- -' had been |
| 2283 | * set for this device in the table. |
| 2284 | */ |
| 2285 | if (dev->data_dev) |
| 2286 | dm_put_device(ti, dev->data_dev); |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2287 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2288 | dev->data_dev = NULL; |
| 2289 | rdev->bdev = NULL; |
Jonathan E Brassow | 0447568 | 2012-03-28 18:41:26 +0100 | [diff] [blame] | 2290 | |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2291 | list_del(&rdev->same_set); |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2292 | } |
| 2293 | } |
| 2294 | |
| 2295 | if (!freshest) |
| 2296 | return 0; |
| 2297 | |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2298 | if (validate_raid_redundancy(rs)) { |
| 2299 | rs->ti->error = "Insufficient redundancy to activate array"; |
| 2300 | return -EINVAL; |
| 2301 | } |
Jonathan Brassow | 55ebbb5 | 2013-01-22 21:42:18 -0600 | [diff] [blame] | 2302 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2303 | /* |
| 2304 | * Validation of the freshest device provides the source of |
| 2305 | * validation for the remaining devices. |
| 2306 | */ |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2307 | rs->ti->error = "Unable to assemble array: Invalid superblocks"; |
| 2308 | if (super_validate(rs, freshest)) |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2309 | return -EINVAL; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2310 | |
NeilBrown | dafb20f | 2012-03-19 12:46:39 +1100 | [diff] [blame] | 2311 | rdev_for_each(rdev, mddev) |
Heinz Mauelshagen | 0cf4503 | 2015-04-29 14:03:04 +0200 | [diff] [blame] | 2312 | if ((rdev != freshest) && super_validate(rs, rdev)) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2313 | return -EINVAL; |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2314 | return 0; |
| 2315 | } |
| 2316 | |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 2317 | /* |
| 2318 | * Adjust data_offset and new_data_offset on all disk members of @rs |
| 2319 |  * for out-of-place reshaping if requested by the constructor. |
| 2320 | * |
| 2321 |  * We need free space at the beginning of each raid disk for forward |
| 2322 |  * reshapes and at the end for backward ones; userspace has to provide |
| 2323 |  * it via remapping/reordering of space. |
| 2324 | */ |
| 2325 | static int rs_adjust_data_offsets(struct raid_set *rs) |
| 2326 | { |
| 2327 | sector_t data_offset = 0, new_data_offset = 0; |
| 2328 | struct md_rdev *rdev; |
| 2329 | |
| 2330 | /* Constructor did not request data offset change */ |
| 2331 | if (!test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags)) { |
| 2332 | if (!rs_is_reshapable(rs)) |
| 2333 | goto out; |
| 2334 | |
| 2335 | return 0; |
| 2336 | } |
| 2337 | |
| 2338 | /* HM FIXME: get InSync raid_dev? */ |
| 2339 | rdev = &rs->dev[0].rdev; |
| 2340 | |
| 2341 | if (rs->delta_disks < 0) { |
| 2342 | /* |
| 2343 | * Removing disks (reshaping backwards): |
| 2344 | * |
| 2345 | * - before reshape: data is at offset 0 and free space |
| 2346 | * is at end of each component LV |
| 2347 | * |
| 2348 | * - after reshape: data is at offset rs->data_offset != 0 on each component LV |
| 2349 | */ |
| 2350 | data_offset = 0; |
| 2351 | new_data_offset = rs->data_offset; |
| 2352 | |
| 2353 | } else if (rs->delta_disks > 0) { |
| 2354 | /* |
| 2355 | * Adding disks (reshaping forwards): |
| 2356 | * |
| 2357 | * - before reshape: data is at offset rs->data_offset != 0 and |
| 2358 | 		 *		    free space is at the beginning of each component LV |
| 2359 | * |
| 2360 | * - after reshape: data is at offset 0 on each component LV |
| 2361 | */ |
| 2362 | data_offset = rs->data_offset; |
| 2363 | new_data_offset = 0; |
| 2364 | |
| 2365 | } else { |
| 2366 | /* |
| 2367 | * User space passes in 0 for data offset after having removed reshape space |
| 2368 | * |
| 2369 | * - or - (data offset != 0) |
| 2370 | * |
| 2371 | * Changing RAID layout or chunk size -> toggle offsets |
| 2372 | * |
| 2373 | 		 * - before reshape: data is at offset 0 (rs->data_offset == 0) and |
| 2374 | 		 *		     free space is at end of each component LV |
| 2375 | 		 *		     -or- |
| 2376 | 		 *		     data is at offset rs->data_offset != 0 and |
| 2377 | 		 *		     free space is at the beginning of each component LV |
| 2378 | 		 * |
| 2379 | 		 * - after reshape: data is at offset 0 if it was at offset != 0 |
| 2380 | 		 *		    or at offset != 0 if it was at offset 0 |
| 2381 | * on each component LV |
| 2382 | * |
| 2383 | */ |
| 2384 | data_offset = rs->data_offset ? rdev->data_offset : 0; |
| 2385 | new_data_offset = data_offset ? 0 : rs->data_offset; |
| 2386 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
| 2387 | } |
| 2388 | |
| 2389 | /* |
| 2390 | 	 * Make sure we have a minimum amount of free sectors per device |
| 2391 | */ |
| 2392 | if (rs->data_offset && |
| 2393 | to_sector(i_size_read(rdev->bdev->bd_inode)) - rdev->sectors < MIN_FREE_RESHAPE_SPACE) { |
| 2394 | rs->ti->error = data_offset ? "No space for forward reshape" : |
| 2395 | "No space for backward reshape"; |
| 2396 | return -ENOSPC; |
| 2397 | } |
| 2398 | out: |
| 2399 | /* Adjust data offsets on all rdevs */ |
| 2400 | rdev_for_each(rdev, &rs->md) { |
| 2401 | rdev->data_offset = data_offset; |
| 2402 | rdev->new_data_offset = new_data_offset; |
| 2403 | } |
| 2404 | |
| 2405 | return 0; |
| 2406 | } |
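/*
 * Userspace sketch (values hypothetical) of the offset selection above:
 * removing disks reshapes backwards into space at the end (data moves
 * from offset 0 to rs->data_offset), adding disks reshapes forwards
 * into space at the start (data moves from rs->data_offset to 0), and a
 * pure layout/chunk change just toggles between the two.
 */
#include <stdio.h>

static void pick_offsets(int delta_disks, unsigned long long requested,
			 unsigned long long cur, /* current rdev->data_offset */
			 unsigned long long *dofs, unsigned long long *new_dofs)
{
	if (delta_disks < 0) {		/* remove disks: backward reshape */
		*dofs = 0;
		*new_dofs = requested;
	} else if (delta_disks > 0) {	/* add disks: forward reshape */
		*dofs = requested;
		*new_dofs = 0;
	} else {			/* layout/chunk change: toggle */
		*dofs = requested ? cur : 0;
		*new_dofs = *dofs ? 0 : requested;
	}
}

int main(void)
{
	unsigned long long d, n;

	pick_offsets(2, 8192, 0, &d, &n);
	printf("add disks:    data_offset=%llu new_data_offset=%llu\n", d, n);
	pick_offsets(-1, 8192, 0, &d, &n);
	printf("remove disks: data_offset=%llu new_data_offset=%llu\n", d, n);
	return 0;
}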
| 2407 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2408 | /* Userspace reordered disks -> adjust raid_disk indexes in @rs */ |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 2409 | static void __reorder_raid_disk_indexes(struct raid_set *rs) |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2410 | { |
| 2411 | int i = 0; |
| 2412 | struct md_rdev *rdev; |
| 2413 | |
| 2414 | rdev_for_each(rdev, &rs->md) { |
| 2415 | rdev->raid_disk = i++; |
| 2416 | rdev->saved_raid_disk = rdev->new_raid_disk = -1; |
| 2417 | } |
| 2418 | } |
| 2419 | |
| 2420 | /* |
| 2421 | * Setup @rs for takeover by a different raid level |
| 2422 | */ |
| 2423 | static int rs_setup_takeover(struct raid_set *rs) |
| 2424 | { |
| 2425 | struct mddev *mddev = &rs->md; |
| 2426 | struct md_rdev *rdev; |
| 2427 | unsigned int d = mddev->raid_disks = rs->raid_disks; |
| 2428 | sector_t new_data_offset = rs->dev[0].rdev.data_offset ? 0 : rs->data_offset; |
| 2429 | |
| 2430 | if (rt_is_raid10(rs->raid_type)) { |
| 2431 | if (mddev->level == 0) { |
| 2432 | 			/* Userspace reordered disks -> adjust raid_disk indexes */ |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 2433 | __reorder_raid_disk_indexes(rs); |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2434 | |
| 2435 | /* raid0 -> raid10_far layout */ |
| 2436 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_FAR, |
| 2437 | rs->raid10_copies); |
| 2438 | } else if (mddev->level == 1) |
| 2439 | /* raid1 -> raid10_near layout */ |
| 2440 | mddev->layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, |
| 2441 | rs->raid_disks); |
| 2442 | else |
| 2443 | return -EINVAL; |
| 2444 | |
| 2445 | } |
| 2446 | |
| 2447 | clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags); |
| 2448 | mddev->recovery_cp = MaxSector; |
| 2449 | |
| 2450 | while (d--) { |
| 2451 | rdev = &rs->dev[d].rdev; |
| 2452 | |
| 2453 | if (test_bit(d, (void *) rs->rebuild_disks)) { |
| 2454 | clear_bit(In_sync, &rdev->flags); |
| 2455 | clear_bit(Faulty, &rdev->flags); |
| 2456 | mddev->recovery_cp = rdev->recovery_offset = 0; |
| 2457 | /* Bitmap has to be created when we do an "up" takeover */ |
| 2458 | set_bit(MD_ARRAY_FIRST_USE, &mddev->flags); |
| 2459 | } |
| 2460 | |
| 2461 | rdev->new_data_offset = new_data_offset; |
| 2462 | } |
| 2463 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2464 | return 0; |
| 2465 | } |
| 2466 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2467 | /* |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2468 |  * Prepare @rs for reshape, which may: |
| 2469 | * - change raid layout |
| 2470 | * - change chunk size |
| 2471 | * - add disks |
| 2472 | * - remove disks |
| 2473 | */ |
| 2474 | static int rs_setup_reshape(struct raid_set *rs) |
| 2475 | { |
| 2476 | int r = 0; |
| 2477 | unsigned int cur_raid_devs, d; |
| 2478 | struct mddev *mddev = &rs->md; |
| 2479 | struct md_rdev *rdev; |
| 2480 | |
| 2481 | mddev->delta_disks = rs->delta_disks; |
| 2482 | cur_raid_devs = mddev->raid_disks; |
| 2483 | |
| 2484 | /* Ignore impossible layout change whilst adding/removing disks */ |
| 2485 | if (mddev->delta_disks && |
| 2486 | mddev->layout != mddev->new_layout) { |
| 2487 | DMINFO("Ignoring invalid layout change with delta_disks=%d", rs->delta_disks); |
| 2488 | mddev->new_layout = mddev->layout; |
| 2489 | } |
| 2490 | |
| 2491 | /* |
| 2492 | * Adjust array size: |
| 2493 | * |
| 2494 | * - in case of adding disks, array size has |
| 2495 | * to grow after the disk adding reshape, |
| 2496 | 	 *   which'll happen in the event handler; |
| 2497 | * reshape will happen forward, so space has to |
| 2498 | * be available at the beginning of each disk |
| 2499 | * |
| 2500 | * - in case of removing disks, array size |
| 2501 | * has to shrink before starting the reshape, |
| 2502 | * which'll happen here; |
| 2503 | * reshape will happen backward, so space has to |
| 2504 | * be available at the end of each disk |
| 2505 | * |
| 2506 | * - data_offset and new_data_offset are |
| 2507 | 	 *   adjusted for aforementioned out-of-place |
| 2508 | 	 *   reshaping based on userspace passing in |
| 2509 | 	 *   the "data_offset <sectors>" key/value |
| 2510 | 	 *   pair via the constructor |
| 2511 | */ |
| 2512 | |
| 2513 | /* Add disk(s) */ |
| 2514 | if (rs->delta_disks > 0) { |
| 2515 | /* Prepare disks for check in raid4/5/6/10 {check|start}_reshape */ |
| 2516 | for (d = cur_raid_devs; d < rs->raid_disks; d++) { |
| 2517 | rdev = &rs->dev[d].rdev; |
| 2518 | clear_bit(In_sync, &rdev->flags); |
| 2519 | |
| 2520 | /* |
| 2521 | 			 * saved_raid_disk needs to be -1, or recovery_offset will be set to 0 |
| 2522 | * by md, which'll store that erroneously in the superblock on reshape |
| 2523 | */ |
| 2524 | rdev->saved_raid_disk = -1; |
| 2525 | rdev->raid_disk = d; |
| 2526 | |
| 2527 | rdev->sectors = mddev->dev_sectors; |
| 2528 | rdev->recovery_offset = MaxSector; |
| 2529 | } |
| 2530 | |
| 2531 | mddev->reshape_backwards = 0; /* adding disks -> forward reshape */ |
| 2532 | |
| 2533 | /* Remove disk(s) */ |
| 2534 | } else if (rs->delta_disks < 0) { |
| 2535 | r = rs_set_dev_and_array_sectors(rs, true); |
| 2536 | mddev->reshape_backwards = 1; /* removing disk(s) -> backward reshape */ |
| 2537 | |
| 2538 | /* Change layout and/or chunk size */ |
| 2539 | } else { |
| 2540 | /* |
| 2541 | * Reshape layout (e.g. raid5_ls -> raid5_n) and/or chunk size: |
| 2542 | * |
| 2543 | 		 * keeping the number of disks while doing a layout change -> |
| 2544 | 		 * |
| 2545 | 		 * toggle reshape_backwards depending on data_offset: |
| 2546 | * |
| 2547 | * - free space upfront -> reshape forward |
| 2548 | * |
| 2549 | * - free space at the end -> reshape backward |
| 2550 | * |
| 2551 | * |
| 2552 | 		 * This utilizes free reshape space, avoiding the need |
| 2553 | 		 * for userspace to move (parts of) LV segments in |
| 2554 | 		 * case of a layout/chunksize change; for disk |
| 2555 | 		 * adding/removing, reshape space has to be at |
| 2556 | 		 * the proper address (see above with delta_disks): |
| 2557 | * |
| 2558 | * add disk(s) -> begin |
| 2559 | * remove disk(s)-> end |
| 2560 | */ |
| 2561 | mddev->reshape_backwards = rs->dev[0].rdev.data_offset ? 0 : 1; |
| 2562 | } |
| 2563 | |
| 2564 | return r; |
| 2565 | } |
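/*
 * Tiny sketch of the direction rule applied above (values hypothetical):
 * adding disks reshapes forward, removing disks reshapes backward, and
 * an in-place layout/chunk change derives the direction from where the
 * free reshape space sits (data_offset != 0 -> space upfront -> forward).
 */
#include <stdio.h>

static int reshape_backwards(int delta_disks, unsigned long long data_offset)
{
	if (delta_disks > 0)
		return 0;		/* grow: forward */
	if (delta_disks < 0)
		return 1;		/* shrink: backward */
	return data_offset ? 0 : 1;	/* toggle on free-space location */
}

int main(void)
{
	printf("%d %d %d\n", reshape_backwards(1, 0),
	       reshape_backwards(-1, 0), reshape_backwards(0, 4096));
	return 0;
}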
| 2566 | |
| 2567 | /* |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2568 | * Enable/disable discard support on RAID set depending on |
| 2569 | * RAID level and discard properties of underlying RAID members. |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2570 | */ |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2571 | static void configure_discard_support(struct raid_set *rs) |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2572 | { |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2573 | int i; |
| 2574 | bool raid456; |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2575 | struct dm_target *ti = rs->ti; |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2576 | |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2577 | /* Assume discards not supported until after checks below. */ |
| 2578 | ti->discards_supported = false; |
| 2579 | |
| 2580 | /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */ |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2581 | raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2582 | |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2583 | for (i = 0; i < rs->md.raid_disks; i++) { |
Heinz Mauelshagen | d20c4b0 | 2014-10-29 19:02:27 +0100 | [diff] [blame] | 2584 | struct request_queue *q; |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2585 | |
Heinz Mauelshagen | d20c4b0 | 2014-10-29 19:02:27 +0100 | [diff] [blame] | 2586 | if (!rs->dev[i].rdev.bdev) |
| 2587 | continue; |
| 2588 | |
| 2589 | q = bdev_get_queue(rs->dev[i].rdev.bdev); |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2590 | if (!q || !blk_queue_discard(q)) |
| 2591 | return; |
| 2592 | |
| 2593 | if (raid456) { |
| 2594 | if (!q->limits.discard_zeroes_data) |
| 2595 | return; |
| 2596 | if (!devices_handle_discard_safely) { |
| 2597 | DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty."); |
| 2598 | DMERR("Set dm-raid.devices_handle_discard_safely=Y to override."); |
| 2599 | return; |
| 2600 | } |
| 2601 | } |
| 2602 | } |
| 2603 | |
| 2604 | /* All RAID members properly support discards */ |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2605 | ti->discards_supported = true; |
| 2606 | |
| 2607 | /* |
| 2608 | * RAID1 and RAID10 personalities require bio splitting, |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2609 | * RAID0/4/5/6 don't and process large discard bios properly. |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2610 | */ |
Heinz Mauelshagen | 48cf06b | 2014-09-24 17:47:19 +0200 | [diff] [blame] | 2611 | ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10); |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2612 | ti->num_discard_bios = 1; |
| 2613 | } |
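/*
 * Note on the override named in the DMERR messages above (illustrative
 * invocations; module and parameter name taken from the message text):
 *
 *   modprobe dm-raid devices_handle_discard_safely=Y
 *
 * or on the kernel command line:
 *
 *   dm-raid.devices_handle_discard_safely=Y
 *
 * Only set this when all array members are known to return zeroes
 * reliably after discard; raid456 parity consistency depends on it.
 */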
| 2614 | |
| 2615 | /* |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2616 | * Construct a RAID0/1/10/4/5/6 mapping: |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2617 | * Args: |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2618 | * <raid_type> <#raid_params> <raid_params>{0,} \ |
| 2619 | * <#raid_devs> [<meta_dev1> <dev1>]{1,} |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2620 | * |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2621 | * <raid_params> varies by <raid_type>. See 'parse_raid_params' for |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2622 | * details on possible <raid_params>. |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2623 | * |
| 2624 |  * Userspace is free to initialize the metadata devices, hence the superblocks, to |
| 2625 |  * enforce recreation based on the passed-in table parameters. |
| 2626 | * |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2627 | */ |
| 2628 | static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) |
| 2629 | { |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2630 | int r; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2631 | struct raid_type *rt; |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2632 | unsigned num_raid_params, num_raid_devs; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2633 | struct raid_set *rs = NULL; |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2634 | const char *arg; |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2635 | struct rs_layout rs_layout; |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2636 | struct dm_arg_set as = { argc, argv }, as_nrd; |
| 2637 | struct dm_arg _args[] = { |
| 2638 | { 0, as.argc, "Cannot understand number of raid parameters" }, |
| 2639 | { 1, 254, "Cannot understand number of raid devices parameters" } |
| 2640 | }; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2641 | |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2642 | /* Must have <raid_type> */ |
| 2643 | arg = dm_shift_arg(&as); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2644 | if (!arg) { |
| 2645 | ti->error = "No arguments"; |
| 2646 | return -EINVAL; |
| 2647 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2648 | |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2649 | rt = get_raid_type(arg); |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2650 | if (!rt) { |
| 2651 | ti->error = "Unrecognised raid_type"; |
| 2652 | return -EINVAL; |
| 2653 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2654 | |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2655 | /* Must have <#raid_params> */ |
| 2656 | if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error)) |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2657 | return -EINVAL; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2658 | |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2659 | /* number of raid device tupples <meta_dev data_dev> */ |
| 2660 | as_nrd = as; |
| 2661 | dm_consume_args(&as_nrd, num_raid_params); |
| 2662 | _args[1].max = (as_nrd.argc - 1) / 2; |
| 2663 | if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error)) |
Mike Snitzer | 4315784 | 2016-05-30 13:03:37 -0400 | [diff] [blame] | 2664 | return -EINVAL; |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2665 | |
Mike Snitzer | bb91a63 | 2016-06-02 12:06:54 -0400 | [diff] [blame] | 2666 | if (!__within_range(num_raid_devs, 1, MAX_RAID_DEVICES)) { |
Mike Snitzer | bd83a4c | 2016-05-31 14:26:52 -0400 | [diff] [blame] | 2667 | ti->error = "Invalid number of supplied raid devices"; |
| 2668 | return -EINVAL; |
| 2669 | } |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2670 | |
Mike Snitzer | bfcee0e | 2016-06-02 15:08:09 -0400 | [diff] [blame] | 2671 | rs = raid_set_alloc(ti, rt, num_raid_devs); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2672 | if (IS_ERR(rs)) |
| 2673 | return PTR_ERR(rs); |
| 2674 | |
Heinz Mauelshagen | 92c83d7 | 2016-05-19 18:49:25 +0200 | [diff] [blame] | 2675 | r = parse_raid_params(rs, &as, num_raid_params); |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2676 | if (r) |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2677 | goto bad; |
| 2678 | |
Heinz Mauelshagen | 702108d | 2016-05-19 18:49:26 +0200 | [diff] [blame] | 2679 | r = parse_dev_params(rs, &as); |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2680 | if (r) |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2681 | goto bad; |
| 2682 | |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2683 | rs->md.sync_super = super_sync; |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2684 | |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 2685 | r = rs_set_dev_and_array_sectors(rs, false); |
| 2686 | if (r) |
| 2687 | return r; |
| 2688 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2689 | /* |
| 2690 | * Backup any new raid set level, layout, ... |
| 2691 | * requested to be able to compare to superblock |
| 2692 | * members for conversion decisions. |
| 2693 | */ |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2694 | rs_config_backup(rs, &rs_layout); |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2695 | |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2696 | r = analyse_superblocks(ti, rs); |
| 2697 | if (r) |
Jonathan Brassow | b12d437 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2698 | goto bad; |
| 2699 | |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2700 | INIT_WORK(&rs->md.event_work, do_table_event); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2701 | ti->private = rs; |
Alasdair G Kergon | 55a62ee | 2013-03-01 22:45:47 +0000 | [diff] [blame] | 2702 | ti->num_flush_bios = 1; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2703 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2704 | /* Restore any requested new layout for conversion decision */ |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2705 | rs_config_restore(rs, &rs_layout); |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2706 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2707 | if (test_bit(MD_ARRAY_FIRST_USE, &rs->md.flags)) { |
| 2708 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
| 2709 | rs_set_new(rs); |
| 2710 | } else if (rs_is_reshaping(rs)) |
| 2711 | ; /* skip rs setup */ |
| 2712 | else if (rs_takeover_requested(rs)) { |
| 2713 | if (rs_is_reshaping(rs)) { |
| 2714 | ti->error = "Can't takeover a reshaping raid set"; |
| 2715 | return -EPERM; |
| 2716 | } |
| 2717 | |
| 2718 | /* |
| 2719 | * If a takeover is needed, just set the level to |
| 2720 | * the new requested one and allow the raid set to run. |
| 2721 | */ |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2722 | r = rs_check_takeover(rs); |
| 2723 | if (r) |
| 2724 | return r; |
| 2725 | |
| 2726 | r = rs_setup_takeover(rs); |
| 2727 | if (r) |
| 2728 | return r; |
| 2729 | |
Mike Snitzer | 4286325 | 2016-06-02 12:27:46 -0400 | [diff] [blame] | 2730 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
Heinz Mauelshagen | 6e20902 | 2016-06-14 15:23:13 -0400 | [diff] [blame^] | 2731 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 2732 | rs_set_new(rs); |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 2733 | } else if (rs_reshape_requested(rs)) { |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2734 | if (rs_is_reshaping(rs)) { |
| 2735 | ti->error = "raid set already reshaping!"; |
| 2736 | return -EPERM; |
| 2737 | } |
| 2738 | |
| 2739 | if (rs_is_raid10(rs)) { |
| 2740 | if (rs->raid_disks != rs->md.raid_disks && |
| 2741 | __is_raid10_near(rs->md.layout) && |
| 2742 | rs->raid10_copies && |
| 2743 | rs->raid10_copies != __raid10_near_copies(rs->md.layout)) { |
| 2744 | /* |
| 2745 | 			 * raid disks have to be a multiple of data copies to allow this conversion, |
| 2746 | 			 * |
| 2747 | 			 * This is actually not a reshape; it is a |
| 2748 | 			 * rebuild of any additional mirrors per group |
| 2749 | */ |
| 2750 | if (rs->raid_disks % rs->raid10_copies) { |
| 2751 | ti->error = "Can't reshape raid10 mirror groups"; |
| 2752 | return -EINVAL; |
| 2753 | } |
| 2754 | |
| 2755 | 			/* Userspace reordered disks to add/remove mirrors -> adjust raid_disk indexes */ |
| 2756 | __reorder_raid_disk_indexes(rs); |
| 2757 | rs->md.layout = raid10_format_to_md_layout(rs, ALGORITHM_RAID10_NEAR, |
| 2758 | rs->raid10_copies); |
| 2759 | rs->md.new_layout = rs->md.layout; |
| 2760 | |
| 2761 | } else |
| 2762 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); |
| 2763 | |
| 2764 | } else if (rs_is_raid456(rs)) |
| 2765 | set_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags); |
| 2766 | |
| 2767 | /* |
| 2768 | * HM FIXME: process raid1 via delta_disks as well? |
| 2769 | * Would cause allocations in raid1->check_reshape |
| 2770 | * though, thus more issues with potential failures |
| 2771 | */ |
Heinz Mauelshagen | 6e20902 | 2016-06-14 15:23:13 -0400 | [diff] [blame^] | 2772 | else if (rs_is_raid1(rs)) { |
| 2773 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2774 | rs->md.raid_disks = rs->raid_disks; |
Heinz Mauelshagen | 6e20902 | 2016-06-14 15:23:13 -0400 | [diff] [blame^] | 2775 | } |
| 2776 | |
| 2777 | if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { |
| 2778 | set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags); |
| 2779 | set_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags); |
| 2780 | } |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2781 | |
| 2782 | if (rs->md.raid_disks < rs->raid_disks) |
| 2783 | set_bit(MD_ARRAY_FIRST_USE, &rs->md.flags); |
| 2784 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2785 | rs_set_cur(rs); |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 2786 | } else |
| 2787 | rs_set_cur(rs); |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2788 | |
Heinz Mauelshagen | 40ba37e | 2016-06-13 17:55:13 +0200 | [diff] [blame] | 2789 | /* If constructor requested it, change data and new_data offsets */ |
| 2790 | r = rs_adjust_data_offsets(rs); |
| 2791 | if (r) |
| 2792 | return r; |
| 2793 | |
Heinz Mauelshagen | ecbfb9f | 2016-05-19 18:49:33 +0200 | [diff] [blame] | 2794 | /* Start raid set read-only and assumed clean to change in raid_resume() */ |
| 2795 | rs->md.ro = 1; |
| 2796 | rs->md.in_sync = 1; |
| 2797 | set_bit(MD_RECOVERY_FROZEN, &rs->md.recovery); |
Heinz Mauelshagen | 75b8e04 | 2014-09-24 17:47:18 +0200 | [diff] [blame] | 2798 | |
Heinz Mauelshagen | 0cf4503 | 2015-04-29 14:03:04 +0200 | [diff] [blame] | 2799 | /* Has to be held on running the array */ |
| 2800 | mddev_lock_nointr(&rs->md); |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2801 | r = md_run(&rs->md); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2802 | rs->md.in_sync = 0; /* Assume already marked dirty */ |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2803 | |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2804 | if (r) { |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2805 | ti->error = "Failed to run raid array"; |
| 2806 | mddev_unlock(&rs->md); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2807 | goto bad; |
| 2808 | } |
| 2809 | |
| 2810 | rs->callbacks.congested_fn = raid_is_congested; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2811 | dm_table_add_target_callbacks(ti->table, &rs->callbacks); |
| 2812 | |
Jonathan Brassow | 3273727 | 2011-08-02 12:32:07 +0100 | [diff] [blame] | 2813 | mddev_suspend(&rs->md); |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2814 | |
| 2815 | /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ |
| 2816 | if (rs_is_raid456(rs)) { |
| 2817 | r = rs_set_raid456_stripe_cache(rs); |
| 2818 | if (r) |
| 2819 | goto bad_stripe_cache; |
| 2820 | } |
| 2821 | |
| 2822 | /* Now do an early reshape check */ |
| 2823 | if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { |
| 2824 | r = rs_check_reshape(rs); |
| 2825 | if (r) |
| 2826 | return r; |
| 2827 | |
| 2828 | /* Restore new, ctr requested layout to perform check */ |
| 2829 | rs_config_restore(rs, &rs_layout); |
| 2830 | |
| 2831 | r = rs->md.pers->check_reshape(&rs->md); |
| 2832 | if (r) { |
| 2833 | ti->error = "Reshape check failed"; |
| 2834 | goto bad_check_reshape; |
| 2835 | } |
| 2836 | } |
| 2837 | |
| 2838 | mddev_unlock(&rs->md); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2839 | return 0; |
| 2840 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2841 | bad_stripe_cache: |
| 2842 | bad_check_reshape: |
Jonathan Brassow | 63f33b8d | 2012-07-31 21:44:26 -0500 | [diff] [blame] | 2843 | md_stop(&rs->md); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2844 | bad: |
Mike Snitzer | bfcee0e | 2016-06-02 15:08:09 -0400 | [diff] [blame] | 2845 | raid_set_free(rs); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2846 | |
Heinz Mauelshagen | 73c6f23 | 2016-05-19 18:49:24 +0200 | [diff] [blame] | 2847 | return r; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2848 | } |
| 2849 | |
| 2850 | static void raid_dtr(struct dm_target *ti) |
| 2851 | { |
| 2852 | struct raid_set *rs = ti->private; |
| 2853 | |
| 2854 | list_del_init(&rs->callbacks.list); |
| 2855 | md_stop(&rs->md); |
Mike Snitzer | bfcee0e | 2016-06-02 15:08:09 -0400 | [diff] [blame] | 2856 | raid_set_free(rs); |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2857 | } |
| 2858 | |
Mikulas Patocka | 7de3ee5 | 2012-12-21 20:23:41 +0000 | [diff] [blame] | 2859 | static int raid_map(struct dm_target *ti, struct bio *bio) |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2860 | { |
| 2861 | struct raid_set *rs = ti->private; |
NeilBrown | fd01b88 | 2011-10-11 16:47:53 +1100 | [diff] [blame] | 2862 | struct mddev *mddev = &rs->md; |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2863 | |
Heinz Mauelshagen | 9dbd1aa | 2016-06-13 17:55:14 +0200 | [diff] [blame] | 2864 | /* |
| 2865 | 	 * If we're reshaping to add disk(s), ti->len and |
| 2866 | 	 * mddev->array_sectors will differ during the process |
| 2867 | 	 * (ti->len > mddev->array_sectors), so we have to requeue |
| 2868 | 	 * bios with addresses > mddev->array_sectors here, or |
| 2869 | 	 * accesses past the EOD of the component data |
| 2870 | 	 * images would occur and error the raid set. |
| 2871 | */ |
| 2872 | if (unlikely(bio_end_sector(bio) > mddev->array_sectors)) |
| 2873 | return DM_MAPIO_REQUEUE; |
| 2874 | |
NeilBrown | 9d09e66 | 2011-01-13 20:00:02 +0000 | [diff] [blame] | 2875 | mddev->pers->make_request(mddev, bio); |
| 2876 | |
| 2877 | return DM_MAPIO_SUBMITTED; |
| 2878 | } |
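/*
 * Worked example for the EOD guard above (numbers hypothetical): while
 * a reshape grows the set, ti->len may already be 2048 sectors although
 * mddev->array_sectors is still 1024.  A bio ending at sector 1536 then
 * satisfies bio_end_sector(bio) > mddev->array_sectors and is requeued
 * until the reshape has grown the array past it.
 */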
| 2879 | |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 2880 | /* Return string describing the current sync action of @mddev */ |
Jonathan Brassow | be83651 | 2013-04-24 11:42:43 +1000 | [diff] [blame] | 2881 | static const char *decipher_sync_action(struct mddev *mddev) |
| 2882 | { |
| 2883 | if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) |
| 2884 | return "frozen"; |
| 2885 | |
| 2886 | if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || |
| 2887 | (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { |
| 2888 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) |
| 2889 | return "reshape"; |
| 2890 | |
| 2891 | if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { |
| 2892 | if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) |
| 2893 | return "resync"; |
| 2894 | else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) |
| 2895 | return "check"; |
| 2896 | return "repair"; |
| 2897 | } |
| 2898 | |
| 2899 | if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) |
| 2900 | return "recover"; |
| 2901 | } |
| 2902 | |
| 2903 | return "idle"; |
| 2904 | } |
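/*
 * Standalone userspace sketch (flag bit values are hypothetical) of the
 * precedence the decoder above encodes: "frozen" beats everything; with
 * recovery running, reshape beats sync; and a sync that was explicitly
 * requested is either a "check" or a "repair".
 */
#include <stdio.h>

enum {
	FROZEN = 1, RUNNING = 2, RESHAPE = 4, SYNC = 8,
	REQUESTED = 16, CHECK = 32, RECOVER = 64
};

static const char *sync_action(unsigned recovery)
{
	if (recovery & FROZEN)
		return "frozen";
	if (recovery & RUNNING) {
		if (recovery & RESHAPE)
			return "reshape";
		if (recovery & SYNC) {
			if (!(recovery & REQUESTED))
				return "resync";
			return (recovery & CHECK) ? "check" : "repair";
		}
		if (recovery & RECOVER)
			return "recover";
	}
	return "idle";
}

int main(void)
{
	printf("%s\n", sync_action(RUNNING | SYNC));			 /* resync */
	printf("%s\n", sync_action(RUNNING | SYNC | REQUESTED | CHECK)); /* check */
	printf("%s\n", sync_action(FROZEN | RUNNING | RESHAPE));	 /* frozen */
	return 0;
}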
| 2905 | |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 2906 | /* |
| 2907 |  * Return status string for @rdev |
| 2908 | * |
| 2909 | * Status characters: |
| 2910 | * |
| 2911 | * 'D' = Dead/Failed device |
| 2912 | * 'a' = Alive but not in-sync |
| 2913 | * 'A' = Alive and in-sync |
| 2914 | */ |
Mike Snitzer | e6ca5e1 | 2016-06-02 15:27:22 -0400 | [diff] [blame] | 2915 | static const char *__raid_dev_status(struct md_rdev *rdev, bool array_in_sync) |
Heinz Mauelshagen | 3a1c1ef | 2016-05-19 18:49:34 +0200 | [diff] [blame] | 2916 | { |
| 2917 | if (test_bit(Faulty, &rdev->flags)) |
| 2918 | return "D"; |
| 2919 | else if (!array_in_sync || !test_bit(In_sync, &rdev->flags)) |
| 2920 | return "a"; |
| 2921 | else |
| 2922 | return "A"; |
| 2923 | } |

/* Helper to return resync/reshape progress for @rs and @array_in_sync */
static sector_t rs_get_progress(struct raid_set *rs,
				sector_t resync_max_sectors, bool *array_in_sync)
{
	sector_t r, recovery_cp, curr_resync_completed;
	struct mddev *mddev = &rs->md;

	curr_resync_completed = mddev->curr_resync_completed ?: mddev->recovery_cp;
	recovery_cp = mddev->recovery_cp;
	*array_in_sync = false;

	if (rs_is_raid0(rs)) {
		r = resync_max_sectors;
		*array_in_sync = true;

	} else {
		r = mddev->reshape_position;

		/* Reshape is relative to the array size */
		if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ||
		    r != MaxSector) {
			if (r == MaxSector) {
				*array_in_sync = true;
				r = resync_max_sectors;
			} else {
				/* Have to reverse on backward reshape */
				if (mddev->reshape_backwards)
					r = mddev->array_sectors - r;

				/* Divide by # of data stripes */
				sector_div(r, mddev_data_stripes(rs));
			}

		/* Sync is relative to the component device size */
		} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
			r = curr_resync_completed;
		else
			r = recovery_cp;

		if (r == MaxSector) {
			/*
			 * Sync complete.
			 */
			*array_in_sync = true;
			r = resync_max_sectors;
		} else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
			/*
			 * If "check" or "repair" is occurring, the raid set has
			 * undergone an initial sync and the health characters
			 * should not be 'a' anymore.
			 */
			*array_in_sync = true;
		} else {
			struct md_rdev *rdev;

			/*
			 * The raid set may be doing an initial sync, or it may
			 * be rebuilding individual components.  If all the
			 * devices are In_sync, then it is the raid set that is
			 * being initialized.
			 */
			rdev_for_each(rdev, mddev)
				if (!test_bit(In_sync, &rdev->flags))
					*array_in_sync = true;
#if 0
			r = 0; /* HM FIXME: TESTME: https://bugzilla.redhat.com/show_bug.cgi?id=1210637 ? */
#endif
		}
	}

	return r;
}
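
/*
 * Editorial worked example (values invented for illustration): on a
 * 5-device raid5 (4 data stripes) that is mid-reshape with
 * mddev->reshape_position == 40960 sectors, the helper above reports
 * 40960 / 4 = 10240 sectors of progress, converting the array-relative
 * reshape position into the per-device scale the sync ratio uses.
 */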

/* Helper to return @dev name or "-" if !@dev */
static const char *__get_dev_name(struct dm_dev *dev)
{
	return dev ? dev->name : "-";
}

static void raid_status(struct dm_target *ti, status_type_t type,
			unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;
	struct r5conf *conf = mddev->private;
	int max_nr_stripes = conf ? conf->max_nr_stripes : 0;
	bool array_in_sync;
	unsigned int raid_param_cnt = 1; /* at least 1 for chunksize */
	unsigned int sz = 0;
	unsigned int write_mostly_params = 0;
	sector_t progress, resync_max_sectors, resync_mismatches;
	const char *sync_action;
	struct raid_type *rt;
	struct md_rdev *rdev;

	switch (type) {
	case STATUSTYPE_INFO:
		/* *Should* always succeed */
		rt = get_raid_type_by_ll(mddev->new_level, mddev->new_layout);
		if (!rt)
			return;

		DMEMIT("%s %d ", rt->name, mddev->raid_disks);

		/* Access most recent mddev properties for status output */
		smp_rmb();
		/* Get sensible max sectors even if raid set not yet started */
		resync_max_sectors = test_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags) ?
				     mddev->resync_max_sectors : mddev->dev_sectors;
		progress = rs_get_progress(rs, resync_max_sectors, &array_in_sync);
		resync_mismatches = (mddev->last_sync_action && !strcasecmp(mddev->last_sync_action, "check")) ?
				    atomic64_read(&mddev->resync_mismatches) : 0;
		sync_action = decipher_sync_action(&rs->md);

		/* HM FIXME: do we want another state char for raid0? It shows 'D' or 'A' now */
		rdev_for_each(rdev, mddev)
			DMEMIT(__raid_dev_status(rdev, array_in_sync));

		/*
		 * In-sync/Reshape ratio:
		 *  The in-sync ratio shows the progress of:
		 *   - Initializing the raid set
		 *   - Rebuilding a subset of devices of the raid set
		 *  The user can distinguish between the two by referring
		 *  to the status characters.
		 *
		 *  The reshape ratio shows the progress of
		 *  changing the raid layout or the number of
		 *  disks of a raid set
		 */
		DMEMIT(" %llu/%llu", (unsigned long long) progress,
		       (unsigned long long) resync_max_sectors);

		/*
		 * v1.5.0+:
		 *
		 * Sync action:
		 *   See Documentation/device-mapper/dm-raid.txt for
		 *   information on each of these states.
		 */
		DMEMIT(" %s", sync_action);

		/*
		 * v1.5.0+:
		 *
		 * resync_mismatches/mismatch_cnt
		 *   This field shows the number of discrepancies found when
		 *   performing a "check" of the raid set.
		 */
		DMEMIT(" %llu", (unsigned long long) resync_mismatches);

		/*
		 * v1.9.0+:
		 *
		 * data_offset (needed for out of space reshaping)
		 *   This field shows the data offset into the data
		 *   image LV where the first stripe's data starts.
		 *
		 * We keep data_offset equal on all raid disks of the set,
		 * so retrieving it from the first raid disk is sufficient.
		 */
		DMEMIT(" %llu", (unsigned long long) rs->dev[0].rdev.data_offset);
		break;

	case STATUSTYPE_TABLE:
		/* Report the table line string you would use to construct this raid set */

		/* Calculate raid parameter count */
		rdev_for_each(rdev, mddev)
			if (test_bit(WriteMostly, &rdev->flags))
				write_mostly_params += 2;
		raid_param_cnt += memweight(rs->rebuild_disks,
					    DISKS_ARRAY_ELEMS * sizeof(*rs->rebuild_disks)) * 2 +
				  write_mostly_params +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_NO_ARGS) +
				  hweight32(rs->ctr_flags & CTR_FLAG_OPTIONS_ONE_ARG) * 2;
		/* Emit table line */
		DMEMIT("%s %u %u", rs->raid_type->name, raid_param_cnt, mddev->new_chunk_sectors);
		if (test_bit(__CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
			DMEMIT(" %s %s", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_FORMAT),
			       raid10_md_layout_to_format(mddev->layout));
		if (test_bit(__CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_RAID10_COPIES),
			       raid10_md_layout_to_copies(mddev->layout));
		if (test_bit(__CTR_FLAG_NOSYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_NOSYNC));
		if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags))
			DMEMIT(" %s", dm_raid_arg_name_by_flag(CTR_FLAG_SYNC));
		if (test_bit(__CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_REGION_SIZE),
			       (unsigned long long) to_sector(mddev->bitmap_info.chunksize));
		if (test_bit(__CTR_FLAG_DATA_OFFSET, &rs->ctr_flags))
			DMEMIT(" %s %llu", dm_raid_arg_name_by_flag(CTR_FLAG_DATA_OFFSET),
			       (unsigned long long) rs->data_offset);
		if (test_bit(__CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_DAEMON_SLEEP),
			       mddev->bitmap_info.daemon_sleep);
		if (test_bit(__CTR_FLAG_DELTA_DISKS, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_DELTA_DISKS),
			       mddev->delta_disks);
		if (test_bit(__CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_STRIPE_CACHE),
			       max_nr_stripes);
		rdev_for_each(rdev, mddev)
			if (test_bit(rdev->raid_disk, (void *) rs->rebuild_disks))
				DMEMIT(" %s %u", dm_raid_arg_name_by_flag(CTR_FLAG_REBUILD),
				       rdev->raid_disk);
		rdev_for_each(rdev, mddev)
			if (test_bit(WriteMostly, &rdev->flags))
				DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_WRITE_MOSTLY),
				       rdev->raid_disk);
		if (test_bit(__CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
			DMEMIT(" %s %lu", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_WRITE_BEHIND),
			       mddev->bitmap_info.max_write_behind);
		if (test_bit(__CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MAX_RECOVERY_RATE),
			       mddev->sync_speed_max);
		if (test_bit(__CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
			DMEMIT(" %s %d", dm_raid_arg_name_by_flag(CTR_FLAG_MIN_RECOVERY_RATE),
			       mddev->sync_speed_min);
		DMEMIT(" %d", rs->raid_disks);
		rdev_for_each(rdev, mddev) {
			struct raid_dev *rd = container_of(rdev, struct raid_dev, rdev);

			DMEMIT(" %s %s", __get_dev_name(rd->meta_dev),
			       __get_dev_name(rd->data_dev));
		}
	}
}
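
/*
 * Editorial examples (device names and numbers invented for illustration).
 * The STATUSTYPE_INFO line for a healthy 3-device raid5 that has finished
 * its initial sync might read:
 *
 *   raid5_ls 3 AAA 976640/976640 idle 0 8192
 *
 * i.e. <raid_type> <#devices> <health_chars> <sync_ratio> <sync_action>
 * <mismatch_cnt> <data_offset>.  A matching STATUSTYPE_TABLE line, with
 * only region_size among the optional parameters, could look like:
 *
 *   raid5_ls 3 64 region_size 1024 3 254:0 254:1 254:2 254:3 254:4 254:5
 *
 * i.e. <raid_type> <#raid_params> <chunk_sectors> <params...> <#raid_devs>
 * followed by one <meta_dev> <data_dev> pair per raid device.
 */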

static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (!mddev->pers || !mddev->pers->sync_request)
		return -EINVAL;

	if (!strcasecmp(argv[0], "frozen"))
		set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	else
		clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

	if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
		if (mddev->sync_thread) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			md_reap_sync_thread(mddev);
		}
	} else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
		   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return -EBUSY;
	else if (!strcasecmp(argv[0], "resync"))
		; /* MD_RECOVERY_NEEDED set below */
	else if (!strcasecmp(argv[0], "recover"))
		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
	else {
		if (!strcasecmp(argv[0], "check"))
			set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		else if (strcasecmp(argv[0], "repair"))
			return -EINVAL;
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	}
	if (mddev->ro == 2) {
		/*
		 * A write to sync_action is enough to justify
		 * canceling read-auto mode
		 */
		mddev->ro = 0;
		if (!mddev->suspended && mddev->sync_thread)
			md_wakeup_thread(mddev->sync_thread);
	}
	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	if (!mddev->suspended && mddev->thread)
		md_wakeup_thread(mddev->thread);

	return 0;
}
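
/*
 * Editorial usage sketch (not in the original source): these messages are
 * sent via dmsetup, e.g.
 *
 *   dmsetup message my_raid5 0 check    # start a scrub
 *   dmsetup message my_raid5 0 idle     # stop the current sync action
 *
 * where "my_raid5" is a hypothetical mapped device name and 0 is the
 * target's starting sector.
 */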

static int raid_iterate_devices(struct dm_target *ti,
				iterate_devices_callout_fn fn, void *data)
{
	struct raid_set *rs = ti->private;
	unsigned i;
	int r = 0;

	for (i = 0; !r && i < rs->md.raid_disks; i++)
		if (rs->dev[i].data_dev)
			r = fn(ti,
			       rs->dev[i].data_dev,
			       0, /* No offset on data devs */
			       rs->md.dev_sectors,
			       data);

	return r;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct raid_set *rs = ti->private;
	unsigned chunk_size = rs->md.chunk_sectors << 9;
	struct r5conf *conf = rs->md.private;

	blk_limits_io_min(limits, chunk_size);
	blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
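
/*
 * Editorial worked example (numbers invented): for a 6-device raid6 with
 * 64KiB chunks, chunk_size is 65536 bytes and max_degraded is 2, so the
 * optimal I/O size advertised to the block layer is 65536 * (6 - 2) =
 * 256KiB, i.e. one full stripe of data.
 */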

static void raid_presuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;

	if (test_and_clear_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		if (!rs->md.suspended)
			mddev_suspend(&rs->md);
		rs->md.ro = 1;
	}
}
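
/*
 * Editorial note (not in the original source): device-mapper invokes these
 * hooks in the order presuspend -> postsuspend on suspend, and
 * preresume -> resume on resume, so writes are quiesced via
 * md_stop_writes() before the md device itself is suspended and marked
 * read-only above.
 */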

static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
	int i;
	uint64_t failed_devices, cleared_failed_devices = 0;
	unsigned long flags;
	struct dm_raid_superblock *sb;
	struct md_rdev *r;

	for (i = 0; i < rs->md.raid_disks; i++) {
		r = &rs->dev[i].rdev;
		if (test_bit(Faulty, &r->flags) && r->sb_page &&
		    sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
				 1)) {
			DMINFO("Faulty %s device #%d has readable super block."
			       " Attempting to revive it.",
			       rs->raid_type->name, i);

			/*
			 * Faulty bit may be set, but sometimes the array can
			 * be suspended before the personalities can respond
			 * by removing the device from the array (i.e. calling
			 * 'hot_remove_disk').  If they haven't yet removed
			 * the failed device, its 'raid_disk' number will be
			 * '>= 0' - meaning we must call this function
			 * ourselves.
			 */
			if ((r->raid_disk >= 0) &&
			    (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
				/* Failed to revive this device, try next */
				continue;

			r->raid_disk = i;
			r->saved_raid_disk = i;
			flags = r->flags;
			clear_bit(Faulty, &r->flags);
			clear_bit(WriteErrorSeen, &r->flags);
			clear_bit(In_sync, &r->flags);
			if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
				r->raid_disk = -1;
				r->saved_raid_disk = -1;
				r->flags = flags;
			} else {
				r->recovery_offset = 0;
				/* 1ULL avoids an undefined 32-bit shift for disk #32 and up */
				cleared_failed_devices |= 1ULL << i;
			}
		}
	}
	if (cleared_failed_devices) {
		rdev_for_each(r, &rs->md) {
			sb = page_address(r->sb_page);
			failed_devices = le64_to_cpu(sb->failed_devices);
			failed_devices &= ~cleared_failed_devices;
			sb->failed_devices = cpu_to_le64(failed_devices);
		}
	}
}

static int __load_dirty_region_bitmap(struct raid_set *rs)
{
	int r = 0;

	/* Try loading the bitmap unless "raid0", which does not have one */
	if (!rs_is_raid0(rs) &&
	    !test_and_set_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags)) {
		r = bitmap_load(&rs->md);
		if (r)
			DMERR("Failed to load bitmap");
	}

	return r;
}

/* Enforce updating all superblocks */
static void rs_update_sbs(struct raid_set *rs)
{
	struct mddev *mddev = &rs->md;
	int ro = mddev->ro;

	set_bit(MD_CHANGE_DEVS, &mddev->flags);
	mddev->ro = 0;
	md_update_sb(mddev, 1);
	mddev->ro = ro;
}
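
/*
 * Editorial note (not in the original source): md_update_sb() skips
 * writing superblocks on read-only arrays, so rs_update_sbs() temporarily
 * clears mddev->ro around the call to guarantee the MD_CHANGE_DEVS update
 * actually reaches disk, then restores the previous read-only state.
 */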

/*
 * Reshape changes the raid algorithm of @rs to a new one within its
 * personality (e.g. raid6_zr -> raid6_nc), changes the stripe size,
 * adds/removes disks from the raid set thus growing/shrinking it, or
 * resizes the set
 *
 * Call mddev_lock_nointr() before!
 */
static int rs_start_reshape(struct raid_set *rs)
{
	int r;
	struct mddev *mddev = &rs->md;
	struct md_personality *pers = mddev->pers;

	r = rs_setup_reshape(rs);
	if (r)
		return r;

	/* Need to be resumed to be able to start reshape, recovery is frozen until raid_resume() though */
	if (mddev->suspended)
		mddev_resume(mddev);

	/*
	 * Check any reshape constraints enforced by the personality
	 *
	 * May as well already kick the reshape off so that
	 * pers->start_reshape() becomes optional.
	 */
	r = pers->check_reshape(mddev);
	if (r) {
		rs->ti->error = "pers->check_reshape() failed";
		return r;
	}

	/*
	 * Personality may not provide start reshape method in which
	 * case check_reshape above has already covered everything
	 */
	if (pers->start_reshape) {
		r = pers->start_reshape(mddev);
		if (r) {
			rs->ti->error = "pers->start_reshape() failed";
			return r;
		}
	}

	/* Suspend because a resume will happen in raid_resume() */
	if (!mddev->suspended)
		mddev_suspend(mddev);

	/*
	 * Now that the reshape is set up, update the superblocks to
	 * reflect the fact so that a table reload will access the
	 * proper superblock content in the ctr.
	 */
	rs_update_sbs(rs);

	return 0;
}
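
/*
 * Editorial example (not in the original source): growing a 4-device
 * raid5 to 5 devices would have been requested through a "delta_disks 1"
 * ctr argument; rs_setup_reshape() stages the new layout, and the raid456
 * personality's check_reshape()/start_reshape() pair then validates and
 * launches the actual stripe relocation once the set is resumed.
 */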

static int raid_preresume(struct dm_target *ti)
{
	int r;
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	/* This is a resume after a suspend of the set -> it's already started */
	if (test_and_set_bit(RT_FLAG_RS_PRERESUMED, &rs->runtime_flags))
		return 0;

	/*
	 * The superblocks need to be updated on disk if the
	 * array is new or new devices got added (thus zeroed
	 * out by userspace) or __load_dirty_region_bitmap
	 * will overwrite them in core with old data or fail.
	 */
	if (test_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags))
		rs_update_sbs(rs);

	/*
	 * Disable/enable discard support on raid set after any
	 * conversion, because devices can have been added
	 */
	configure_discard_support(rs);

	/* Load the bitmap from disk unless raid0 */
	r = __load_dirty_region_bitmap(rs);
	if (r)
		return r;

	/* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) */
	if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) &&
	    mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)) {
		r = bitmap_resize(mddev->bitmap, mddev->dev_sectors,
				  to_bytes(rs->requested_bitmap_chunk_sectors), 0);
		if (r)
			DMERR("Failed to resize bitmap");
	}

	/* Check for any resize/reshape on @rs and adjust/initiate */
	/* Be prepared for mddev_resume() in raid_resume() */
	set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
	if (mddev->recovery_cp && mddev->recovery_cp < MaxSector) {
		set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
		set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		mddev->resync_min = mddev->recovery_cp;
	}

	rs_set_capacity(rs);

	/* Check for any reshape request and region size change unless new raid set */
	if (test_and_clear_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
		/* Initiate a reshape. */
		mddev_lock_nointr(mddev);
		r = rs_start_reshape(rs);
		mddev_unlock(mddev);
		if (r)
			DMWARN("Failed to check/start reshape, continuing without change");
		r = 0;
	}

	return r;
}

static void raid_resume(struct dm_target *ti)
{
	struct raid_set *rs = ti->private;
	struct mddev *mddev = &rs->md;

	if (test_and_set_bit(RT_FLAG_RS_RESUMED, &rs->runtime_flags)) {
		/*
		 * A secondary resume while the device is active.
		 * Take this opportunity to check whether any failed
		 * devices are reachable again.
		 */
		attempt_restore_of_faulty_devices(rs);
	} else {
		mddev->ro = 0;
		mddev->in_sync = 0;

		/*
		 * When passing in flags to the ctr, we expect userspace
		 * to reset them because they made it to the superblocks
		 * and reload the mapping anyway.
		 *
		 * -> only unfreeze recovery in case of a table reload or
		 *    we'll have a bogus recovery/reshape position
		 *    retrieved from the superblock by the ctr because
		 *    the ongoing recovery/reshape will change it after read.
		 */
		if (!test_bit(RT_FLAG_KEEP_RS_FROZEN, &rs->runtime_flags))
			clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

		if (mddev->suspended)
			mddev_resume(mddev);
	}
}

static struct target_type raid_target = {
	.name = "raid",
	.version = {1, 9, 0},
	.module = THIS_MODULE,
	.ctr = raid_ctr,
	.dtr = raid_dtr,
	.map = raid_map,
	.status = raid_status,
	.message = raid_message,
	.iterate_devices = raid_iterate_devices,
	.io_hints = raid_io_hints,
	.presuspend = raid_presuspend,
	.postsuspend = raid_postsuspend,
	.preresume = raid_preresume,
	.resume = raid_resume,
};

static int __init dm_raid_init(void)
{
	DMINFO("Loading target version %u.%u.%u",
	       raid_target.version[0],
	       raid_target.version[1],
	       raid_target.version[2]);
	return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
	dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);

module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
		 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
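
/*
 * Editorial usage note (not in the original source): with 0644 permissions
 * the parameter can be set at module load time or toggled at runtime, e.g.
 *
 *   modprobe dm-raid devices_handle_discard_safely=Y
 *   echo Y > /sys/module/dm_raid/parameters/devices_handle_discard_safely
 */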

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid0");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_AUTHOR("Heinz Mauelshagen <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");