/*
   md.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;

/* Bad block numbers are stored sorted in a single page.
 * 64 bits are used for each block or extent.
 * 54 bits are sector number, 9 bits are extent size,
 * 1 bit is an 'acknowledged' flag.
 */
#define MD_MAX_BADBLOCKS	(PAGE_SIZE/8)

/*
 * MD's 'extended' device
 */
struct md_rdev {
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	/*
	 * If meta_bdev is non-NULL, it means that a separate device is
	 * being used to store the metadata (superblock/bitmap) which
	 * would otherwise be contained on the same device as the data (bdev).
	 */
	struct block_device *meta_bdev;
	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page, *bb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	sb_start;	/* offset of the super block (in 512-byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:    faulty==1 in_sync==0
	 * Fully working:  faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array
	 *                 faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1
	 * This reduces the burden of testing multiple flags in many cases
	 */

	unsigned long	flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	AutoDetected	7		/* added by auto-detect */
#define Blocked		8		/* An error occurred but has not yet
					 * been acknowledged by the metadata
					 * handler, so don't allow writes
					 * until it is cleared */
#define WriteErrorSeen	9		/* A write error has been seen on this
					 * device
					 */
#define FaultRecorded	10		/* Intermediate state for clearing
					 * Blocked.  The Fault is/will-be
					 * recorded in the metadata, but that
					 * metadata hasn't been stored safely
					 * on disk yet.
					 */
#define BlockedBadBlocks 11		/* A writer is blocked because they
					 * found an unacknowledged bad-block.
					 * This can safely be cleared at any
					 * time, and the writer will re-check.
					 * It may be set at any time, and at
					 * worst the writer will timeout and
					 * re-check.  So setting it as
					 * accurately as possible is good, but
					 * not absolutely critical.
					 */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t	recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* monotonic time since our
						 * last read error
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state; /* handle for 'state'
					   * sysfs entry */

	struct badblocks {
		int	count;		/* count of bad blocks */
		int	unacked_exist;	/* there probably are unacknowledged
					 * bad blocks.  This is only cleared
					 * when a read discovers none
					 */
		int	shift;		/* shift from sectors to block size
					 * a -ve shift means badblocks are
					 * disabled. */
		u64	*page;		/* badblock list */
		int	changed;
		seqlock_t lock;

		sector_t sector;
		sector_t size;		/* in sectors */
	} badblocks;
};

#define BB_LEN_MASK	(0x00000000000001FFULL)
#define BB_OFFSET_MASK	(0x7FFFFFFFFFFFFE00ULL)
#define BB_ACK_MASK	(0x8000000000000000ULL)
#define BB_MAX_LEN	512
#define BB_OFFSET(x)	(((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x)	(((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x)	(!!((x) & BB_ACK_MASK))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))

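/*
 * Worked example (values chosen for illustration only): an acknowledged
 * 8-sector bad range starting at sector 1000 packs into one u64 as
 *
 *	u64 bb = BB_MAKE(1000, 8, 1);	// (1000<<9) | (8-1) | (1ULL<<63)
 *
 * and unpacks as BB_OFFSET(bb) == 1000, BB_LEN(bb) == 8 (lengths are
 * stored as len-1, hence the range 1..BB_MAX_LEN), BB_ACK(bb) == 1.
 */
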
extern int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
			  sector_t *first_bad, int *bad_sectors);
static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
			      sector_t *first_bad, int *bad_sectors)
{
	if (unlikely(rdev->badblocks.count)) {
		int rv = md_is_badblock(&rdev->badblocks, rdev->data_offset + s,
					sectors,
					first_bad, bad_sectors);
		if (rv)
			*first_bad -= rdev->data_offset;
		return rv;
	}
	return 0;
}
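
/*
 * Typical read-path check (illustrative sketch; raid1/raid10 do a more
 * elaborate version of this before reading from 'rdev').  Note that 's'
 * and the returned 'first_bad' are both relative to the data area:
 *
 *	sector_t first_bad;
 *	int bad_sectors;
 *
 *	if (is_badblock(rdev, this_sector, sectors,
 *			&first_bad, &bad_sectors)) {
 *		if (first_bad <= this_sector)
 *			try another device - the read starts in a bad range
 *		else
 *			sectors = first_bad - this_sector; shorten the read
 *	}
 */
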
extern int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
			      int acknowledged);
extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors);
extern void md_ack_all_badblocks(struct badblocks *bb);

struct mddev_s
{
	void				*private;
	struct mdk_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */
	int				ready; /* See when safe to pass
						* IO requests down */
	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time_t				ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;

	atomic_t			plug_cnt;	/* If device is expecting
							 * more bios soon.
							 */
	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt; /* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	sector_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  A reshape is happening
	 *
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300 */
301#define MD_RECOVERY_RUNNING 0
302#define MD_RECOVERY_SYNC 1
Neil Brown72a23c22008-06-28 08:31:41 +1000303#define MD_RECOVERY_RECOVER 2
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304#define MD_RECOVERY_INTR 3
305#define MD_RECOVERY_DONE 4
306#define MD_RECOVERY_NEEDED 5
NeilBrown24dd4692005-11-08 21:39:26 -0800307#define MD_RECOVERY_REQUESTED 6
308#define MD_RECOVERY_CHECK 7
NeilBrownccfcc3c2006-03-27 01:18:09 -0800309#define MD_RECOVERY_RESHAPE 8
NeilBrown5fd6c1d2006-06-26 00:27:40 -0700310#define MD_RECOVERY_FROZEN 9
311
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312 unsigned long recovery;
NeilBrown53890422011-07-27 11:00:36 +1000313 /* If a RAID personality determines that recovery (of a particular
314 * device) will fail due to a read error on the source device, it
315 * takes a copy of this number and does not attempt recovery again
316 * until this number changes.
317 */
318 int recovery_disabled;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700319
320 int in_sync; /* know to not need resync */
NeilBrownc8c00a62009-08-10 12:50:52 +1000321 /* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
322 * that we are never stopping an array while it is open.
323 * 'reconfig_mutex' protects all other reconfiguration.
324 * These locks are separate due to conflicting interactions
325 * with bdev->bd_mutex.
326 * Lock ordering is:
327 * reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
328 * bd_mutex -> open_mutex: e.g. __blkdev_get -> md_open
329 */
330 struct mutex open_mutex;
NeilBrowndf5b89b2006-03-27 01:18:20 -0800331 struct mutex reconfig_mutex;
NeilBrownf2ea68c2008-07-21 17:05:25 +1000332 atomic_t active; /* general refcount */
333 atomic_t openers; /* number of active opens */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334
NeilBrownf0b4f7e2011-02-24 17:26:41 +1100335 int changed; /* True if we might need to
336 * reread partition info */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337 int degraded; /* whether md should consider
338 * adding a spare
339 */
340
341 atomic_t recovery_active; /* blocks scheduled, but not written */
342 wait_queue_head_t recovery_wait;
343 sector_t recovery_cp;
Neil Brown5e96ee62008-06-28 08:31:24 +1000344 sector_t resync_min; /* user requested sync
345 * starts here */
NeilBrownc6207272008-02-06 01:39:52 -0800346 sector_t resync_max; /* resync should pause
347 * when it gets here */
NeilBrown06d91a52005-06-21 17:17:12 -0700348
NeilBrownb62b7592008-10-21 13:25:21 +1100349 struct sysfs_dirent *sysfs_state; /* handle for 'array_state'
350 * file in sysfs.
351 */
NeilBrown0c3573f2009-01-09 08:31:05 +1100352 struct sysfs_dirent *sysfs_action; /* handle for 'sync_action' */
NeilBrownb62b7592008-10-21 13:25:21 +1100353
NeilBrownd3374822009-01-09 08:31:10 +1100354 struct work_struct del_work; /* used for delayed sysfs removal */
355
NeilBrown06d91a52005-06-21 17:17:12 -0700356 spinlock_t write_lock;
NeilBrown3d310eb2005-06-21 17:17:26 -0700357 wait_queue_head_t sb_wait; /* for waiting on superblock updates */
NeilBrown7bfa19f2005-06-21 17:17:28 -0700358 atomic_t pending_writes; /* number of active superblock writes */
NeilBrown06d91a52005-06-21 17:17:12 -0700359
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360 unsigned int safemode; /* if set, update "clean" superblock
361 * when no writes pending.
362 */
363 unsigned int safemode_delay;
364 struct timer_list safemode_timer;
365 atomic_t writes_pending;
Jens Axboe165125e2007-07-24 09:28:11 +0200366 struct request_queue *queue; /* for plugging ... */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367
NeilBrown32a76272005-06-21 17:17:14 -0700368 struct bitmap *bitmap; /* the bitmap for the device */
NeilBrownc3d97142009-12-14 12:49:52 +1100369 struct {
370 struct file *file; /* the bitmap file */
NeilBrownf6af9492009-12-14 12:49:54 +1100371 loff_t offset; /* offset from superblock of
NeilBrownc3d97142009-12-14 12:49:52 +1100372 * start of bitmap. May be
373 * negative, but not '0'
NeilBrownf6af9492009-12-14 12:49:54 +1100374 * For external metadata, offset
375 * from start of device.
NeilBrownc3d97142009-12-14 12:49:52 +1100376 */
NeilBrownf6af9492009-12-14 12:49:54 +1100377 loff_t default_offset; /* this is the offset to use when
NeilBrownc3d97142009-12-14 12:49:52 +1100378 * hot-adding a bitmap. It should
379 * eventually be settable by sysfs.
380 */
381 struct mutex mutex;
NeilBrown42a04b52009-12-14 12:49:53 +1100382 unsigned long chunksize;
NeilBrownac2f40b2010-06-01 19:37:31 +1000383 unsigned long daemon_sleep; /* how many jiffies between updates? */
NeilBrown42a04b52009-12-14 12:49:53 +1100384 unsigned long max_write_behind; /* write-behind mode */
NeilBrownece5cff2009-12-14 12:49:56 +1100385 int external;
NeilBrownc3d97142009-12-14 12:49:52 +1100386 } bitmap_info;
NeilBrown32a76272005-06-21 17:17:14 -0700387
Robert Becker1e509152009-12-14 12:49:58 +1100388 atomic_t max_corr_read_errors; /* max read retries */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700389 struct list_head all_mddevs;
NeilBrowna2826aa2009-12-14 12:49:49 +1100390
NeilBrowna64c8762010-04-14 17:15:37 +1000391 struct attribute_group *to_remove;
NeilBrown252ac522010-06-01 19:37:29 +1000392
NeilBrowna167f662010-10-26 18:31:13 +1100393 struct bio_set *bio_set;
394
Tejun Heoe9c74692010-09-03 11:56:18 +0200395 /* Generic flush handling.
396 * The last to finish preflush schedules a worker to submit
397 * the rest of the request (without the REQ_FLUSH flag).
NeilBrowna2826aa2009-12-14 12:49:49 +1100398 */
Tejun Heoe9c74692010-09-03 11:56:18 +0200399 struct bio *flush_bio;
NeilBrowna2826aa2009-12-14 12:49:49 +1100400 atomic_t flush_pending;
Tejun Heoe9c74692010-09-03 11:56:18 +0200401 struct work_struct flush_work;
NeilBrown768a4182010-07-26 11:49:55 +1000402 struct work_struct event_work; /* used by dm to report failure event */
NeilBrown3cb03002011-10-11 16:45:26 +1100403 void (*sync_super)(mddev_t *mddev, struct md_rdev *rdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404};
405
406
static inline void rdev_dec_pending(struct md_rdev *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}
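
/*
 * Pairs with rdev->nr_pending: a personality elevates the count before
 * issuing IO to a member device and drops it on completion, so a faulty
 * device is only handed to the recovery thread once it is idle.
 * Illustrative sketch of the usual pattern:
 *
 *	atomic_inc(&rdev->nr_pending);
 *	generic_make_request(bio);	// bio aimed at rdev->bdev
 *	...
 *	rdev_dec_pending(rdev, mddev);	// from the bio's end_io path
 */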

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(mddev_t *mddev, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, struct md_rdev *rdev);
	int (*hot_add_disk) (mddev_t *mddev, struct md_rdev *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another. The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2drive raid1 -> 2drive raid5
	 *      ndrive raid5 -> degraded n+1drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};

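/*
 * A personality fills in the hooks it supports and registers itself from
 * its module init.  Minimal sketch (illustrative only; the 'example_*'
 * names and the level number are hypothetical, and real personalities
 * such as raid1 implement many more of the hooks above):
 *
 *	static struct mdk_personality example_personality = {
 *		.name		= "example",
 *		.level		= -8,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *		.status		= example_status,
 *	};
 *
 *	register_md_personality(&example_personality);	  // module_init
 *	unregister_md_personality(&example_personality);  // module_exit
 */
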
struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;
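
/*
 * Entries of this type are defined in md.c with __ATTR, for example
 * (sketch of md.c's pattern):
 *
 *	static struct md_sysfs_entry md_level =
 *	__ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store);
 *
 * where level_show()/level_store() have the show/store signatures above.
 */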

static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}

static inline char * mdname (mddev_t * mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

static inline int sysfs_link_rdev(mddev_t *mddev, struct md_rdev *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
}

static inline void sysfs_unlink_rdev(mddev_t *mddev, struct md_rdev *rdev)
{
	char nm[20];
	sprintf(nm, "rd%d", rdev->raid_disk);
	sysfs_remove_link(&mddev->kobj, nm);
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, tmp, mddev)					\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)

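/*
 * Example (illustrative): count the in-sync members of an array.  The
 * extra 'tmp' cursor is what lets the loop body safely remove 'rdev'
 * from the list; rdev_for_each_rcu must run under rcu_read_lock().
 *
 *	struct md_rdev *rdev, *tmp;
 *	int cnt = 0;
 *
 *	rdev_for_each(rdev, tmp, mddev)
 *		if (test_bit(In_sync, &rdev->flags))
 *			cnt++;
 */
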
typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)

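/*
 * Usage sketch (illustrative; raid1's barrier code does something very
 * like this with its private 'conf').  The caller must already hold
 * 'lock'; the macro drops it around schedule() and re-takes it before
 * re-testing 'condition'.  'cmd' runs after the unlock, before sleeping,
 * and may be empty:
 *
 *	spin_lock_irq(&conf->resync_lock);
 *	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 *			    conf->resync_lock, );
 *	spin_unlock_irq(&conf->resync_lock);
 */
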
static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev),
				mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t **threadp);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, struct md_rdev *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_flush_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int rw, bool metadata_op);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(struct md_rdev *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);

extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern int md_rdev_init(struct md_rdev *rdev);

extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
				   mddev_t *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
				   mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
extern void md_trim_bio(struct bio *bio, int offset, int size);
#endif /* _MD_MD_H */