/*
   md_k.h : kernel internal structure of the Linux MD driver
          Copyright (C) 1996-98 Ingo Molnar, Gadi Oxman

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#ifndef _MD_MD_H
#define _MD_MD_H

#include <linux/blkdev.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#define MaxSector (~(sector_t)0)

typedef struct mddev_s mddev_t;
typedef struct mdk_rdev_s mdk_rdev_t;

/* generic plugging support - like that provided with request_queue,
 * but does not require a request_queue
 */
struct plug_handle {
	void			(*unplug_fn)(struct plug_handle *);
	struct timer_list	unplug_timer;
	struct work_struct	unplug_work;
	unsigned long		unplug_flag;
};
#define	PLUGGED_FLAG 1
void plugger_init(struct plug_handle *plug,
		  void (*unplug_fn)(struct plug_handle *));
void plugger_set_plug(struct plug_handle *plug);
int plugger_remove_plug(struct plug_handle *plug);
static inline void plugger_flush(struct plug_handle *plug)
{
	del_timer_sync(&plug->unplug_timer);
	cancel_work_sync(&plug->unplug_work);
}
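
/*
 * Illustrative usage sketch (not part of the original header): a personality
 * that wants to defer work could wire the plugger up roughly as below;
 * 'conf', 'struct my_conf' and my_unplug() are hypothetical names.
 *
 *	static void my_unplug(struct plug_handle *plug)
 *	{
 *		struct my_conf *conf = container_of(plug, struct my_conf, plug);
 *		(kick off whatever work was deferred while plugged)
 *	}
 *
 *	plugger_init(&conf->plug, my_unplug);	(once, at setup)
 *	plugger_set_plug(&conf->plug);		(defer work; arms the unplug timer)
 *	plugger_flush(&conf->plug);		(at teardown, before freeing 'conf')
 */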

/*
 * MD's 'extended' device
 */
struct mdk_rdev_s
{
	struct list_head same_set;	/* RAID devices within the same set */

	sector_t sectors;		/* Device size (in 512-byte sectors) */
	mddev_t *mddev;			/* RAID array if running */
	int last_events;		/* IO event timestamp */

	struct block_device *bdev;	/* block device handle */

	struct page	*sb_page;
	int		sb_loaded;
	__u64		sb_events;
	sector_t	data_offset;	/* start of data in array */
	sector_t	sb_start;	/* offset of the super block (in 512-byte sectors) */
	int		sb_size;	/* bytes in the superblock */
	int		preferred_minor;	/* autorun support */

	struct kobject	kobj;

	/* A device can be in one of three states based on two flags:
	 * Not working:			faulty==1 in_sync==0
	 * Fully working:		faulty==0 in_sync==1
	 * Working, but not
	 * in sync with array:		faulty==0 in_sync==0
	 *
	 * It can never have faulty==1, in_sync==1.
	 * This reduces the burden of testing multiple flags in many cases
	 * (see the illustrative sketch after this structure).
	 */

	unsigned long	flags;
#define	Faulty		1		/* device is known to have a fault */
#define	In_sync		2		/* device is in_sync with rest of array */
#define	WriteMostly	4		/* Avoid reading if at all possible */
#define	BarriersNotsupp	5		/* REQ_HARDBARRIER is not supported */
#define	AllReserved	6		/* If whole device is reserved for
					 * one array */
#define	AutoDetected	7		/* added by auto-detect */
#define Blocked		8		/* An error occurred on an externally
					 * managed array, don't allow writes
					 * until it is cleared */
	wait_queue_head_t blocked_wait;

	int desc_nr;			/* descriptor index in the superblock */
	int raid_disk;			/* role of device in array */
	int new_raid_disk;		/* role that the device will have in
					 * the array after a level-change completes.
					 */
	int saved_raid_disk;		/* role that device used to have in the
					 * array and could again if we did a partial
					 * resync from the bitmap
					 */
	sector_t	recovery_offset;/* If this device has been partially
					 * recovered, this is where we were
					 * up to.
					 */

	atomic_t	nr_pending;	/* number of pending requests.
					 * only maintained for arrays that
					 * support hot removal
					 */
	atomic_t	read_errors;	/* number of consecutive read errors that
					 * we have tried to ignore.
					 */
	struct timespec last_read_error;	/* time of our last read error
						 * (monotonic clock)
						 */
	atomic_t	corrected_errors; /* number of corrected read errors,
					   * for reporting to userspace and storing
					   * in superblock.
					   */
	struct work_struct del_work;	/* used for delayed sysfs removal */

	struct sysfs_dirent *sysfs_state; /* handle for 'state'
					   * sysfs entry */
};
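
/*
 * Illustrative sketch (not part of the original header): the two-flag state
 * encoding described above is normally queried with test_bit(), e.g.
 *
 *	if (rdev && test_bit(In_sync, &rdev->flags) &&
 *	    !test_bit(Faulty, &rdev->flags)) {
 *		(fully working: safe to read from this device)
 *	}
 *
 *	if (!test_bit(Faulty, &rdev->flags) &&
 *	    !test_bit(In_sync, &rdev->flags)) {
 *		(working but not in sync: a candidate for recovery)
 *	}
 */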

struct mddev_s
{
	void				*private;
	struct mdk_personality		*pers;
	dev_t				unit;
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
#define MD_CHANGE_DEVS	0	/* Some device status has changed */
#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */

	int				suspended;
	atomic_t			active_io;
	int				ro;
	int				sysfs_active; /* set when sysfs deletes
						       * are happening, so run/
						       * takeover/stop are not safe
						       */

	struct gendisk			*gendisk;

	struct kobject			kobj;
	int				hold_active;
#define	UNTIL_IOCTL	1
#define	UNTIL_STOP	2

	/* Superblock information */
	int				major_version,
					minor_version,
					patch_version;
	int				persistent;
	int				external;	/* metadata is
							 * managed externally */
	char				metadata_type[17]; /* externally set */
	int				chunk_sectors;
	time_t				ctime, utime;
	int				level, layout;
	char				clevel[16];
	int				raid_disks;
	int				max_disks;
	sector_t			dev_sectors;	/* used size of
							 * component devices */
	sector_t			array_sectors;	/* exported array size */
	int				external_size;	/* size managed
							 * externally */
	__u64				events;
	/* If the last 'event' was simply a clean->dirty transition, and
	 * we didn't write it to the spares, then it is safe and simple
	 * to just decrement the event count on a dirty->clean transition.
	 * So we record that possibility here.
	 */
	int				can_decrease_events;

	char				uuid[16];

	/* If the array is being reshaped, we need to record the
	 * new shape and an indication of where we are up to.
	 * This is written to the superblock.
	 * If reshape_position is MaxSector, then no reshape is happening (yet).
	 */
	sector_t			reshape_position;
	int				delta_disks, new_level, new_layout;
	int				new_chunk_sectors;

	struct mdk_thread_s		*thread;	/* management thread */
	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
	sector_t			curr_resync;	/* last block scheduled */
	/* As resync requests can complete out of order, we cannot easily track
	 * how much resync has been completed. So we occasionally pause until
	 * everything completes, then set curr_resync_completed to curr_resync.
	 * As such it may be well behind the real resync mark, but it is a value
	 * we are certain of.
	 */
	sector_t			curr_resync_completed;
	unsigned long			resync_mark;	/* a recent timestamp */
	sector_t			resync_mark_cnt;/* blocks written at resync_mark */
	sector_t			curr_mark_cnt;	/* blocks scheduled now */

	sector_t			resync_max_sectors; /* may be set by personality */

	sector_t			resync_mismatches; /* count of sectors where
							    * parity/replica mismatch found
							    */

	/* allow user-space to request suspension of IO to regions of the array */
	sector_t			suspend_lo;
	sector_t			suspend_hi;
	/* if zero, use the system-wide default */
	int				sync_speed_min;
	int				sync_speed_max;

	/* resync even though the same disks are shared among md-devices */
	int				parallel_resync;

	int				ok_start_degraded;
	/* recovery/resync flags
	 * NEEDED:   we might need to start a resync/recover
	 * RUNNING:  a thread is running, or about to be started
	 * SYNC:     actually doing a resync, not a recovery
	 * RECOVER:  doing recovery, or need to try it.
	 * INTR:     resync needs to be aborted for some reason
	 * DONE:     thread is done and is waiting to be reaped
	 * REQUEST:  user-space has requested a sync (used with SYNC)
	 * CHECK:    user-space request for check-only, no repair
	 * RESHAPE:  a reshape is happening
	 *
	 * If neither SYNC nor RESHAPE is set, then it is a recovery
	 * (see the illustrative sketch after these definitions).
	 */
#define	MD_RECOVERY_RUNNING	0
#define	MD_RECOVERY_SYNC	1
#define	MD_RECOVERY_RECOVER	2
#define	MD_RECOVERY_INTR	3
#define	MD_RECOVERY_DONE	4
#define	MD_RECOVERY_NEEDED	5
#define	MD_RECOVERY_REQUESTED	6
#define	MD_RECOVERY_CHECK	7
#define	MD_RECOVERY_RESHAPE	8
#define	MD_RECOVERY_FROZEN	9
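
	/*
	 * Illustrative sketch (not part of the original header): the sysfs
	 * 'sync_action' strings correspond roughly to these bit combinations
	 * (a simplified view of what md.c reports; FROZEN and RUNNING omitted):
	 *
	 *	RESHAPE				-> "reshape"
	 *	SYNC				-> "resync"
	 *	SYNC | REQUESTED		-> "repair"
	 *	SYNC | REQUESTED | CHECK	-> "check"
	 *	RECOVER				-> "recover"
	 */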

	unsigned long			recovery;
	int				recovery_disabled; /* if we detect that recovery
							    * will always fail, set this
							    * so we don't loop trying */

	int				in_sync;	/* known to not need resync */
	/* 'open_mutex' avoids races between 'md_open' and 'do_md_stop', so
	 * that we are never stopping an array while it is open.
	 * 'reconfig_mutex' protects all other reconfiguration.
	 * These locks are separate due to conflicting interactions
	 * with bdev->bd_mutex.
	 * Lock ordering is:
	 *  reconfig_mutex -> bd_mutex : e.g. do_md_run -> revalidate_disk
	 *  bd_mutex -> open_mutex:  e.g. __blkdev_get -> md_open
	 */
	struct mutex			open_mutex;
	struct mutex			reconfig_mutex;
	atomic_t			active;		/* general refcount */
	atomic_t			openers;	/* number of active opens */

	int				degraded;	/* whether md should consider
							 * adding a spare
							 */
	int				barriers_work;	/* initialised to true, cleared as soon
							 * as a barrier request to a slave
							 * fails.  Only supported
							 */
	struct bio			*biolist;	/* bios that need to be retried
							 * because REQ_HARDBARRIER is not supported
							 */

	atomic_t			recovery_active; /* blocks scheduled, but not written */
	wait_queue_head_t		recovery_wait;
	sector_t			recovery_cp;
	sector_t			resync_min;	/* user requested sync
							 * starts here */
	sector_t			resync_max;	/* resync should pause
							 * when it gets here */

	struct sysfs_dirent		*sysfs_state;	/* handle for 'array_state'
							 * file in sysfs.
							 */
	struct sysfs_dirent		*sysfs_action;	/* handle for 'sync_action' */

	struct work_struct		del_work;	/* used for delayed sysfs removal */

	spinlock_t			write_lock;
	wait_queue_head_t		sb_wait;	/* for waiting on superblock updates */
	atomic_t			pending_writes;	/* number of active superblock writes */

	unsigned int			safemode;	/* if set, update "clean" superblock
							 * when no writes pending.
							 */
	unsigned int			safemode_delay;
	struct timer_list		safemode_timer;
	atomic_t			writes_pending;
	struct request_queue		*queue;		/* for plugging ... */

	struct bitmap			*bitmap;	/* the bitmap for the device */
	struct {
		struct file		*file;		/* the bitmap file */
		loff_t			offset;		/* offset from superblock of
							 * start of bitmap. May be
							 * negative, but not '0'.
							 * For external metadata, offset
							 * from start of device.
							 */
		loff_t			default_offset;	/* this is the offset to use when
							 * hot-adding a bitmap.  It should
							 * eventually be settable by sysfs.
							 */
		/* When md is serving under dm, it might use a
		 * dirty_log to store the bits.
		 */
		struct dm_dirty_log	*log;

		struct mutex		mutex;
		unsigned long		chunksize;
		unsigned long		daemon_sleep;	/* how many jiffies between updates? */
		unsigned long		max_write_behind; /* write-behind mode */
		int			external;
	} bitmap_info;

	atomic_t			max_corr_read_errors; /* max read retries */
	struct list_head		all_mddevs;

	struct attribute_group		*to_remove;
	struct plug_handle		*plug;		/* if used by personality */

	/* Generic barrier handling.
	 * If there is a pending barrier request, all other
	 * writes are blocked while the devices are flushed.
	 * The last to finish a flush schedules a worker to
	 * submit the barrier request (without the barrier flag),
	 * then submit more flush requests.
	 */
	struct bio			*barrier;
	atomic_t			flush_pending;
	struct work_struct		barrier_work;
	struct work_struct		event_work;	/* used by dm to report failure event */
};


static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
{
	int faulty = test_bit(Faulty, &rdev->flags);
	if (atomic_dec_and_test(&rdev->nr_pending) && faulty)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
}

static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
{
	atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
}

struct mdk_personality
{
	char *name;
	int level;
	struct list_head list;
	struct module *owner;
	int (*make_request)(mddev_t *mddev, struct bio *bio);
	int (*run)(mddev_t *mddev);
	int (*stop)(mddev_t *mddev);
	void (*status)(struct seq_file *seq, mddev_t *mddev);
	/* error_handler must set ->faulty and clear ->in_sync
	 * if appropriate, and should abort recovery if needed
	 */
	void (*error_handler)(mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_add_disk) (mddev_t *mddev, mdk_rdev_t *rdev);
	int (*hot_remove_disk) (mddev_t *mddev, int number);
	int (*spare_active) (mddev_t *mddev);
	sector_t (*sync_request)(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster);
	int (*resize) (mddev_t *mddev, sector_t sectors);
	sector_t (*size) (mddev_t *mddev, sector_t sectors, int raid_disks);
	int (*check_reshape) (mddev_t *mddev);
	int (*start_reshape) (mddev_t *mddev);
	void (*finish_reshape) (mddev_t *mddev);
	/* quiesce moves between quiescence states
	 * 0 - fully active
	 * 1 - no new requests allowed
	 * others - reserved
	 */
	void (*quiesce) (mddev_t *mddev, int state);
	/* takeover is used to transition an array from one
	 * personality to another.  The new personality must be able
	 * to handle the data in the current layout.
	 * e.g. 2-drive raid1 -> 2-drive raid5
	 *      n-drive raid5 -> degraded (n+1)-drive raid6 with special layout
	 * If the takeover succeeds, a new 'private' structure is returned.
	 * This needs to be installed and then ->run used to activate the
	 * array.
	 */
	void *(*takeover) (mddev_t *mddev);
};
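
/*
 * Illustrative sketch (not part of the original header): a personality module
 * typically fills in one static mdk_personality and registers it at module
 * load time via register_md_personality() (declared further below); all
 * 'example_*' names are hypothetical.
 *
 *	static struct mdk_personality example_personality = {
 *		.name		= "example",
 *		.level		= -1,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *		.status		= example_status,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */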


struct md_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(mddev_t *, char *);
	ssize_t (*store)(mddev_t *, const char *, size_t);
};
extern struct attribute_group md_bitmap_group;

static inline struct sysfs_dirent *sysfs_get_dirent_safe(struct sysfs_dirent *sd, char *name)
{
	if (sd)
		return sysfs_get_dirent(sd, NULL, name);
	return sd;
}
static inline void sysfs_notify_dirent_safe(struct sysfs_dirent *sd)
{
	if (sd)
		sysfs_notify_dirent(sd);
}
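
/*
 * Illustrative sketch (not part of the original header): these NULL-tolerant
 * wrappers let callers cache a sysfs dirent and later poke it without
 * re-checking whether the parent existed, e.g.
 *
 *	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
 *	...
 *	sysfs_notify_dirent_safe(rdev->sysfs_state);
 */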

static inline char *mdname(mddev_t *mddev)
{
	return mddev->gendisk ? mddev->gendisk->disk_name : "mdX";
}

/*
 * iterates through some rdev ringlist. It's safe to remove the
 * current 'rdev'. Don't touch 'tmp' though.
 */
#define rdev_for_each_list(rdev, tmp, head)				\
	list_for_each_entry_safe(rdev, tmp, head, same_set)

/*
 * iterates through the 'same array disks' ringlist
 */
#define rdev_for_each(rdev, tmp, mddev)					\
	list_for_each_entry_safe(rdev, tmp, &((mddev)->disks), same_set)

#define rdev_for_each_rcu(rdev, mddev)					\
	list_for_each_entry_rcu(rdev, &((mddev)->disks), same_set)
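
/*
 * Illustrative sketch (not part of the original header): typical iteration,
 * assuming the caller holds reconfig_mutex (or rcu_read_lock() for the
 * _rcu variant); 'tmp' is only scratch space for the safe walk.
 *
 *	mdk_rdev_t *rdev, *tmp;
 *
 *	rdev_for_each(rdev, tmp, mddev)
 *		if (test_bit(Faulty, &rdev->flags))
 *			(rdev may safely be unlinked from the list here)
 *
 *	rcu_read_lock();
 *	rdev_for_each_rcu(rdev, mddev)
 *		(read-only inspection of rdev)
 *	rcu_read_unlock();
 */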

typedef struct mdk_thread_s {
	void			(*run) (mddev_t *mddev);
	mddev_t			*mddev;
	wait_queue_head_t	wqueue;
	unsigned long		flags;
	struct task_struct	*tsk;
	unsigned long		timeout;
} mdk_thread_t;

#define THREAD_WAKEUP  0
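
/*
 * Illustrative sketch (not part of the original header): personalities create
 * their worker with md_register_thread() (declared further below), poke it
 * with md_wakeup_thread(), which sets THREAD_WAKEUP and wakes 'wqueue', and
 * tear it down with md_unregister_thread(); exampled() is a hypothetical
 * worker function.
 *
 *	mddev->thread = md_register_thread(exampled, mddev, NULL);
 *	if (!mddev->thread)
 *		return -ENOMEM;
 *	...
 *	md_wakeup_thread(mddev->thread);
 *	...
 *	md_unregister_thread(mddev->thread);
 *	mddev->thread = NULL;
 */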

#define __wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	wait_queue_t __wait;						\
	init_waitqueue_entry(&__wait, current);				\
									\
	add_wait_queue(&wq, &__wait);					\
	for (;;) {							\
		set_current_state(TASK_UNINTERRUPTIBLE);		\
		if (condition)						\
			break;						\
		spin_unlock_irq(&lock);					\
		cmd;							\
		schedule();						\
		spin_lock_irq(&lock);					\
	}								\
	current->state = TASK_RUNNING;					\
	remove_wait_queue(&wq, &__wait);				\
} while (0)

#define wait_event_lock_irq(wq, condition, lock, cmd)			\
do {									\
	if (condition)							\
		break;							\
	__wait_event_lock_irq(wq, condition, lock, cmd);		\
} while (0)
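
/*
 * Illustrative sketch (not part of the original header): the caller enters
 * with 'lock' held and interrupts disabled; the lock is dropped around 'cmd'
 * and schedule(), and re-taken before 'condition' is rechecked.  'conf' is a
 * hypothetical per-array structure.
 *
 *	spin_lock_irq(&conf->resync_lock);
 *	wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 *			    conf->resync_lock, md_unplug(conf->mddev));
 *	spin_unlock_irq(&conf->resync_lock);
 */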

static inline void safe_put_page(struct page *p)
{
	if (p) put_page(p);
}

extern int register_md_personality(struct mdk_personality *p);
extern int unregister_md_personality(struct mdk_personality *p);
extern mdk_thread_t *md_register_thread(void (*run)(mddev_t *mddev),
					mddev_t *mddev, const char *name);
extern void md_unregister_thread(mdk_thread_t *thread);
extern void md_wakeup_thread(mdk_thread_t *thread);
extern void md_check_recovery(mddev_t *mddev);
extern void md_write_start(mddev_t *mddev, struct bio *bi);
extern void md_write_end(mddev_t *mddev);
extern void md_done_sync(mddev_t *mddev, int blocks, int ok);
extern void md_error(mddev_t *mddev, mdk_rdev_t *rdev);

extern int mddev_congested(mddev_t *mddev, int bits);
extern void md_barrier_request(mddev_t *mddev, struct bio *bio);
extern void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
			   sector_t sector, int size, struct page *page);
extern void md_super_wait(mddev_t *mddev);
extern int sync_page_io(struct block_device *bdev, sector_t sector, int size,
			struct page *page, int rw);
extern void md_do_sync(mddev_t *mddev);
extern void md_new_event(mddev_t *mddev);
extern int md_allow_write(mddev_t *mddev);
extern void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors);
extern int md_check_no_bitmap(mddev_t *mddev);
extern int md_integrity_register(mddev_t *mddev);
extern void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev);
extern int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale);
extern void restore_bitmap_write_access(struct file *file);
extern void md_unplug(mddev_t *mddev);

extern void mddev_init(mddev_t *mddev);
extern int md_run(mddev_t *mddev);
extern void md_stop(mddev_t *mddev);
extern void md_stop_writes(mddev_t *mddev);
extern void md_rdev_init(mdk_rdev_t *rdev);

extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
#endif /* _MD_MD_H */