blob: 135b1b0a155438624b56ebadf42d8971463adc6a [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _RAID10_H
2#define _RAID10_H
3
/*
 * Per-device state for one member of a RAID10 array.
 */
struct mirror_info {
	struct md_rdev	*rdev, *replacement;	/* active device, and the device
						 * (if any) being built as its
						 * replacement */
	sector_t	head_position;		/* NOTE(review): presumably the last
						 * serviced sector, for read
						 * balancing — confirm in raid10.c */
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};
13
/*
 * Private data for a RAID10 array (stored in mddev->private).
 */
struct r10conf {
	struct mddev		*mddev;
	struct mirror_info	*mirrors;	/* per-device state array */
	struct mirror_info	*mirrors_new, *mirrors_old;	/* NOTE(review): replacement
						 * arrays swapped in/out around a
						 * reshape — confirm in raid10.c */
	spinlock_t		device_lock;

	/* geometry */
	struct geom {
		int		raid_disks;
		int		near_copies;	/* number of copies laid out
						 * raid0 style */
		int		far_copies;	/* number of copies laid out
						 * at large strides across drives
						 */
		int		far_offset;	/* far_copies are offset by 1
						 * stripe instead of many
						 */
		sector_t	stride;		/* distance between far copies.
						 * This is size / far_copies unless
						 * far_offset, in which case it is
						 * 1 stripe.
						 */
		int		chunk_shift;	/* shift from chunks to sectors */
		sector_t	chunk_mask;
	} prev, geo;			/* 'geo' is the current geometry;
					 * 'prev' is the geometry before an
					 * in-progress reshape */
	int			copies;	/* near_copies * far_copies.
					 * must be <= raid_disks
					 */

	sector_t		dev_sectors;	/* temp copy of
						 * mddev->dev_sectors */
	sector_t		reshape_progress;
	sector_t		reshape_safe;
	unsigned long		reshape_checkpoint;
	sector_t		offset_diff;

	struct list_head	retry_list;	/* r10bios that need retrying */
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	/* resync/barrier bookkeeping, protected by resync_lock */
	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */
	int			have_replacement; /* There is at least one
						   * replacement device.
						   */
	wait_queue_head_t	wait_barrier;

	mempool_t		*r10bio_pool;	/* pool of r10bio descriptors */
	mempool_t		*r10buf_pool;	/* pool of r10bios with attached buffers */
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};
79
Linus Torvalds1da177e2005-04-16 15:20:36 -070080/*
Linus Torvalds1da177e2005-04-16 15:20:36 -070081 * this is our 'private' RAID10 bio.
82 *
83 * it contains information about what kind of IO operations were started
84 * for this RAID10 operation, and about their status:
85 */
86
NeilBrown9f2c9d12011-10-11 16:48:43 +110087struct r10bio {
Linus Torvalds1da177e2005-04-16 15:20:36 -070088 atomic_t remaining; /* 'have we finished' count,
89 * used from IRQ handlers
90 */
91 sector_t sector; /* virtual sector number */
92 int sectors;
93 unsigned long state;
NeilBrownfd01b882011-10-11 16:47:53 +110094 struct mddev *mddev;
Linus Torvalds1da177e2005-04-16 15:20:36 -070095 /*
96 * original bio going to /dev/mdx
97 */
98 struct bio *master_bio;
99 /*
100 * if the IO is in READ direction, then this is where we read
101 */
102 int read_slot;
103
104 struct list_head retry_list;
105 /*
106 * if the IO is in WRITE direction, then multiple bios are used,
107 * one for each copy.
108 * When resyncing we also use one for each copy.
109 * When reconstructing, we use 2 bios, one for read, one for write.
110 * We choose the number when they are allocated.
NeilBrown69335ef2011-12-23 10:17:54 +1100111 * We sometimes need an extra bio to write to the replacement.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112 */
113 struct {
NeilBrown69335ef2011-12-23 10:17:54 +1100114 struct bio *bio;
115 union {
116 struct bio *repl_bio; /* used for resync and
117 * writes */
118 struct md_rdev *rdev; /* used for reads
119 * (read_slot >= 0) */
120 };
121 sector_t addr;
122 int devnum;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700123 } devs[0];
124};
125
/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error. To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context. So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

/* True for the sentinel values above (note: also true for NULL).
 * Argument is parenthesized so expression arguments bind correctly.
 */
#define BIO_SPECIAL(bio) ((unsigned long)(bio) <= 2)
NeilBrown0eb3ff12006-01-06 00:20:29 -0800139
/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,		/* request is part of a resync */
	R10BIO_IsRecover,	/* request is part of a recovery */
	R10BIO_IsReshape,	/* request is part of a reshape */
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162#endif