#ifndef _RAID10_H
#define _RAID10_H

struct raid10_info {
	struct md_rdev	*rdev, *replacement;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct raid10_info	*mirrors;
	struct raid10_info	*mirrors_new, *mirrors_old;
	spinlock_t		device_lock;

	/* geometry */
	struct geom {
		int		raid_disks;
		int		near_copies;  /* number of copies laid out
					       * raid0 style */
		int		far_copies;   /* number of copies laid out
					       * at large strides across drives
					       */
		int		far_offset;   /* far_copies are offset by 1
					       * stripe instead of many
					       */
		sector_t	stride;	      /* distance between far copies.
					       * This is size / far_copies unless
					       * far_offset, in which case it is
					       * 1 stripe.
					       */
		int		far_set_size; /* The number of devices in a set,
					       * where a 'set' are devices that
					       * contain far/offset copies of
					       * each other.
					       */
		int		chunk_shift;  /* shift from chunks to sectors */
		sector_t	chunk_mask;
	} prev, geo;
	int			copies;	      /* near_copies * far_copies.
					       * must be <= raid_disks
					       */

	sector_t		dev_sectors;  /* temp copy of
					       * mddev->dev_sectors */
	sector_t		reshape_progress;
	sector_t		reshape_safe;
	unsigned long		reshape_checkpoint;
	sector_t		offset_diff;

	struct list_head	retry_list;
	/* A separate list of r10bio which just need raid_end_bio_io called.
	 * This mustn't happen for writes which had any errors if the superblock
	 * needs to be written.
	 */
	struct list_head	bio_end_io_list;

	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	atomic_t		nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	int			array_freeze_pending;
	sector_t		next_resync;
	int			fullsync;  /* set to 1 if a full sync is needed,
					    * (fresh device added).
					    * Cleared when a sync completes.
					    */
	int			have_replacement; /* There is at least one
						   * replacement device.
						   */
	wait_queue_head_t	wait_barrier;

	mempool_t		*r10bio_pool;
	mempool_t		*r10buf_pool;
	struct page		*tmppage;
	struct bio_set		*bio_split;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};
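
/*
 * Illustrative sketch, not part of the original header: how the geometry
 * fields above lay data out.  With near_copies=2 and far_copies=1 on 4
 * disks, chunks A, B, C, ... are placed
 *
 *	disk0  disk1  disk2  disk3
 *	  A      A      B      B
 *	  C      C      D      D
 *
 * i.e. each chunk goes to near_copies adjacent devices, raid0 style; far
 * copies repeat the layout a further 'stride' sectors into each device.
 * The helpers below only spell out the chunk arithmetic implied by the
 * chunk_shift/chunk_mask comments; their names are invented here and do
 * not exist in the driver.
 */
static inline sector_t r10_sketch_chunk(struct geom *geo, sector_t sector)
{
	/* which chunk of the virtual array this sector falls in */
	return sector >> geo->chunk_shift;
}

static inline sector_t r10_sketch_chunk_offset(struct geom *geo, sector_t sector)
{
	/* offset within the chunk; chunk_mask is the chunk size minus one */
	return sector & geo->chunk_mask;
}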

/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining; /* 'have we finished' count,
					    * used from IRQ handlers
					    */
	sector_t		sector;	/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 * We sometimes need an extra bio to write to the replacement.
	 */
	struct r10dev {
		struct bio	*bio;
		union {
			struct bio	*repl_bio; /* used for resync and
						    * writes */
			struct md_rdev	*rdev;	   /* used for reads
						    * (read_slot >= 0) */
		};
		sector_t	addr;
		int		devnum;
	} devs[0];
};
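
/*
 * Illustrative sketch, not part of the original header: devs[0] is a
 * zero-length trailing array, so an r10bio is allocated with room for one
 * struct r10dev per copy.  The helper name and the direct kzalloc() call
 * are assumptions for illustration; the real allocation goes through the
 * r10bio mempool in raid10.c.
 */
static inline struct r10bio *r10_sketch_alloc_r10bio(struct r10conf *conf,
						     gfp_t gfp)
{
	/* header plus conf->copies trailing r10dev slots */
	size_t size = offsetof(struct r10bio, devs[conf->copies]);

	return kzalloc(size, gfp);
}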

/* bits for r10bio.state */
enum r10bio_state {
	R10BIO_Uptodate,
	R10BIO_IsSync,
	R10BIO_IsRecover,
	R10BIO_IsReshape,
	R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
	R10BIO_ReadError,
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
	R10BIO_MadeGood,
	R10BIO_WriteError,
/* During a reshape we might be performing IO on the
 * 'previous' part of the array, in which case this
 * flag is set
 */
	R10BIO_Previous,
/* failfast devices did receive failfast requests. */
	R10BIO_FailFast,
};
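
/*
 * Illustrative sketch, not part of the original header: the enum values
 * above are bit numbers into r10bio->state, so callers use the kernel's
 * atomic bit helpers on them, e.g.
 * set_bit(R10BIO_Uptodate, &r10_bio->state).  The helper below exists
 * only to show the idiom; it is not part of the driver.
 */
static inline bool r10_sketch_uptodate(struct r10bio *r10_bio)
{
	/* test a state flag set by the I/O completion path */
	return test_bit(R10BIO_Uptodate, &r10_bio->state);
}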
#endif