#ifndef _RAID10_H
#define _RAID10_H

struct mirror_info {
	struct md_rdev	*rdev;
	sector_t	head_position;
	int		recovery_disabled;	/* matches
						 * mddev->recovery_disabled
						 * when we shouldn't try
						 * recovering this device.
						 */
};

struct r10conf {
	struct mddev		*mddev;
	struct mirror_info	*mirrors;
	int			raid_disks;
	spinlock_t		device_lock;

	/* geometry */
	int			near_copies;	/* number of copies laid out
						 * raid0 style */
	int			far_copies;	/* number of copies laid out
						 * at large strides across drives
						 */
	int			far_offset;	/* far_copies are offset by 1
						 * stripe instead of many
						 */
	int			copies;		/* near_copies * far_copies.
						 * must be <= raid_disks
						 */
	sector_t		stride;		/* distance between far copies.
						 * This is size / far_copies unless
						 * far_offset, in which case it is
						 * 1 stripe.
						 */
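
	/*
	 * Illustrative note, not part of the original header: for the
	 * common "near 2" layout, near_copies = 2, far_copies = 1 and
	 * far_offset = 0, so copies = 2 and every chunk is duplicated on
	 * two adjacent devices; usable capacity is then roughly
	 * dev_sectors * raid_disks / copies.  With far_copies > 1 and
	 * far_offset == 0 the extra copies sit 'stride' sectors further
	 * into each device, rotated onto different drives; with
	 * far_offset set they follow in the very next stripe instead.
	 */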

	sector_t		dev_sectors;	/* temp copy of mddev->dev_sectors */

	int			chunk_shift;	/* shift from chunks to sectors */
	sector_t		chunk_mask;
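
	/*
	 * Sketch of how these are typically derived (my assumption, not
	 * from the original header), given a power-of-two chunk size in
	 * sectors:
	 *
	 *	chunk_shift = ilog2(chunk_sectors);
	 *	chunk_mask  = chunk_sectors - 1;
	 *
	 * so (sector >> chunk_shift) is the chunk number and
	 * (sector & chunk_mask) is the offset within the chunk.  E.g. a
	 * 512KiB chunk is 1024 sectors: shift 10, mask 0x3ff.
	 */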

	struct list_head	retry_list;
	/* queue pending writes and submit them on unplug */
	struct bio_list		pending_bio_list;
	int			pending_count;

	spinlock_t		resync_lock;
	int			nr_pending;
	int			nr_waiting;
	int			nr_queued;
	int			barrier;
	sector_t		next_resync;
	int			fullsync;	/* set to 1 if a full sync is needed,
						 * (fresh device added).
						 * Cleared when a sync completes.
						 */

	wait_queue_head_t	wait_barrier;
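
	/* My reading of the fields above, not in the original header: as
	 * in raid1, resync/recovery raises 'barrier' under resync_lock
	 * and waits for 'nr_pending' (normal IO in flight) to drain,
	 * while new normal IO sleeps on wait_barrier until 'barrier'
	 * drops back to zero, so regular and resync IO are never in
	 * flight at the same time.  nr_queued counts requests parked on
	 * retry_list for raid10d.
	 */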

	mempool_t		*r10bio_pool;
	mempool_t		*r10buf_pool;
	struct page		*tmppage;

	/* When taking over an array from a different personality, we store
	 * the new thread here until we fully activate the array.
	 */
	struct md_thread	*thread;
};

/*
 * this is our 'private' RAID10 bio.
 *
 * it contains information about what kind of IO operations were started
 * for this RAID10 operation, and about their status:
 */

struct r10bio {
	atomic_t		remaining;	/* 'have we finished' count,
						 * used from IRQ handlers
						 */
	sector_t		sector;		/* virtual sector number */
	int			sectors;
	unsigned long		state;
	struct mddev		*mddev;
	/*
	 * original bio going to /dev/mdx
	 */
	struct bio		*master_bio;
	/*
	 * if the IO is in READ direction, then this is where we read
	 */
	int			read_slot;

	struct list_head	retry_list;
	/*
	 * if the IO is in WRITE direction, then multiple bios are used,
	 * one for each copy.
	 * When resyncing we also use one for each copy.
	 * When reconstructing, we use 2 bios, one for read, one for write.
	 * We choose the number when they are allocated.
	 */
	struct {
		struct bio	*bio;
		sector_t	addr;
		int		devnum;
	} devs[0];
};
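
/*
 * Sketch, not part of the original header: because devs[] is a trailing
 * zero-length array, an r10bio is allocated with one slot per copy,
 * along the lines of
 *
 *	struct r10bio *r10_bio =
 *		kzalloc(offsetof(struct r10bio, devs[conf->copies]),
 *			GFP_NOIO);
 *
 * so devs[0..copies-1] are valid for normal writes and for resync, while
 * reconstruction only uses the two slots it needs.
 */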

/* when we get a read error on a read-only array, we redirect to another
 * device without failing the first device, or trying to over-write to
 * correct the read error.  To keep track of bad blocks on a per-bio
 * level, we store IO_BLOCKED in the appropriate 'bios' pointer
 */
#define IO_BLOCKED ((struct bio *)1)
/* When we successfully write to a known bad-block, we need to remove the
 * bad-block marking which must be done from process context.  So we record
 * the success by setting devs[n].bio to IO_MADE_GOOD
 */
#define IO_MADE_GOOD ((struct bio *)2)

#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
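
/* Usage sketch, not from the original header: code walking devs[] should
 * treat these sentinels as "no real bio here", e.g.
 *
 *	struct bio *bio = r10_bio->devs[n].bio;
 *
 *	if (bio == NULL || BIO_SPECIAL(bio))
 *		continue;
 *
 * before submitting or completing the bio.
 */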

/* bits for r10bio.state */
#define R10BIO_Uptodate	0
#define R10BIO_IsSync	1
#define R10BIO_IsRecover 2
#define R10BIO_Degraded	3
/* Set ReadError on bios that experience a read error
 * so that raid10d knows what to do with them.
 */
#define R10BIO_ReadError 4
/* If a write for this request means we can clear some
 * known-bad-block records, we set this flag.
 */
#define R10BIO_MadeGood	5
#define R10BIO_WriteError 6
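
/* Note, not part of the original header: these are bit numbers, not
 * masks; they are meant for the atomic bitops on r10bio.state, e.g.
 *
 *	set_bit(R10BIO_Uptodate, &r10_bio->state);
 *	if (test_bit(R10BIO_ReadError, &r10_bio->state))
 *		handle_read_error(...);
 *
 * where handle_read_error() is only a placeholder name here.
 */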
#endif