/*
 * Copyright (C) 2015 Shaohua Li <shli@fb.com>
 * Copyright (C) 2016 Song Liu <songliubraving@fb.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/raid/md_p.h>
#include <linux/crc32c.h>
#include <linux/random.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include "md.h"
#include "raid5.h"
#include "md-bitmap.h"
#include "raid5-log.h"

/*
 * metadata/data are stored on disk in 4k-sized units (blocks), regardless
 * of the underlying hardware sector size. This only works with
 * PAGE_SIZE == 4096.
 */
#define BLOCK_SECTORS (8)
#define BLOCK_SECTOR_SHIFT (3)

/*
 * log->max_free_space is min(1/4 disk size, 10G reclaimable space).
 *
 * In write through mode, the reclaim runs every log->max_free_space.
 * This prevents the recovery scan from taking too long.
 */
#define RECLAIM_MAX_FREE_SPACE (10 * 1024 * 1024 * 2) /* sectors */
#define RECLAIM_MAX_FREE_SPACE_SHIFT (2)
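
/*
 * For example: a sector is 512 bytes, so RECLAIM_MAX_FREE_SPACE is
 * 10 * 1024 * 1024 KiB * 2 sectors/KiB = 10GiB worth of sectors, and
 * shifting the device size right by RECLAIM_MAX_FREE_SPACE_SHIFT (2)
 * yields the "1/4 disk size" half of the min() above.
 */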

/* wake up reclaim thread periodically */
#define R5C_RECLAIM_WAKEUP_INTERVAL (30 * HZ)
/* start flush with these full stripes */
#define R5C_FULL_STRIPE_FLUSH_BATCH(conf) (conf->max_nr_stripes / 4)
/* reclaim stripes in groups */
#define R5C_RECLAIM_STRIPE_GROUP (NR_STRIPE_HASH_LOCKS * 2)

/*
 * We only need 2 bios per I/O unit to make progress, but ensure we
 * have a few more available to not get too tight.
 */
#define R5L_POOL_SIZE	4

static char *r5c_journal_mode_str[] = {"write-through",
				       "write-back"};
/*
 * raid5 cache state machine
 *
 * With the RAID cache, each stripe works in two phases:
 *	- caching phase
 *	- writing-out phase
 *
 * These two phases are controlled by bit STRIPE_R5C_CACHING:
 *   if STRIPE_R5C_CACHING == 0, the stripe is in writing-out phase
 *   if STRIPE_R5C_CACHING == 1, the stripe is in caching phase
 *
 * When there is no journal, or the journal is in write-through mode,
 * the stripe is always in writing-out phase.
 *
 * For a write-back journal, the stripe is sent to caching phase on write
 * (r5c_try_caching_write). r5c_make_stripe_write_out() kicks off
 * the write-out phase by clearing STRIPE_R5C_CACHING.
 *
 * Stripes in caching phase do not write the raid disks. Instead, all
 * writes are committed from the log device. Therefore, a stripe in
 * caching phase handles writes as:
 *	- write to log device
 *	- return IO
 *
 * Stripes in writing-out phase handle writes as:
 *	- calculate parity
 *	- write pending data and parity to journal
 *	- write data and parity to raid disks
 *	- return IO for pending writes
 */
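
/*
 * For example, in write-back mode a write to a caching-phase stripe is
 * committed to the log device and completed back to the caller right away;
 * parity is only calculated and written to the raid disks after
 * r5c_make_stripe_write_out() moves the stripe to the writing-out phase.
 */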

struct r5l_log {
	struct md_rdev *rdev;

	u32 uuid_checksum;

	sector_t device_size;		/* log device size, rounded to
					 * BLOCK_SECTORS */
	sector_t max_free_space;	/* reclaim runs if free space is at
					 * this size */

	sector_t last_checkpoint;	/* log tail. where recovery scan
					 * starts from */
	u64 last_cp_seq;		/* log tail sequence */

	sector_t log_start;		/* log head. where new data appends */
	u64 seq;			/* log head sequence */

	sector_t next_checkpoint;

	struct mutex io_mutex;
	struct r5l_io_unit *current_io;	/* current io_unit accepting new data */

	spinlock_t io_list_lock;
	struct list_head running_ios;	/* io_units which are still running,
					 * and have not yet been completely
					 * written to the log */
	struct list_head io_end_ios;	/* io_units which have been completely
					 * written to the log but not yet written
					 * to the RAID */
	struct list_head flushing_ios;	/* io_units which are waiting for log
					 * cache flush */
	struct list_head finished_ios;	/* io_units which settle down in log disk */
	struct bio flush_bio;

	struct list_head no_mem_stripes;   /* pending stripes, -ENOMEM */

	struct kmem_cache *io_kc;
	mempool_t *io_pool;
	struct bio_set *bs;
	mempool_t *meta_pool;

	struct md_thread *reclaim_thread;
	unsigned long reclaim_target;	/* amount of space that needs to be
					 * reclaimed.  if it's 0, reclaim the
					 * space used by io_units which are in
					 * IO_UNIT_STRIPE_END state (i.e.,
					 * reclaim doesn't wait for a specific
					 * io_unit to switch to
					 * IO_UNIT_STRIPE_END state) */
	wait_queue_head_t iounit_wait;

	struct list_head no_space_stripes; /* pending stripes, log has no space */
	spinlock_t no_space_stripes_lock;

	bool need_cache_flush;

	/* for r5c_cache */
	enum r5c_journal_mode r5c_journal_mode;

	/* all stripes in r5cache, in the order of seq at sh->log_start */
	struct list_head stripe_in_journal_list;

	spinlock_t stripe_in_journal_lock;
	atomic_t stripe_in_journal_count;

	/* to submit async io_units, to fulfill ordering of flush */
	struct work_struct deferred_io_work;
	/* to disable write back while in degraded mode */
	struct work_struct disable_writeback_work;

	/* for chunk_aligned_read in writeback mode, details below */
	spinlock_t tree_lock;
	struct radix_tree_root big_stripe_tree;
};

/*
 * Enable chunk_aligned_read() with write back cache.
 *
 * Each chunk may contain more than one stripe (for example, a 256kB
 * chunk contains 64 4kB pages, so this chunk contains 64 stripes). For
 * chunk_aligned_read, these stripes are grouped into one "big_stripe".
 * For each big_stripe, we count how many stripes of this big_stripe
 * are in the write back cache. This count is tracked in a radix tree
 * (big_stripe_tree). We use the radix_tree item pointer as the counter.
 * r5c_tree_index() is used to calculate keys for the radix tree.
 *
 * chunk_aligned_read() calls r5c_big_stripe_cached() to look up the
 * big_stripe of each chunk in the tree. If this big_stripe is in the
 * tree, chunk_aligned_read() aborts. This lookup is protected by
 * rcu_read_lock().
 *
 * It is necessary to remember whether a stripe is counted in
 * big_stripe_tree. Instead of adding a new flag, we reuse existing flags:
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE. If either of these
 * two flags is set, the stripe is counted in big_stripe_tree. This
 * requires moving set_bit(STRIPE_R5C_PARTIAL_STRIPE) to
 * r5c_try_caching_write(); and moving the clear_bit of
 * STRIPE_R5C_PARTIAL_STRIPE and STRIPE_R5C_FULL_STRIPE to
 * r5c_finish_stripe_write_out().
 */

/*
 * The radix tree requires the lowest 2 bits of the data pointer to be
 * zero (2'b00). So it is necessary to left shift the counter by 2 bits
 * before using it as the data pointer of the tree.
 */
#define R5C_RADIX_COUNT_SHIFT 2
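
/*
 * For example, a big_stripe with 3 stripes in the cache is stored as the
 * item pointer (void *)(3 << R5C_RADIX_COUNT_SHIFT), i.e. (void *)0xc,
 * which keeps the low 2 bits clear as the radix tree requires.
 */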

/*
 * calculate key for big_stripe_tree
 *
 * sect: align_bi->bi_iter.bi_sector or sh->sector
 */
static inline sector_t r5c_tree_index(struct r5conf *conf,
				      sector_t sect)
{
	sector_t offset;

	offset = sector_div(sect, conf->chunk_sectors);
	return sect;
}
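
/*
 * For example, with conf->chunk_sectors == 512 (256kB chunks), any sector
 * in [1024, 1535] divides down to chunk number 2, so all stripes of that
 * chunk map to the same big_stripe_tree key.
 */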

/*
 * An IO range starts from a meta data block and ends at the next meta data
 * block. The io unit's meta data block tracks the data/parity that follows
 * it. The io unit is written to the log disk with a normal write, as we
 * always flush the log disk first and then start moving data to the raid
 * disks; there is no requirement to write the io unit with FLUSH/FUA.
 */
struct r5l_io_unit {
	struct r5l_log *log;

	struct page *meta_page;	/* store meta block */
	int meta_offset;	/* current offset in meta_page */

	struct bio *current_bio;/* current_bio accepting new data */

	atomic_t pending_stripe;/* how many stripes not flushed to raid */
	u64 seq;		/* seq number of the metablock */
	sector_t log_start;	/* where the io_unit starts */
	sector_t log_end;	/* where the io_unit ends */
	struct list_head log_sibling; /* log->running_ios */
	struct list_head stripe_list; /* stripes added to the io_unit */

	int state;
	bool need_split_bio;
	struct bio *split_bio;

	unsigned int has_flush:1;		/* include flush request */
	unsigned int has_fua:1;			/* include fua request */
	unsigned int has_null_flush:1;		/* include null flush request */
	unsigned int has_flush_payload:1;	/* include flush payload  */
	/*
	 * io isn't sent yet; a flush/fua request can only be submitted once
	 * it is the first IO in the running_ios list
	 */
	unsigned int io_deferred:1;

	struct bio_list flush_barriers;   /* size == 0 flush bios */
};

/* r5l_io_unit state */
enum r5l_io_unit_state {
	IO_UNIT_RUNNING = 0,	/* accepting new IO */
	IO_UNIT_IO_START = 1,	/* io_unit bio started writing to log,
				 * not accepting new bio */
	IO_UNIT_IO_END = 2,	/* io_unit bio finished writing to log */
	IO_UNIT_STRIPE_END = 3,	/* stripe data finished writing to raid */
};
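
/*
 * An io_unit only ever moves forward through these states: RUNNING ->
 * IO_START -> IO_END -> STRIPE_END. __r5l_set_io_unit_state() below
 * warns if a transition would go backwards or stay in place.
 */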

bool r5c_is_writeback(struct r5l_log *log)
{
	return (log != NULL &&
		log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK);
}

static sector_t r5l_ring_add(struct r5l_log *log, sector_t start, sector_t inc)
{
	start += inc;
	if (start >= log->device_size)
		start = start - log->device_size;
	return start;
}

static sector_t r5l_ring_distance(struct r5l_log *log, sector_t start,
				  sector_t end)
{
	if (end >= start)
		return end - start;
	else
		return end + log->device_size - start;
}
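
/*
 * For example, with device_size == 1000 sectors, r5l_ring_add(log, 990, 20)
 * wraps to sector 10 and r5l_ring_distance(log, 990, 10) is 20. Note that
 * r5l_ring_add() subtracts device_size at most once, so it assumes
 * inc < device_size.
 */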

static bool r5l_has_free_space(struct r5l_log *log, sector_t size)
{
	sector_t used_size;

	used_size = r5l_ring_distance(log, log->last_checkpoint,
				      log->log_start);

	return log->device_size > used_size + size;
}

static void __r5l_set_io_unit_state(struct r5l_io_unit *io,
				    enum r5l_io_unit_state state)
{
	if (WARN_ON(io->state >= state))
		return;
	io->state = state;
}

static void
r5c_return_dev_pending_writes(struct r5conf *conf, struct r5dev *dev)
{
	struct bio *wbi, *wbi2;

	wbi = dev->written;
	dev->written = NULL;
	while (wbi && wbi->bi_iter.bi_sector <
	       dev->sector + STRIPE_SECTORS) {
		wbi2 = r5_next_bio(wbi, dev->sector);
		md_write_end(conf->mddev);
		bio_endio(wbi);
		wbi = wbi2;
	}
}

void r5c_handle_cached_data_endio(struct r5conf *conf,
				  struct stripe_head *sh, int disks)
{
	int i;

	for (i = sh->disks; i--; ) {
		if (sh->dev[i].written) {
			set_bit(R5_UPTODATE, &sh->dev[i].flags);
			r5c_return_dev_pending_writes(conf, &sh->dev[i]);
			bitmap_endwrite(conf->mddev->bitmap, sh->sector,
					STRIPE_SECTORS,
					!test_bit(STRIPE_DEGRADED, &sh->state),
					0);
		}
	}
}

void r5l_wake_reclaim(struct r5l_log *log, sector_t space);

/* Check whether we should flush some stripes to free up stripe cache */
void r5c_check_stripe_cache_usage(struct r5conf *conf)
{
	int total_cached;

	if (!r5c_is_writeback(conf->log))
		return;

	total_cached = atomic_read(&conf->r5c_cached_partial_stripes) +
		atomic_read(&conf->r5c_cached_full_stripes);

	/*
	 * The following condition is true for either of the following:
	 *   - stripe cache pressure high:
	 *          total_cached > 3/4 min_nr_stripes ||
	 *          empty_inactive_list_nr > 0
	 *   - stripe cache pressure moderate:
	 *          total_cached > 1/2 min_nr_stripes
	 */
	if (total_cached > conf->min_nr_stripes * 1 / 2 ||
	    atomic_read(&conf->empty_inactive_list_nr) > 0)
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * flush cache when there are R5C_FULL_STRIPE_FLUSH_BATCH or more full
 * stripes in the cache
 */
void r5c_check_cached_full_stripe(struct r5conf *conf)
{
	if (!r5c_is_writeback(conf->log))
		return;

	/*
	 * wake up reclaim for R5C_FULL_STRIPE_FLUSH_BATCH cached stripes
	 * or a full stripe (chunk size / 4k stripes).
	 */
	if (atomic_read(&conf->r5c_cached_full_stripes) >=
	    min(R5C_FULL_STRIPE_FLUSH_BATCH(conf),
		conf->chunk_sectors >> STRIPE_SHIFT))
		r5l_wake_reclaim(conf->log, 0);
}

/*
 * Total log space (in sectors) needed to flush all data in cache
 *
 * To avoid deadlock due to log space, it is necessary to reserve log
 * space to flush critical stripes (stripes that are occupying log space
 * near last_checkpoint). This function helps check how much log space is
 * required to flush all cached stripes.
 *
 * To reduce log space requirements, two mechanisms are used to give cache
 * flushes higher priority:
 *    1. In handle_stripe_dirtying() and schedule_reconstruction(),
 *       stripes ALREADY in the journal can be flushed w/o pending writes;
 *    2. In r5l_write_stripe() and r5c_cache_data(), stripes NOT in the
 *       journal can be delayed (r5l_add_no_space_stripe).
 *
 * In a cache flush, the stripe goes through 1 and then 2. For a stripe that
 * already passed 1, flushing it requires at most (conf->max_degraded + 1)
 * pages of journal space. For stripes that have not passed 1, flushing
 * requires (conf->raid_disks + 1) pages of journal space. There are at
 * most (conf->group_cnt + 1) stripes that passed 1. So the total journal
 * space required to flush all cached stripes (in pages) is:
 *
 *     (stripe_in_journal_count - group_cnt - 1) * (max_degraded + 1) +
 *     (group_cnt + 1) * (raid_disks + 1)
 * or
 *     (stripe_in_journal_count) * (max_degraded + 1) +
 *     (group_cnt + 1) * (raid_disks - max_degraded)
 */
static sector_t r5c_log_required_to_flush_cache(struct r5conf *conf)
{
	struct r5l_log *log = conf->log;

	if (!r5c_is_writeback(log))
		return 0;

	return BLOCK_SECTORS *
		((conf->max_degraded + 1) * atomic_read(&log->stripe_in_journal_count) +
		 (conf->raid_disks - conf->max_degraded) * (conf->group_cnt + 1));
}
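
/*
 * For example, a 5-disk RAID5 (raid_disks == 5, max_degraded == 1,
 * group_cnt == 0) with 10 stripes in the journal needs
 * 8 * (2 * 10 + 4 * 1) = 192 sectors (96kB) of log space to flush all
 * cached stripes.
 */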

/*
 * evaluate log space usage and update R5C_LOG_TIGHT and R5C_LOG_CRITICAL
 *
 * R5C_LOG_TIGHT is set when free space on the log device is less than 3x of
 * reclaim_required_space. R5C_LOG_CRITICAL is set when free space on the log
 * device is less than 2x of reclaim_required_space.
 */
static inline void r5c_update_log_state(struct r5l_log *log)
{
	struct r5conf *conf = log->rdev->mddev->private;
	sector_t free_space;
	sector_t reclaim_space;
	bool wake_reclaim = false;

	if (!r5c_is_writeback(log))
		return;

	free_space = r5l_ring_distance(log, log->log_start,
				       log->last_checkpoint);
	reclaim_space = r5c_log_required_to_flush_cache(conf);
	if (free_space < 2 * reclaim_space)
		set_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	else {
		if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state))
			wake_reclaim = true;
		clear_bit(R5C_LOG_CRITICAL, &conf->cache_state);
	}
	if (free_space < 3 * reclaim_space)
		set_bit(R5C_LOG_TIGHT, &conf->cache_state);
	else
		clear_bit(R5C_LOG_TIGHT, &conf->cache_state);

	if (wake_reclaim)
		r5l_wake_reclaim(log, 0);
}

/*
 * Put the stripe into writing-out phase by clearing STRIPE_R5C_CACHING.
 * This function should only be called in write-back mode.
 */
void r5c_make_stripe_write_out(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	struct r5l_log *log = conf->log;

	BUG_ON(!r5c_is_writeback(log));

	WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state));
	clear_bit(STRIPE_R5C_CACHING, &sh->state);

	if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
		atomic_inc(&conf->preread_active_stripes);
}

static void r5c_handle_data_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
			set_bit(R5_InJournal, &sh->dev[i].flags);
			clear_bit(R5_LOCKED, &sh->dev[i].flags);
		}
	clear_bit(STRIPE_LOG_TRAPPED, &sh->state);
}

/*
 * this journal write must contain full parity,
 * it may also contain some data pages
 */
static void r5c_handle_parity_cached(struct stripe_head *sh)
{
	int i;

	for (i = sh->disks; i--; )
		if (test_bit(R5_InJournal, &sh->dev[i].flags))
			set_bit(R5_Wantwrite, &sh->dev[i].flags);
}

/*
 * Setting proper flags after writing (or flushing) data and/or parity to the
 * log device. This is called from r5l_log_endio() or r5l_log_flush_endio().
 */
static void r5c_finish_cache_stripe(struct stripe_head *sh)
{
	struct r5l_log *log = sh->raid_conf->log;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) {
		BUG_ON(test_bit(STRIPE_R5C_CACHING, &sh->state));
		/*
		 * Set R5_InJournal for parity dev[pd_idx]. This means
		 * all data AND parity are in the journal. For RAID 6, it is
		 * NOT necessary to set the flag for dev[qd_idx], as the
		 * two parities are written out together.
		 */
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	} else if (test_bit(STRIPE_R5C_CACHING, &sh->state)) {
		r5c_handle_data_cached(sh);
	} else {
		r5c_handle_parity_cached(sh);
		set_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags);
	}
}

static void r5l_io_run_stripes(struct r5l_io_unit *io)
{
	struct stripe_head *sh, *next;

	list_for_each_entry_safe(sh, next, &io->stripe_list, log_list) {
		list_del_init(&sh->log_list);

		r5c_finish_cache_stripe(sh);

		set_bit(STRIPE_HANDLE, &sh->state);
		raid5_release_stripe(sh);
	}
}

static void r5l_log_run_stripes(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;

		list_move_tail(&io->log_sibling, &log->finished_ios);
		r5l_io_run_stripes(io);
	}
}

static void r5l_move_to_end_ios(struct r5l_log *log)
{
	struct r5l_io_unit *io, *next;

	lockdep_assert_held(&log->io_list_lock);

	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
		/* don't change list order */
		if (io->state < IO_UNIT_IO_END)
			break;
		list_move_tail(&io->log_sibling, &log->io_end_ios);
	}
}

static void __r5l_stripe_write_finished(struct r5l_io_unit *io);
static void r5l_log_endio(struct bio *bio)
{
	struct r5l_io_unit *io = bio->bi_private;
	struct r5l_io_unit *io_deferred;
	struct r5l_log *log = io->log;
	unsigned long flags;
	bool has_null_flush;
	bool has_flush_payload;

	if (bio->bi_status)
		md_error(log->rdev->mddev, log->rdev);

	bio_put(bio);
	mempool_free(io->meta_page, log->meta_pool);

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_END);

	/*
	 * if the io does not have a null_flush or flush payload,
	 * it is not safe to access it after releasing io_list_lock.
	 * Therefore, it is necessary to check the condition with
	 * the lock held.
	 */
	has_null_flush = io->has_null_flush;
	has_flush_payload = io->has_flush_payload;

	if (log->need_cache_flush && !list_empty(&io->stripe_list))
		r5l_move_to_end_ios(log);
	else
		r5l_log_run_stripes(log);
	if (!list_empty(&log->running_ios)) {
		/*
		 * FLUSH/FUA io_unit is deferred because of ordering, now we
		 * can dispatch it
		 */
		io_deferred = list_first_entry(&log->running_ios,
					       struct r5l_io_unit, log_sibling);
		if (io_deferred->io_deferred)
			schedule_work(&log->deferred_io_work);
	}

	spin_unlock_irqrestore(&log->io_list_lock, flags);

	if (log->need_cache_flush)
		md_wakeup_thread(log->rdev->mddev->thread);

	/* finish flush only io_unit and PAYLOAD_FLUSH only io_unit */
	if (has_null_flush) {
		struct bio *bi;

		WARN_ON(bio_list_empty(&io->flush_barriers));
		while ((bi = bio_list_pop(&io->flush_barriers)) != NULL) {
			bio_endio(bi);
			if (atomic_dec_and_test(&io->pending_stripe)) {
				__r5l_stripe_write_finished(io);
				return;
			}
		}
	}
	/* decrease pending_stripe for flush payload */
	if (has_flush_payload)
		if (atomic_dec_and_test(&io->pending_stripe))
			__r5l_stripe_write_finished(io);
}

static void r5l_do_submit_io(struct r5l_log *log, struct r5l_io_unit *io)
{
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	__r5l_set_io_unit_state(io, IO_UNIT_IO_START);
	spin_unlock_irqrestore(&log->io_list_lock, flags);

	/*
	 * In case of journal device failures, submit_bio will get an error
	 * and call endio, then active stripes will continue the write
	 * process. Therefore, it is not necessary to check the Faulty bit
	 * of the journal device here.
	 *
	 * We can't check split_bio after current_bio is submitted. If
	 * io->split_bio is null, after current_bio is submitted, current_bio
	 * might already be completed and the io_unit is freed. We submit
	 * split_bio first to avoid the issue.
	 */
	if (io->split_bio) {
		if (io->has_flush)
			io->split_bio->bi_opf |= REQ_PREFLUSH;
		if (io->has_fua)
			io->split_bio->bi_opf |= REQ_FUA;
		submit_bio(io->split_bio);
	}

	if (io->has_flush)
		io->current_bio->bi_opf |= REQ_PREFLUSH;
	if (io->has_fua)
		io->current_bio->bi_opf |= REQ_FUA;
	submit_bio(io->current_bio);
}

/* deferred io_unit will be dispatched here */
static void r5l_submit_io_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   deferred_io_work);
	struct r5l_io_unit *io = NULL;
	unsigned long flags;

	spin_lock_irqsave(&log->io_list_lock, flags);
	if (!list_empty(&log->running_ios)) {
		io = list_first_entry(&log->running_ios, struct r5l_io_unit,
				      log_sibling);
		if (!io->io_deferred)
			io = NULL;
		else
			io->io_deferred = 0;
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (io)
		r5l_do_submit_io(log, io);
}

static void r5c_disable_writeback_async(struct work_struct *work)
{
	struct r5l_log *log = container_of(work, struct r5l_log,
					   disable_writeback_work);
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	int locked = 0;

	if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
		return;
	pr_info("md/raid:%s: Disabling writeback cache for degraded array.\n",
		mdname(mddev));

	/* wait for superblock change before suspend */
	wait_event(mddev->sb_wait,
		   conf->log == NULL ||
		   (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags) &&
		    (locked = mddev_trylock(mddev))));
	if (locked) {
		mddev_suspend(mddev);
		log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH;
		mddev_resume(mddev);
		mddev_unlock(mddev);
	}
}

static void r5l_submit_current_io(struct r5l_log *log)
{
	struct r5l_io_unit *io = log->current_io;
	struct bio *bio;
	struct r5l_meta_block *block;
	unsigned long flags;
	u32 crc;
	bool do_submit = true;

	if (!io)
		return;

	block = page_address(io->meta_page);
	block->meta_size = cpu_to_le32(io->meta_offset);
	crc = crc32c_le(log->uuid_checksum, block, PAGE_SIZE);
	block->checksum = cpu_to_le32(crc);
	bio = io->current_bio;

	log->current_io = NULL;
	spin_lock_irqsave(&log->io_list_lock, flags);
	if (io->has_flush || io->has_fua) {
		if (io != list_first_entry(&log->running_ios,
					   struct r5l_io_unit, log_sibling)) {
			io->io_deferred = 1;
			do_submit = false;
		}
	}
	spin_unlock_irqrestore(&log->io_list_lock, flags);
	if (do_submit)
		r5l_do_submit_io(log, io);
}

static struct bio *r5l_bio_alloc(struct r5l_log *log)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);

	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio_set_dev(bio, log->rdev->bdev);
	bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;

	return bio;
}

static void r5_reserve_log_entry(struct r5l_log *log, struct r5l_io_unit *io)
{
	log->log_start = r5l_ring_add(log, log->log_start, BLOCK_SECTORS);

	r5c_update_log_state(log);
	/*
	 * If we filled up the log device, start from the beginning again,
	 * which will require a new bio.
	 *
	 * Note: for this to work properly the log size needs to be a multiple
	 * of BLOCK_SECTORS.
	 */
	if (log->log_start == 0)
		io->need_split_bio = true;

	io->log_end = log->log_start;
}

static struct r5l_io_unit *r5l_new_meta(struct r5l_log *log)
{
	struct r5l_io_unit *io;
	struct r5l_meta_block *block;

	io = mempool_alloc(log->io_pool, GFP_ATOMIC);
	if (!io)
		return NULL;
	memset(io, 0, sizeof(*io));

	io->log = log;
	INIT_LIST_HEAD(&io->log_sibling);
	INIT_LIST_HEAD(&io->stripe_list);
	bio_list_init(&io->flush_barriers);
	io->state = IO_UNIT_RUNNING;

	io->meta_page = mempool_alloc(log->meta_pool, GFP_NOIO);
	block = page_address(io->meta_page);
	clear_page(block);
	block->magic = cpu_to_le32(R5LOG_MAGIC);
	block->version = R5LOG_VERSION;
	block->seq = cpu_to_le64(log->seq);
	block->position = cpu_to_le64(log->log_start);

	io->log_start = log->log_start;
	io->meta_offset = sizeof(struct r5l_meta_block);
	io->seq = log->seq++;

	io->current_bio = r5l_bio_alloc(log);
	io->current_bio->bi_end_io = r5l_log_endio;
	io->current_bio->bi_private = io;
	bio_add_page(io->current_bio, io->meta_page, PAGE_SIZE, 0);

	r5_reserve_log_entry(log, io);

	spin_lock_irq(&log->io_list_lock);
	list_add_tail(&io->log_sibling, &log->running_ios);
	spin_unlock_irq(&log->io_list_lock);

	return io;
}

static int r5l_get_meta(struct r5l_log *log, unsigned int payload_size)
{
	if (log->current_io &&
	    log->current_io->meta_offset + payload_size > PAGE_SIZE)
		r5l_submit_current_io(log);

	if (!log->current_io) {
		log->current_io = r5l_new_meta(log);
		if (!log->current_io)
			return -ENOMEM;
	}

	return 0;
}

static void r5l_append_payload_meta(struct r5l_log *log, u16 type,
				    sector_t location,
				    u32 checksum1, u32 checksum2,
				    bool checksum2_valid)
{
	struct r5l_io_unit *io = log->current_io;
	struct r5l_payload_data_parity *payload;

	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(type);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32((1 + !!checksum2_valid) <<
				    (PAGE_SHIFT - 9));
	payload->location = cpu_to_le64(location);
	payload->checksum[0] = cpu_to_le32(checksum1);
	if (checksum2_valid)
		payload->checksum[1] = cpu_to_le32(checksum2);

	io->meta_offset += sizeof(struct r5l_payload_data_parity) +
		sizeof(__le32) * (1 + !!checksum2_valid);
}
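
/*
 * For example, a single data page payload consumes
 * sizeof(struct r5l_payload_data_parity) + 4 bytes of meta_page space,
 * while a RAID6 parity payload (two checksums) consumes
 * sizeof(struct r5l_payload_data_parity) + 8 bytes. payload->size records
 * the payload length in 512-byte sectors, i.e. 8 or 16 for 4kB pages.
 */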

static void r5l_append_payload_page(struct r5l_log *log, struct page *page)
{
	struct r5l_io_unit *io = log->current_io;

	if (io->need_split_bio) {
		BUG_ON(io->split_bio);
		io->split_bio = io->current_bio;
		io->current_bio = r5l_bio_alloc(log);
		bio_chain(io->current_bio, io->split_bio);
		io->need_split_bio = false;
	}

	if (!bio_add_page(io->current_bio, page, PAGE_SIZE, 0))
		BUG();

	r5_reserve_log_entry(log, io);
}

static void r5l_append_flush_payload(struct r5l_log *log, sector_t sect)
{
	struct mddev *mddev = log->rdev->mddev;
	struct r5conf *conf = mddev->private;
	struct r5l_io_unit *io;
	struct r5l_payload_flush *payload;
	int meta_size;

	/*
	 * payload_flush requires extra writes to the journal.
	 * To avoid handling the extra IO in quiesce, just skip
	 * flush_payload
	 */
	if (conf->quiesce)
		return;

	mutex_lock(&log->io_mutex);
	meta_size = sizeof(struct r5l_payload_flush) + sizeof(__le64);

	if (r5l_get_meta(log, meta_size)) {
		mutex_unlock(&log->io_mutex);
		return;
	}

	/* current implementation is one stripe per flush payload */
	io = log->current_io;
	payload = page_address(io->meta_page) + io->meta_offset;
	payload->header.type = cpu_to_le16(R5LOG_PAYLOAD_FLUSH);
	payload->header.flags = cpu_to_le16(0);
	payload->size = cpu_to_le32(sizeof(__le64));
	payload->flush_stripes[0] = cpu_to_le64(sect);
	io->meta_offset += meta_size;
	/* multiple flush payloads count as one pending_stripe */
	if (!io->has_flush_payload) {
		io->has_flush_payload = 1;
		atomic_inc(&io->pending_stripe);
	}
	mutex_unlock(&log->io_mutex);
}
|  | 914 |  | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 915 | static int r5l_log_stripe(struct r5l_log *log, struct stripe_head *sh, | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 916 | int data_pages, int parity_pages) | 
|  | 917 | { | 
|  | 918 | int i; | 
|  | 919 | int meta_size; | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 920 | int ret; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 921 | struct r5l_io_unit *io; | 
|  | 922 |  | 
|  | 923 | meta_size = | 
|  | 924 | ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) | 
|  | 925 | * data_pages) + | 
|  | 926 | sizeof(struct r5l_payload_data_parity) + | 
|  | 927 | sizeof(__le32) * parity_pages; | 
|  | 928 |  | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 929 | ret = r5l_get_meta(log, meta_size); | 
|  | 930 | if (ret) | 
|  | 931 | return ret; | 
|  | 932 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 933 | io = log->current_io; | 
|  | 934 |  | 
| Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 935 | if (test_and_clear_bit(STRIPE_R5C_PREFLUSH, &sh->state)) | 
|  | 936 | io->has_flush = 1; | 
|  | 937 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 938 | for (i = 0; i < sh->disks; i++) { | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 939 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || | 
|  | 940 | test_bit(R5_InJournal, &sh->dev[i].flags)) | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 941 | continue; | 
|  | 942 | if (i == sh->pd_idx || i == sh->qd_idx) | 
|  | 943 | continue; | 
| Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 944 | if (test_bit(R5_WantFUA, &sh->dev[i].flags) && | 
|  | 945 | log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) { | 
|  | 946 | io->has_fua = 1; | 
|  | 947 | /* | 
|  | 948 | * we need to flush journal to make sure recovery can | 
|  | 949 | * reach the data with fua flag | 
|  | 950 | */ | 
|  | 951 | io->has_flush = 1; | 
|  | 952 | } | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 953 | r5l_append_payload_meta(log, R5LOG_PAYLOAD_DATA, | 
|  | 954 | raid5_compute_blocknr(sh, i, 0), | 
|  | 955 | sh->dev[i].log_checksum, 0, false); | 
|  | 956 | r5l_append_payload_page(log, sh->dev[i].page); | 
|  | 957 | } | 
|  | 958 |  | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 959 | if (parity_pages == 2) { | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 960 | r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, | 
|  | 961 | sh->sector, sh->dev[sh->pd_idx].log_checksum, | 
|  | 962 | sh->dev[sh->qd_idx].log_checksum, true); | 
|  | 963 | r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); | 
|  | 964 | r5l_append_payload_page(log, sh->dev[sh->qd_idx].page); | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 965 | } else if (parity_pages == 1) { | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 966 | r5l_append_payload_meta(log, R5LOG_PAYLOAD_PARITY, | 
|  | 967 | sh->sector, sh->dev[sh->pd_idx].log_checksum, | 
|  | 968 | 0, false); | 
|  | 969 | r5l_append_payload_page(log, sh->dev[sh->pd_idx].page); | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 970 | } else  /* Just writing data, not parity, in caching phase */ | 
|  | 971 | BUG_ON(parity_pages != 0); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 972 |  | 
|  | 973 | list_add_tail(&sh->log_list, &io->stripe_list); | 
|  | 974 | atomic_inc(&io->pending_stripe); | 
|  | 975 | sh->log_io = io; | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 976 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 977 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) | 
|  | 978 | return 0; | 
|  | 979 |  | 
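|  |  | /* | 
|  |  |  * Write-back mode only: the first time a stripe enters the journal | 
|  |  |  * (log_start == MaxSector), record where its data starts and put it on | 
|  |  |  * stripe_in_journal_list, which r5c_calculate_new_cp() later walks to | 
|  |  |  * find the new checkpoint. | 
|  |  |  */ | 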
|  | 980 | if (sh->log_start == MaxSector) { | 
|  | 981 | BUG_ON(!list_empty(&sh->r5c)); | 
|  | 982 | sh->log_start = io->log_start; | 
|  | 983 | spin_lock_irq(&log->stripe_in_journal_lock); | 
|  | 984 | list_add_tail(&sh->r5c, | 
|  | 985 | &log->stripe_in_journal_list); | 
|  | 986 | spin_unlock_irq(&log->stripe_in_journal_lock); | 
|  | 987 | atomic_inc(&log->stripe_in_journal_count); | 
|  | 988 | } | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 989 | return 0; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 990 | } | 
|  | 991 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 992 | /* add stripe to no_space_stripes, and then wake up reclaim */ | 
|  | 993 | static inline void r5l_add_no_space_stripe(struct r5l_log *log, | 
|  | 994 | struct stripe_head *sh) | 
|  | 995 | { | 
|  | 996 | spin_lock(&log->no_space_stripes_lock); | 
|  | 997 | list_add_tail(&sh->log_list, &log->no_space_stripes); | 
|  | 998 | spin_unlock(&log->no_space_stripes_lock); | 
|  | 999 | } | 
|  | 1000 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1001 | /* | 
|  | 1002 | * running in raid5d; reclaim could wait for raid5d too (when it flushes | 
|  | 1003 | * data from the log to the raid disks), so we shouldn't wait for reclaim here | 
|  | 1004 | */ | 
|  | 1005 | int r5l_write_stripe(struct r5l_log *log, struct stripe_head *sh) | 
|  | 1006 | { | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1007 | struct r5conf *conf = sh->raid_conf; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1008 | int write_disks = 0; | 
|  | 1009 | int data_pages, parity_pages; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1010 | int reserve; | 
|  | 1011 | int i; | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1012 | int ret = 0; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1013 | bool wake_reclaim = false; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1014 |  | 
|  | 1015 | if (!log) | 
|  | 1016 | return -EAGAIN; | 
|  | 1017 | /* Don't support stripe batch */ | 
|  | 1018 | if (sh->log_io || !test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags) || | 
|  | 1019 | test_bit(STRIPE_SYNCING, &sh->state)) { | 
|  | 1020 | /* the stripe is written to log, we start writing it to raid */ | 
|  | 1021 | clear_bit(STRIPE_LOG_TRAPPED, &sh->state); | 
|  | 1022 | return -EAGAIN; | 
|  | 1023 | } | 
|  | 1024 |  | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 1025 | WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); | 
|  | 1026 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1027 | for (i = 0; i < sh->disks; i++) { | 
|  | 1028 | void *addr; | 
|  | 1029 |  | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 1030 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags) || | 
|  | 1031 | test_bit(R5_InJournal, &sh->dev[i].flags)) | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1032 | continue; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 1033 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1034 | write_disks++; | 
|  | 1035 | /* checksum is already calculated in the last run */ | 
|  | 1036 | if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) | 
|  | 1037 | continue; | 
|  | 1038 | addr = kmap_atomic(sh->dev[i].page); | 
| Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 1039 | sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, | 
|  | 1040 | addr, PAGE_SIZE); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1041 | kunmap_atomic(addr); | 
|  | 1042 | } | 
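|  |  | /* one parity page when there is no Q disk (qd_idx < 0), two for RAID6 */ | 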
|  | 1043 | parity_pages = 1 + !!(sh->qd_idx >= 0); | 
|  | 1044 | data_pages = write_disks - parity_pages; | 
|  | 1045 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1046 | set_bit(STRIPE_LOG_TRAPPED, &sh->state); | 
| Shaohua Li | 253f9fd4 | 2015-09-04 14:14:16 -0700 | [diff] [blame] | 1047 | /* | 
|  | 1048 | * The stripe must enter state machine again to finish the write, so | 
|  | 1049 | * don't delay. | 
|  | 1050 | */ | 
|  | 1051 | clear_bit(STRIPE_DELAYED, &sh->state); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1052 | atomic_inc(&sh->count); | 
|  | 1053 |  | 
|  | 1054 | mutex_lock(&log->io_mutex); | 
|  | 1055 | /* meta block + one page per written disk, in 512B sectors */ | 
|  | 1056 | reserve = (1 + write_disks) << (PAGE_SHIFT - 9); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1057 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1058 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { | 
|  | 1059 | if (!r5l_has_free_space(log, reserve)) { | 
|  | 1060 | r5l_add_no_space_stripe(log, sh); | 
|  | 1061 | wake_reclaim = true; | 
|  | 1062 | } else { | 
|  | 1063 | ret = r5l_log_stripe(log, sh, data_pages, parity_pages); | 
|  | 1064 | if (ret) { | 
|  | 1065 | spin_lock_irq(&log->io_list_lock); | 
|  | 1066 | list_add_tail(&sh->log_list, | 
|  | 1067 | &log->no_mem_stripes); | 
|  | 1068 | spin_unlock_irq(&log->io_list_lock); | 
|  | 1069 | } | 
|  | 1070 | } | 
|  | 1071 | } else {  /* R5C_JOURNAL_MODE_WRITE_BACK */ | 
|  | 1072 | /* | 
|  | 1073 | * log space is critical: do not process stripes that are | 
|  | 1074 | * not in cache yet (sh->log_start == MaxSector). | 
|  | 1075 | */ | 
|  | 1076 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && | 
|  | 1077 | sh->log_start == MaxSector) { | 
|  | 1078 | r5l_add_no_space_stripe(log, sh); | 
|  | 1079 | wake_reclaim = true; | 
|  | 1080 | reserve = 0; | 
|  | 1081 | } else if (!r5l_has_free_space(log, reserve)) { | 
|  | 1082 | if (sh->log_start == log->last_checkpoint) | 
|  | 1083 | BUG(); | 
|  | 1084 | else | 
|  | 1085 | r5l_add_no_space_stripe(log, sh); | 
|  | 1086 | } else { | 
|  | 1087 | ret = r5l_log_stripe(log, sh, data_pages, parity_pages); | 
|  | 1088 | if (ret) { | 
|  | 1089 | spin_lock_irq(&log->io_list_lock); | 
|  | 1090 | list_add_tail(&sh->log_list, | 
|  | 1091 | &log->no_mem_stripes); | 
|  | 1092 | spin_unlock_irq(&log->io_list_lock); | 
|  | 1093 | } | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1094 | } | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1095 | } | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1096 |  | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1097 | mutex_unlock(&log->io_mutex); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1098 | if (wake_reclaim) | 
|  | 1099 | r5l_wake_reclaim(log, reserve); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1100 | return 0; | 
|  | 1101 | } | 
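|  |  |  | 
|  |  | /* | 
|  |  |  * Caller-side sketch (illustration only; see the ops_run_io path in | 
|  |  |  * raid5.c for the real call site): | 
|  |  |  * | 
|  |  |  *	if (r5l_write_stripe(conf->log, sh) == 0) | 
|  |  |  *		return;	/* journal took ownership of the stripe */ | 
|  |  |  *	/* -EAGAIN: handle the stripe through the normal raid path */ | 
|  |  |  */ | 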
|  | 1102 |  | 
|  | 1103 | void r5l_write_stripe_run(struct r5l_log *log) | 
|  | 1104 | { | 
|  | 1105 | if (!log) | 
|  | 1106 | return; | 
|  | 1107 | mutex_lock(&log->io_mutex); | 
|  | 1108 | r5l_submit_current_io(log); | 
|  | 1109 | mutex_unlock(&log->io_mutex); | 
|  | 1110 | } | 
|  | 1111 |  | 
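|  |  | /* | 
|  |  |  * Handle a REQ_PREFLUSH request. Returns 0 if the flush is fully handled | 
|  |  |  * here, -ENODEV if there is no log, and -EAGAIN if the caller should keep | 
|  |  |  * processing the bio itself (with REQ_PREFLUSH stripped in write-through | 
|  |  |  * mode). | 
|  |  |  */ | 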
| Shaohua Li | 828cbe9 | 2015-09-02 13:49:49 -0700 | [diff] [blame] | 1112 | int r5l_handle_flush_request(struct r5l_log *log, struct bio *bio) | 
|  | 1113 | { | 
|  | 1114 | if (!log) | 
|  | 1115 | return -ENODEV; | 
| Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 1116 |  | 
|  | 1117 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) { | 
|  | 1118 | /* | 
|  | 1119 | * in write through (journal only) mode, | 
|  | 1120 | * we flush the log disk cache first, then write stripe data to | 
|  | 1121 | * raid disks. So when the bio finishes, the log disk cache is | 
|  | 1122 | * already flushed. Recovery guarantees we can recover | 
|  | 1123 | * the bio from the log disk, so we don't need to flush again | 
|  | 1124 | */ | 
|  | 1125 | if (bio->bi_iter.bi_size == 0) { | 
|  | 1126 | bio_endio(bio); | 
|  | 1127 | return 0; | 
|  | 1128 | } | 
|  | 1129 | bio->bi_opf &= ~REQ_PREFLUSH; | 
|  | 1130 | } else { | 
|  | 1131 | /* write back (with cache) */ | 
|  | 1132 | if (bio->bi_iter.bi_size == 0) { | 
|  | 1133 | mutex_lock(&log->io_mutex); | 
|  | 1134 | r5l_get_meta(log, 0); | 
|  | 1135 | bio_list_add(&log->current_io->flush_barriers, bio); | 
|  | 1136 | log->current_io->has_flush = 1; | 
|  | 1137 | log->current_io->has_null_flush = 1; | 
|  | 1138 | atomic_inc(&log->current_io->pending_stripe); | 
|  | 1139 | r5l_submit_current_io(log); | 
|  | 1140 | mutex_unlock(&log->io_mutex); | 
|  | 1141 | return 0; | 
|  | 1142 | } | 
| Shaohua Li | 828cbe9 | 2015-09-02 13:49:49 -0700 | [diff] [blame] | 1143 | } | 
| Shaohua Li | 828cbe9 | 2015-09-02 13:49:49 -0700 | [diff] [blame] | 1144 | return -EAGAIN; | 
|  | 1145 | } | 
|  | 1146 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1147 | /* This will run after log space is reclaimed */ | 
|  | 1148 | static void r5l_run_no_space_stripes(struct r5l_log *log) | 
|  | 1149 | { | 
|  | 1150 | struct stripe_head *sh; | 
|  | 1151 |  | 
|  | 1152 | spin_lock(&log->no_space_stripes_lock); | 
|  | 1153 | while (!list_empty(&log->no_space_stripes)) { | 
|  | 1154 | sh = list_first_entry(&log->no_space_stripes, | 
|  | 1155 | struct stripe_head, log_list); | 
|  | 1156 | list_del_init(&sh->log_list); | 
|  | 1157 | set_bit(STRIPE_HANDLE, &sh->state); | 
|  | 1158 | raid5_release_stripe(sh); | 
|  | 1159 | } | 
|  | 1160 | spin_unlock(&log->no_space_stripes_lock); | 
|  | 1161 | } | 
|  | 1162 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1163 | /* | 
|  | 1164 | * calculate new last_checkpoint | 
|  | 1165 | * for write through mode, returns log->next_checkpoint | 
|  | 1166 | * for write back, returns log_start of first sh in stripe_in_journal_list | 
|  | 1167 | */ | 
|  | 1168 | static sector_t r5c_calculate_new_cp(struct r5conf *conf) | 
|  | 1169 | { | 
|  | 1170 | struct stripe_head *sh; | 
|  | 1171 | struct r5l_log *log = conf->log; | 
|  | 1172 | sector_t new_cp; | 
|  | 1173 | unsigned long flags; | 
|  | 1174 |  | 
|  | 1175 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) | 
|  | 1176 | return log->next_checkpoint; | 
|  | 1177 |  | 
|  | 1178 | spin_lock_irqsave(&log->stripe_in_journal_lock, flags); | 
|  | 1179 | if (list_empty(&conf->log->stripe_in_journal_list)) { | 
|  | 1180 | /* all stripes flushed */ | 
| Dan Carpenter | d3014e2 | 2016-11-24 14:13:04 +0300 | [diff] [blame] | 1181 | spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1182 | return log->next_checkpoint; | 
|  | 1183 | } | 
|  | 1184 | sh = list_first_entry(&conf->log->stripe_in_journal_list, | 
|  | 1185 | struct stripe_head, r5c); | 
|  | 1186 | new_cp = sh->log_start; | 
|  | 1187 | spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); | 
|  | 1188 | return new_cp; | 
|  | 1189 | } | 
|  | 1190 |  | 
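|  |  | /* | 
|  |  |  * Reclaimable space is the ring distance from last_checkpoint to the new | 
|  |  |  * checkpoint candidate. For illustration: on a 1000-sector log, the | 
|  |  |  * distance from sector 900 to sector 100 is 200, since the log wraps. | 
|  |  |  */ | 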
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1191 | static sector_t r5l_reclaimable_space(struct r5l_log *log) | 
|  | 1192 | { | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1193 | struct r5conf *conf = log->rdev->mddev->private; | 
|  | 1194 |  | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1195 | return r5l_ring_distance(log, log->last_checkpoint, | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1196 | r5c_calculate_new_cp(conf)); | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1197 | } | 
|  | 1198 |  | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1199 | static void r5l_run_no_mem_stripe(struct r5l_log *log) | 
|  | 1200 | { | 
|  | 1201 | struct stripe_head *sh; | 
|  | 1202 |  | 
| Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1203 | lockdep_assert_held(&log->io_list_lock); | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1204 |  | 
|  | 1205 | if (!list_empty(&log->no_mem_stripes)) { | 
|  | 1206 | sh = list_first_entry(&log->no_mem_stripes, | 
|  | 1207 | struct stripe_head, log_list); | 
|  | 1208 | list_del_init(&sh->log_list); | 
|  | 1209 | set_bit(STRIPE_HANDLE, &sh->state); | 
|  | 1210 | raid5_release_stripe(sh); | 
|  | 1211 | } | 
|  | 1212 | } | 
|  | 1213 |  | 
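|  |  | /* | 
|  |  |  * Pop io_units that reached IO_UNIT_STRIPE_END from the head of | 
|  |  |  * finished_ios, advancing next_checkpoint past each one; stop at the | 
|  |  |  * first io_unit that is still in flight so log order is preserved. | 
|  |  |  */ | 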
| Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1214 | static bool r5l_complete_finished_ios(struct r5l_log *log) | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1215 | { | 
|  | 1216 | struct r5l_io_unit *io, *next; | 
|  | 1217 | bool found = false; | 
|  | 1218 |  | 
| Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1219 | lockdep_assert_held(&log->io_list_lock); | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1220 |  | 
| Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1221 | list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) { | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1222 | /* don't change list order */ | 
|  | 1223 | if (io->state < IO_UNIT_STRIPE_END) | 
|  | 1224 | break; | 
|  | 1225 |  | 
|  | 1226 | log->next_checkpoint = io->log_start; | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1227 |  | 
|  | 1228 | list_del(&io->log_sibling); | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1229 | mempool_free(io, log->io_pool); | 
|  | 1230 | r5l_run_no_mem_stripe(log); | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1231 |  | 
|  | 1232 | found = true; | 
|  | 1233 | } | 
|  | 1234 |  | 
|  | 1235 | return found; | 
|  | 1236 | } | 
|  | 1237 |  | 
| Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1238 | static void __r5l_stripe_write_finished(struct r5l_io_unit *io) | 
|  | 1239 | { | 
|  | 1240 | struct r5l_log *log = io->log; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1241 | struct r5conf *conf = log->rdev->mddev->private; | 
| Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1242 | unsigned long flags; | 
|  | 1243 |  | 
|  | 1244 | spin_lock_irqsave(&log->io_list_lock, flags); | 
|  | 1245 | __r5l_set_io_unit_state(io, IO_UNIT_STRIPE_END); | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1246 |  | 
| Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1247 | if (!r5l_complete_finished_ios(log)) { | 
| Shaohua Li | 85f2f9a | 2015-09-04 14:14:05 -0700 | [diff] [blame] | 1248 | spin_unlock_irqrestore(&log->io_list_lock, flags); | 
|  | 1249 | return; | 
|  | 1250 | } | 
| Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1251 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1252 | if (r5l_reclaimable_space(log) > log->max_free_space || | 
|  | 1253 | test_bit(R5C_LOG_TIGHT, &conf->cache_state)) | 
| Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1254 | r5l_wake_reclaim(log, 0); | 
|  | 1255 |  | 
| Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1256 | spin_unlock_irqrestore(&log->io_list_lock, flags); | 
|  | 1257 | wake_up(&log->iounit_wait); | 
|  | 1258 | } | 
|  | 1259 |  | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1260 | void r5l_stripe_write_finished(struct stripe_head *sh) | 
|  | 1261 | { | 
|  | 1262 | struct r5l_io_unit *io; | 
|  | 1263 |  | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1264 | io = sh->log_io; | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1265 | sh->log_io = NULL; | 
|  | 1266 |  | 
| Christoph Hellwig | 509ffec | 2015-09-02 13:49:48 -0700 | [diff] [blame] | 1267 | if (io && atomic_dec_and_test(&io->pending_stripe)) | 
|  | 1268 | __r5l_stripe_write_finished(io); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1269 | } | 
|  | 1270 |  | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1271 | static void r5l_log_flush_endio(struct bio *bio) | 
|  | 1272 | { | 
|  | 1273 | struct r5l_log *log = container_of(bio, struct r5l_log, | 
|  | 1274 | flush_bio); | 
|  | 1275 | unsigned long flags; | 
|  | 1276 | struct r5l_io_unit *io; | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1277 |  | 
| Christoph Hellwig | 4e4cbee | 2017-06-03 09:38:06 +0200 | [diff] [blame] | 1278 | if (bio->bi_status) | 
| Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1279 | md_error(log->rdev->mddev, log->rdev); | 
|  | 1280 |  | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1281 | spin_lock_irqsave(&log->io_list_lock, flags); | 
| Christoph Hellwig | d8858f4 | 2015-10-05 09:31:08 +0200 | [diff] [blame] | 1282 | list_for_each_entry(io, &log->flushing_ios, log_sibling) | 
|  | 1283 | r5l_io_run_stripes(io); | 
| Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1284 | list_splice_tail_init(&log->flushing_ios, &log->finished_ios); | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1285 | spin_unlock_irqrestore(&log->io_list_lock, flags); | 
|  | 1286 | } | 
|  | 1287 |  | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1288 | /* | 
|  | 1289 | * Start dispatching IO to raid. | 
|  | 1290 | * The log consists of io_units, each led by a meta block. There is one | 
|  | 1291 | * situation we want to avoid: a broken meta block in the middle of the log | 
|  | 1292 | * means recovery cannot find the meta blocks after it. So if an operation | 
|  | 1293 | * requires a meta block to be persistent in the log, every meta block | 
|  | 1294 | * before it must be persistent in the log too. A case is: | 
|  | 1295 | * | 
|  | 1296 | * stripe data/parity is in the log and we start writing the stripe to raid | 
|  | 1297 | * disks; the data/parity must be persistent in the log before the write. | 
|  | 1298 | * | 
|  | 1299 | * The solution is to strictly maintain io_unit list order: stripes of an | 
|  | 1300 | * io_unit are written to raid only after all earlier io_units are persistent. | 
|  | 1301 | */ | 
|  | 1302 | void r5l_flush_stripe_to_raid(struct r5l_log *log) | 
|  | 1303 | { | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1304 | bool do_flush; | 
| Christoph Hellwig | 56fef7c | 2015-10-05 09:31:09 +0200 | [diff] [blame] | 1305 |  | 
|  | 1306 | if (!log || !log->need_cache_flush) | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1307 | return; | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1308 |  | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1309 | spin_lock_irq(&log->io_list_lock); | 
|  | 1310 | /* flush bio is running */ | 
|  | 1311 | if (!list_empty(&log->flushing_ios)) { | 
|  | 1312 | spin_unlock_irq(&log->io_list_lock); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1313 | return; | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1314 | } | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1315 | list_splice_tail_init(&log->io_end_ios, &log->flushing_ios); | 
|  | 1316 | do_flush = !list_empty(&log->flushing_ios); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1317 | spin_unlock_irq(&log->io_list_lock); | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1318 |  | 
|  | 1319 | if (!do_flush) | 
|  | 1320 | return; | 
|  | 1321 | bio_reset(&log->flush_bio); | 
| Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1322 | bio_set_dev(&log->flush_bio, log->rdev->bdev); | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1323 | log->flush_bio.bi_end_io = r5l_log_flush_endio; | 
| Christoph Hellwig | 70fd761 | 2016-11-01 07:40:10 -0600 | [diff] [blame] | 1324 | log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; | 
| Mike Christie | 4e49ea4 | 2016-06-05 14:31:41 -0500 | [diff] [blame] | 1325 | submit_bio(&log->flush_bio); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1326 | } | 
|  | 1327 |  | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1328 | static void r5l_write_super(struct r5l_log *log, sector_t cp); | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1329 | static void r5l_write_super_and_discard_space(struct r5l_log *log, | 
|  | 1330 | sector_t end) | 
|  | 1331 | { | 
|  | 1332 | struct block_device *bdev = log->rdev->bdev; | 
|  | 1333 | struct mddev *mddev; | 
|  | 1334 |  | 
|  | 1335 | r5l_write_super(log, end); | 
|  | 1336 |  | 
|  | 1337 | if (!blk_queue_discard(bdev_get_queue(bdev))) | 
|  | 1338 | return; | 
|  | 1339 |  | 
|  | 1340 | mddev = log->rdev->mddev; | 
|  | 1341 | /* | 
| Shaohua Li | 8e018c2 | 2016-08-25 10:09:39 -0700 | [diff] [blame] | 1342 | * Discard could zero data, so before discard we must make sure the | 
|  | 1343 | * superblock is updated to the new log tail. Updating the superblock (either | 
|  | 1344 | * by calling md_update_sb() directly or depending on the md thread) must | 
|  | 1345 | * hold the reconfig mutex. On the other hand, raid5_quiesce is called with | 
|  | 1346 | * the reconfig_mutex held. The first step of raid5_quiesce() is waiting for | 
|  | 1347 | * all IO to finish, hence waiting for the reclaim thread, while the reclaim | 
|  | 1348 | * thread is calling this function and waiting for the reconfig mutex. So | 
|  | 1349 | * there is a deadlock. We work around this issue with a trylock. | 
|  | 1350 | * FIXME: we could miss discard if we can't take reconfig mutex | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1351 | */ | 
| Shaohua Li | 2953079 | 2016-12-08 15:48:19 -0800 | [diff] [blame] | 1352 | set_mask_bits(&mddev->sb_flags, 0, | 
|  | 1353 | BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); | 
| Shaohua Li | 8e018c2 | 2016-08-25 10:09:39 -0700 | [diff] [blame] | 1354 | if (!mddev_trylock(mddev)) | 
|  | 1355 | return; | 
|  | 1356 | md_update_sb(mddev, 1); | 
|  | 1357 | mddev_unlock(mddev); | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1358 |  | 
| Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1359 | /* discard IO error really doesn't matter, ignore it */ | 
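|  |  | /* | 
|  |  |  * If the log tail did not wrap, discard [last_checkpoint, end); | 
|  |  |  * otherwise discard [last_checkpoint, device_size) and [0, end) | 
|  |  |  * separately. | 
|  |  |  */ | 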
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1360 | if (log->last_checkpoint < end) { | 
|  | 1361 | blkdev_issue_discard(bdev, | 
|  | 1362 | log->last_checkpoint + log->rdev->data_offset, | 
|  | 1363 | end - log->last_checkpoint, GFP_NOIO, 0); | 
|  | 1364 | } else { | 
|  | 1365 | blkdev_issue_discard(bdev, | 
|  | 1366 | log->last_checkpoint + log->rdev->data_offset, | 
|  | 1367 | log->device_size - log->last_checkpoint, | 
|  | 1368 | GFP_NOIO, 0); | 
|  | 1369 | blkdev_issue_discard(bdev, log->rdev->data_offset, end, | 
|  | 1370 | GFP_NOIO, 0); | 
|  | 1371 | } | 
|  | 1372 | } | 
|  | 1373 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1374 | /* | 
|  | 1375 | * r5c_flush_stripe moves stripe from cached list to handle_list. When called, | 
|  | 1376 | * the stripe must be on r5c_cached_full_stripes or r5c_cached_partial_stripes. | 
|  | 1377 | * | 
|  | 1378 | * must hold conf->device_lock | 
|  | 1379 | */ | 
|  | 1380 | static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh) | 
|  | 1381 | { | 
|  | 1382 | BUG_ON(list_empty(&sh->lru)); | 
|  | 1383 | BUG_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); | 
|  | 1384 | BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); | 
|  | 1385 |  | 
|  | 1386 | /* | 
|  | 1387 | * The stripe is not ON_RELEASE_LIST, so it is safe to call | 
|  | 1388 | * raid5_release_stripe() while holding conf->device_lock | 
|  | 1389 | */ | 
|  | 1390 | BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state)); | 
| Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1391 | lockdep_assert_held(&conf->device_lock); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1392 |  | 
|  | 1393 | list_del_init(&sh->lru); | 
|  | 1394 | atomic_inc(&sh->count); | 
|  | 1395 |  | 
|  | 1396 | set_bit(STRIPE_HANDLE, &sh->state); | 
|  | 1397 | atomic_inc(&conf->active_stripes); | 
|  | 1398 | r5c_make_stripe_write_out(sh); | 
|  | 1399 |  | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1400 | if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) | 
|  | 1401 | atomic_inc(&conf->r5c_flushing_partial_stripes); | 
|  | 1402 | else | 
|  | 1403 | atomic_inc(&conf->r5c_flushing_full_stripes); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1404 | raid5_release_stripe(sh); | 
|  | 1405 | } | 
|  | 1406 |  | 
|  | 1407 | /* | 
|  | 1408 | * if num == 0, flush all full stripes | 
|  | 1409 | * if num > 0, flush all full stripes. If less than num full stripes are | 
|  | 1410 | *             flushed, flush some partial stripes until a total of num stripes | 
|  | 1411 | *             are flushed or there are no more cached stripes. | 
|  | 1412 | */ | 
|  | 1413 | void r5c_flush_cache(struct r5conf *conf, int num) | 
|  | 1414 | { | 
|  | 1415 | int count; | 
|  | 1416 | struct stripe_head *sh, *next; | 
|  | 1417 |  | 
| Shaohua Li | efa4b77 | 2017-10-18 22:08:13 -0700 | [diff] [blame] | 1418 | lockdep_assert_held(&conf->device_lock); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1419 | if (!conf->log) | 
|  | 1420 | return; | 
|  | 1421 |  | 
|  | 1422 | count = 0; | 
|  | 1423 | list_for_each_entry_safe(sh, next, &conf->r5c_full_stripe_list, lru) { | 
|  | 1424 | r5c_flush_stripe(conf, sh); | 
|  | 1425 | count++; | 
|  | 1426 | } | 
|  | 1427 |  | 
|  | 1428 | if (count >= num) | 
|  | 1429 | return; | 
|  | 1430 | list_for_each_entry_safe(sh, next, | 
|  | 1431 | &conf->r5c_partial_stripe_list, lru) { | 
|  | 1432 | r5c_flush_stripe(conf, sh); | 
|  | 1433 | if (++count >= num) | 
|  | 1434 | break; | 
|  | 1435 | } | 
|  | 1436 | } | 
|  | 1437 |  | 
|  | 1438 | static void r5c_do_reclaim(struct r5conf *conf) | 
|  | 1439 | { | 
|  | 1440 | struct r5l_log *log = conf->log; | 
|  | 1441 | struct stripe_head *sh; | 
|  | 1442 | int count = 0; | 
|  | 1443 | unsigned long flags; | 
|  | 1444 | int total_cached; | 
|  | 1445 | int stripes_to_flush; | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1446 | int flushing_partial, flushing_full; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1447 |  | 
|  | 1448 | if (!r5c_is_writeback(log)) | 
|  | 1449 | return; | 
|  | 1450 |  | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1451 | flushing_partial = atomic_read(&conf->r5c_flushing_partial_stripes); | 
|  | 1452 | flushing_full = atomic_read(&conf->r5c_flushing_full_stripes); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1453 | total_cached = atomic_read(&conf->r5c_cached_partial_stripes) + | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1454 | atomic_read(&conf->r5c_cached_full_stripes) - | 
|  | 1455 | flushing_full - flushing_partial; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1456 |  | 
|  | 1457 | if (total_cached > conf->min_nr_stripes * 3 / 4 || | 
|  | 1458 | atomic_read(&conf->empty_inactive_list_nr) > 0) | 
|  | 1459 | /* | 
|  | 1460 | * if stripe cache pressure is high, flush all full stripes and | 
|  | 1461 | * some partial stripes | 
|  | 1462 | */ | 
|  | 1463 | stripes_to_flush = R5C_RECLAIM_STRIPE_GROUP; | 
|  | 1464 | else if (total_cached > conf->min_nr_stripes * 1 / 2 || | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 1465 | atomic_read(&conf->r5c_cached_full_stripes) - flushing_full > | 
| Shaohua Li | 84890c0 | 2017-02-15 19:58:05 -0800 | [diff] [blame] | 1466 | R5C_FULL_STRIPE_FLUSH_BATCH(conf)) | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1467 | /* | 
|  | 1468 | * if stripe cache pressure is moderate, or if there are many full | 
|  | 1469 | * stripes, flush all full stripes | 
|  | 1470 | */ | 
|  | 1471 | stripes_to_flush = 0; | 
|  | 1472 | else | 
|  | 1473 | /* no need to flush */ | 
|  | 1474 | stripes_to_flush = -1; | 
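|  |  | /* | 
|  |  |  * For illustration, with min_nr_stripes == 256: more than 192 cached | 
|  |  |  * stripes counts as high pressure, more than 128 as moderate. | 
|  |  |  */ | 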
|  | 1475 |  | 
|  | 1476 | if (stripes_to_flush >= 0) { | 
|  | 1477 | spin_lock_irqsave(&conf->device_lock, flags); | 
|  | 1478 | r5c_flush_cache(conf, stripes_to_flush); | 
|  | 1479 | spin_unlock_irqrestore(&conf->device_lock, flags); | 
|  | 1480 | } | 
|  | 1481 |  | 
|  | 1482 | /* if log space is tight, flush stripes on stripe_in_journal_list */ | 
|  | 1483 | if (test_bit(R5C_LOG_TIGHT, &conf->cache_state)) { | 
|  | 1484 | spin_lock_irqsave(&log->stripe_in_journal_lock, flags); | 
|  | 1485 | spin_lock(&conf->device_lock); | 
|  | 1486 | list_for_each_entry(sh, &log->stripe_in_journal_list, r5c) { | 
|  | 1487 | /* | 
|  | 1488 | * stripes on stripe_in_journal_list could be in any | 
|  | 1489 | * state of the stripe_cache state machine. In this | 
|  | 1490 | * case, we only want to flush stripes on | 
|  | 1491 | * r5c_cached_full/partial_stripes. The following | 
|  | 1492 | * condition makes sure the stripe is on one of the | 
|  | 1493 | * two lists. | 
|  | 1494 | */ | 
|  | 1495 | if (!list_empty(&sh->lru) && | 
|  | 1496 | !test_bit(STRIPE_HANDLE, &sh->state) && | 
|  | 1497 | atomic_read(&sh->count) == 0) { | 
|  | 1498 | r5c_flush_stripe(conf, sh); | 
| Shaohua Li | e8fd52e | 2017-02-10 16:18:08 -0800 | [diff] [blame] | 1499 | if (count++ >= R5C_RECLAIM_STRIPE_GROUP) | 
|  | 1500 | break; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1501 | } | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1502 | } | 
|  | 1503 | spin_unlock(&conf->device_lock); | 
|  | 1504 | spin_unlock_irqrestore(&log->stripe_in_journal_lock, flags); | 
|  | 1505 | } | 
| Song Liu | f687a33 | 2016-11-30 16:57:54 -0800 | [diff] [blame] | 1506 |  | 
|  | 1507 | if (!test_bit(R5C_LOG_CRITICAL, &conf->cache_state)) | 
|  | 1508 | r5l_run_no_space_stripes(log); | 
|  | 1509 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1510 | md_wakeup_thread(conf->mddev->thread); | 
|  | 1511 | } | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1512 |  | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1513 | static void r5l_do_reclaim(struct r5l_log *log) | 
|  | 1514 | { | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1515 | struct r5conf *conf = log->rdev->mddev->private; | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1516 | sector_t reclaim_target = xchg(&log->reclaim_target, 0); | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1517 | sector_t reclaimable; | 
|  | 1518 | sector_t next_checkpoint; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1519 | bool write_super; | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1520 |  | 
|  | 1521 | spin_lock_irq(&log->io_list_lock); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1522 | write_super = r5l_reclaimable_space(log) > log->max_free_space || | 
|  | 1523 | reclaim_target != 0 || !list_empty(&log->no_space_stripes); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1524 | /* | 
|  | 1525 | * move proper io_units to the reclaim list. We should not change the order: | 
|  | 1526 | * reclaimable and unreclaimable io_units can be mixed in the list, and we | 
|  | 1527 | * shouldn't reuse the space of an unreclaimable io_unit | 
|  | 1528 | */ | 
|  | 1529 | while (1) { | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1530 | reclaimable = r5l_reclaimable_space(log); | 
|  | 1531 | if (reclaimable >= reclaim_target || | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1532 | (list_empty(&log->running_ios) && | 
|  | 1533 | list_empty(&log->io_end_ios) && | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 1534 | list_empty(&log->flushing_ios) && | 
| Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 1535 | list_empty(&log->finished_ios))) | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1536 | break; | 
|  | 1537 |  | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1538 | md_wakeup_thread(log->rdev->mddev->thread); | 
|  | 1539 | wait_event_lock_irq(log->iounit_wait, | 
|  | 1540 | r5l_reclaimable_space(log) > reclaimable, | 
|  | 1541 | log->io_list_lock); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1542 | } | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1543 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1544 | next_checkpoint = r5c_calculate_new_cp(conf); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1545 | spin_unlock_irq(&log->io_list_lock); | 
|  | 1546 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1547 | if (reclaimable == 0 || !write_super) | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1548 | return; | 
|  | 1549 |  | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1550 | /* | 
|  | 1551 | * write_super will flush the cache of each raid disk. We must write the super | 
|  | 1552 | * here, because the log area might be reused soon and we don't want to | 
|  | 1553 | * confuse recovery | 
|  | 1554 | */ | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1555 | r5l_write_super_and_discard_space(log, next_checkpoint); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1556 |  | 
|  | 1557 | mutex_lock(&log->io_mutex); | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1558 | log->last_checkpoint = next_checkpoint; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1559 | r5c_update_log_state(log); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1560 | mutex_unlock(&log->io_mutex); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1561 |  | 
| Christoph Hellwig | 1703646 | 2015-10-05 09:31:06 +0200 | [diff] [blame] | 1562 | r5l_run_no_space_stripes(log); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1563 | } | 
|  | 1564 |  | 
|  | 1565 | static void r5l_reclaim_thread(struct md_thread *thread) | 
|  | 1566 | { | 
|  | 1567 | struct mddev *mddev = thread->mddev; | 
|  | 1568 | struct r5conf *conf = mddev->private; | 
|  | 1569 | struct r5l_log *log = conf->log; | 
|  | 1570 |  | 
|  | 1571 | if (!log) | 
|  | 1572 | return; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1573 | r5c_do_reclaim(conf); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1574 | r5l_do_reclaim(log); | 
|  | 1575 | } | 
|  | 1576 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1577 | void r5l_wake_reclaim(struct r5l_log *log, sector_t space) | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1578 | { | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1579 | unsigned long target; | 
|  | 1580 | unsigned long new = (unsigned long)space; /* overflow in theory */ | 
|  | 1581 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1582 | if (!log) | 
|  | 1583 | return; | 
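|  |  | /* | 
|  |  |  * Lock-free monotonic update: only ever raise reclaim_target; a racing | 
|  |  |  * caller asking for a smaller target simply returns. | 
|  |  |  */ | 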
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 1584 | do { | 
|  | 1585 | target = log->reclaim_target; | 
|  | 1586 | if (new < target) | 
|  | 1587 | return; | 
|  | 1588 | } while (cmpxchg(&log->reclaim_target, target, new) != target); | 
|  | 1589 | md_wakeup_thread(log->reclaim_thread); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 1590 | } | 
|  | 1591 |  | 
| NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1592 | void r5l_quiesce(struct r5l_log *log, int quiesce) | 
| Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1593 | { | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1594 | struct mddev *mddev; | 
| NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1595 | if (!log) | 
| Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1596 | return; | 
| NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1597 |  | 
|  | 1598 | if (quiesce) { | 
| Shaohua Li | 4b48204 | 2015-10-08 21:54:06 -0700 | [diff] [blame] | 1599 | /* make sure r5l_write_super_and_discard_space exits */ | 
|  | 1600 | mddev = log->rdev->mddev; | 
|  | 1601 | wake_up(&mddev->sb_wait); | 
| Shaohua Li | ce1ccd0 | 2016-11-21 10:29:18 -0800 | [diff] [blame] | 1602 | kthread_park(log->reclaim_thread->tsk); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 1603 | r5l_wake_reclaim(log, MaxSector); | 
| Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1604 | r5l_do_reclaim(log); | 
| NeilBrown | b03e0cc | 2017-10-19 12:49:15 +1100 | [diff] [blame] | 1605 | } else | 
|  | 1606 | kthread_unpark(log->reclaim_thread->tsk); | 
| Shaohua Li | e6c033f | 2015-10-04 09:20:12 -0700 | [diff] [blame] | 1607 | } | 
|  | 1608 |  | 
| Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1609 | bool r5l_log_disk_error(struct r5conf *conf) | 
|  | 1610 | { | 
| Shaohua Li | f6b6ec5 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1611 | struct r5l_log *log; | 
|  | 1612 | bool ret; | 
| Shaohua Li | 7dde2ad | 2015-10-08 21:54:10 -0700 | [diff] [blame] | 1613 | /* don't allow write if journal disk is missing */ | 
| Shaohua Li | f6b6ec5 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 1614 | rcu_read_lock(); | 
|  | 1615 | log = rcu_dereference(conf->log); | 
|  | 1616 |  | 
|  | 1617 | if (!log) | 
|  | 1618 | ret = test_bit(MD_HAS_JOURNAL, &conf->mddev->flags); | 
|  | 1619 | else | 
|  | 1620 | ret = test_bit(Faulty, &log->rdev->flags); | 
|  | 1621 | rcu_read_unlock(); | 
|  | 1622 | return ret; | 
| Shaohua Li | 6e74a9c | 2015-10-08 21:54:08 -0700 | [diff] [blame] | 1623 | } | 
|  | 1624 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1625 | #define R5L_RECOVERY_PAGE_POOL_SIZE 256 | 
|  | 1626 |  | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1627 | struct r5l_recovery_ctx { | 
|  | 1628 | struct page *meta_page;		/* current meta */ | 
|  | 1629 | sector_t meta_total_blocks;	/* total size of current meta and data */ | 
|  | 1630 | sector_t pos;			/* recovery position */ | 
|  | 1631 | u64 seq;			/* recovery position seq */ | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1632 | int data_parity_stripes;	/* number of data_parity stripes */ | 
|  | 1633 | int data_only_stripes;		/* number of data_only stripes */ | 
|  | 1634 | struct list_head cached_list; | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1635 |  | 
|  | 1636 | /* | 
|  | 1637 | * read ahead page pool (ra_pool) | 
|  | 1638 | * during recovery, the log is read sequentially. It is not efficient to | 
|  | 1639 | * read every page with sync_page_io(). The read ahead page pool | 
|  | 1640 | * reads multiple pages with one IO, so further log reads can | 
|  | 1641 | * just copy data from the pool. | 
|  | 1642 | */ | 
|  | 1643 | struct page *ra_pool[R5L_RECOVERY_PAGE_POOL_SIZE]; | 
|  | 1644 | sector_t pool_offset;	/* offset of first page in the pool */ | 
|  | 1645 | int total_pages;	/* total allocated pages */ | 
|  | 1646 | int valid_pages;	/* pages with valid data */ | 
|  | 1647 | struct bio *ra_bio;	/* bio to do the read ahead */ | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1648 | }; | 
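|  |  | /* | 
|  |  |  * With R5L_RECOVERY_PAGE_POOL_SIZE (256) pages of 4KB each, one fetch | 
|  |  |  * covers up to 1MB of the log; a read outside the cached window simply | 
|  |  |  * refills the pool starting at the new offset. | 
|  |  |  */ | 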
|  | 1649 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1650 | static int r5l_recovery_allocate_ra_pool(struct r5l_log *log, | 
|  | 1651 | struct r5l_recovery_ctx *ctx) | 
|  | 1652 | { | 
|  | 1653 | struct page *page; | 
|  | 1654 |  | 
|  | 1655 | ctx->ra_bio = bio_alloc_bioset(GFP_KERNEL, BIO_MAX_PAGES, log->bs); | 
|  | 1656 | if (!ctx->ra_bio) | 
|  | 1657 | return -ENOMEM; | 
|  | 1658 |  | 
|  | 1659 | ctx->valid_pages = 0; | 
|  | 1660 | ctx->total_pages = 0; | 
|  | 1661 | while (ctx->total_pages < R5L_RECOVERY_PAGE_POOL_SIZE) { | 
|  | 1662 | page = alloc_page(GFP_KERNEL); | 
|  | 1663 |  | 
|  | 1664 | if (!page) | 
|  | 1665 | break; | 
|  | 1666 | ctx->ra_pool[ctx->total_pages] = page; | 
|  | 1667 | ctx->total_pages += 1; | 
|  | 1668 | } | 
|  | 1669 |  | 
|  | 1670 | if (ctx->total_pages == 0) { | 
|  | 1671 | bio_put(ctx->ra_bio); | 
|  | 1672 | return -ENOMEM; | 
|  | 1673 | } | 
|  | 1674 |  | 
|  | 1675 | ctx->pool_offset = 0; | 
|  | 1676 | return 0; | 
|  | 1677 | } | 
|  | 1678 |  | 
|  | 1679 | static void r5l_recovery_free_ra_pool(struct r5l_log *log, | 
|  | 1680 | struct r5l_recovery_ctx *ctx) | 
|  | 1681 | { | 
|  | 1682 | int i; | 
|  | 1683 |  | 
|  | 1684 | for (i = 0; i < ctx->total_pages; ++i) | 
|  | 1685 | put_page(ctx->ra_pool[i]); | 
|  | 1686 | bio_put(ctx->ra_bio); | 
|  | 1687 | } | 
|  | 1688 |  | 
|  | 1689 | /* | 
|  | 1690 | * fetch ctx->valid_pages pages from offset | 
|  | 1691 | * In normal cases, ctx->valid_pages == ctx->total_pages after the call. | 
|  | 1692 | * However, if the offset is close to the end of the journal device, | 
|  | 1693 | * ctx->valid_pages could be smaller than ctx->total_pages | 
|  | 1694 | */ | 
|  | 1695 | static int r5l_recovery_fetch_ra_pool(struct r5l_log *log, | 
|  | 1696 | struct r5l_recovery_ctx *ctx, | 
|  | 1697 | sector_t offset) | 
|  | 1698 | { | 
|  | 1699 | bio_reset(ctx->ra_bio); | 
| Christoph Hellwig | 74d4699 | 2017-08-23 19:10:32 +0200 | [diff] [blame] | 1700 | bio_set_dev(ctx->ra_bio, log->rdev->bdev); | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1701 | bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); | 
|  | 1702 | ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; | 
|  | 1703 |  | 
|  | 1704 | ctx->valid_pages = 0; | 
|  | 1705 | ctx->pool_offset = offset; | 
|  | 1706 |  | 
|  | 1707 | while (ctx->valid_pages < ctx->total_pages) { | 
|  | 1708 | bio_add_page(ctx->ra_bio, | 
|  | 1709 | ctx->ra_pool[ctx->valid_pages], PAGE_SIZE, 0); | 
|  | 1710 | ctx->valid_pages += 1; | 
|  | 1711 |  | 
|  | 1712 | offset = r5l_ring_add(log, offset, BLOCK_SECTORS); | 
|  | 1713 |  | 
|  | 1714 | if (offset == 0)  /* reached end of the device */ | 
|  | 1715 | break; | 
|  | 1716 | } | 
|  | 1717 |  | 
|  | 1718 | return submit_bio_wait(ctx->ra_bio); | 
|  | 1719 | } | 
|  | 1720 |  | 
|  | 1721 | /* | 
|  | 1722 | * try to read a page from the read ahead page pool; if the page is not in | 
|  | 1723 | * the pool, call r5l_recovery_fetch_ra_pool to refill it | 
|  | 1724 | */ | 
|  | 1725 | static int r5l_recovery_read_page(struct r5l_log *log, | 
|  | 1726 | struct r5l_recovery_ctx *ctx, | 
|  | 1727 | struct page *page, | 
|  | 1728 | sector_t offset) | 
|  | 1729 | { | 
|  | 1730 | int ret; | 
|  | 1731 |  | 
|  | 1732 | if (offset < ctx->pool_offset || | 
|  | 1733 | offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS) { | 
|  | 1734 | ret = r5l_recovery_fetch_ra_pool(log, ctx, offset); | 
|  | 1735 | if (ret) | 
|  | 1736 | return ret; | 
|  | 1737 | } | 
|  | 1738 |  | 
|  | 1739 | BUG_ON(offset < ctx->pool_offset || | 
|  | 1740 | offset >= ctx->pool_offset + ctx->valid_pages * BLOCK_SECTORS); | 
|  | 1741 |  | 
|  | 1742 | memcpy(page_address(page), | 
|  | 1743 | page_address(ctx->ra_pool[(offset - ctx->pool_offset) >> | 
|  | 1744 | BLOCK_SECTOR_SHIFT]), | 
|  | 1745 | PAGE_SIZE); | 
|  | 1746 | return 0; | 
|  | 1747 | } | 
|  | 1748 |  | 
| Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1749 | static int r5l_recovery_read_meta_block(struct r5l_log *log, | 
|  | 1750 | struct r5l_recovery_ctx *ctx) | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1751 | { | 
|  | 1752 | struct page *page = ctx->meta_page; | 
|  | 1753 | struct r5l_meta_block *mb; | 
|  | 1754 | u32 crc, stored_crc; | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1755 | int ret; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1756 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1757 | ret = r5l_recovery_read_page(log, ctx, page, ctx->pos); | 
|  | 1758 | if (ret != 0) | 
|  | 1759 | return ret; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1760 |  | 
|  | 1761 | mb = page_address(page); | 
|  | 1762 | stored_crc = le32_to_cpu(mb->checksum); | 
|  | 1763 | mb->checksum = 0; | 
|  | 1764 |  | 
|  | 1765 | if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || | 
|  | 1766 | le64_to_cpu(mb->seq) != ctx->seq || | 
|  | 1767 | mb->version != R5LOG_VERSION || | 
|  | 1768 | le64_to_cpu(mb->position) != ctx->pos) | 
|  | 1769 | return -EINVAL; | 
|  | 1770 |  | 
| Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 1771 | crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1772 | if (stored_crc != crc) | 
|  | 1773 | return -EINVAL; | 
|  | 1774 |  | 
|  | 1775 | if (le32_to_cpu(mb->meta_size) > PAGE_SIZE) | 
|  | 1776 | return -EINVAL; | 
|  | 1777 |  | 
|  | 1778 | ctx->meta_total_blocks = BLOCK_SECTORS; | 
|  | 1779 |  | 
|  | 1780 | return 0; | 
|  | 1781 | } | 
|  | 1782 |  | 
| Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1783 | static void | 
|  | 1784 | r5l_recovery_create_empty_meta_block(struct r5l_log *log, | 
|  | 1785 | struct page *page, | 
|  | 1786 | sector_t pos, u64 seq) | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1787 | { | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1788 | struct r5l_meta_block *mb; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1789 |  | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1790 | mb = page_address(page); | 
| Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1791 | clear_page(mb); | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1792 | mb->magic = cpu_to_le32(R5LOG_MAGIC); | 
|  | 1793 | mb->version = R5LOG_VERSION; | 
|  | 1794 | mb->meta_size = cpu_to_le32(sizeof(struct r5l_meta_block)); | 
|  | 1795 | mb->seq = cpu_to_le64(seq); | 
|  | 1796 | mb->position = cpu_to_le64(pos); | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1797 | } | 
|  | 1798 |  | 
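|  |  | /* | 
|  |  |  * Write a single empty meta block at @pos with the given @seq; the write | 
|  |  |  * is issued with REQ_FUA so the block is durable before returning. | 
|  |  |  */ | 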
|  | 1799 | static int r5l_log_write_empty_meta_block(struct r5l_log *log, sector_t pos, | 
|  | 1800 | u64 seq) | 
|  | 1801 | { | 
|  | 1802 | struct page *page; | 
|  | 1803 | struct r5l_meta_block *mb; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1804 |  | 
| Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1805 | page = alloc_page(GFP_KERNEL); | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1806 | if (!page) | 
|  | 1807 | return -ENOMEM; | 
| Song Liu | 9ed988f5 | 2016-11-17 15:24:42 -0800 | [diff] [blame] | 1808 | r5l_recovery_create_empty_meta_block(log, page, pos, seq); | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1809 | mb = page_address(page); | 
| Song Liu | 5c88f40 | 2016-12-07 09:42:05 -0800 | [diff] [blame] | 1810 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, | 
|  | 1811 | mb, PAGE_SIZE)); | 
| Mike Christie | 796a5cf | 2016-06-05 14:32:07 -0500 | [diff] [blame] | 1812 | if (!sync_page_io(log->rdev, pos, PAGE_SIZE, page, REQ_OP_WRITE, | 
| Jan Kara | 5a8948f | 2017-05-31 09:44:33 +0200 | [diff] [blame] | 1813 | REQ_SYNC | REQ_FUA, false)) { | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 1814 | __free_page(page); | 
|  | 1815 | return -EIO; | 
|  | 1816 | } | 
|  | 1817 | __free_page(page); | 
|  | 1818 | return 0; | 
|  | 1819 | } | 
|  | 1820 |  | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1821 | /* | 
|  | 1822 | * r5l_recovery_load_data and r5l_recovery_load_parity use the flag R5_Wantwrite | 
|  | 1823 | * to mark valid (potentially not flushed) data in the journal. | 
|  | 1824 | * | 
|  | 1825 | * We already verified checksum in r5l_recovery_verify_data_checksum_for_mb, | 
|  | 1826 | * so there should not be any mismatch here. | 
|  | 1827 | */ | 
|  | 1828 | static void r5l_recovery_load_data(struct r5l_log *log, | 
|  | 1829 | struct stripe_head *sh, | 
|  | 1830 | struct r5l_recovery_ctx *ctx, | 
|  | 1831 | struct r5l_payload_data_parity *payload, | 
|  | 1832 | sector_t log_offset) | 
|  | 1833 | { | 
|  | 1834 | struct mddev *mddev = log->rdev->mddev; | 
|  | 1835 | struct r5conf *conf = mddev->private; | 
|  | 1836 | int dd_idx; | 
|  | 1837 |  | 
|  | 1838 | raid5_compute_sector(conf, | 
|  | 1839 | le64_to_cpu(payload->location), 0, | 
|  | 1840 | &dd_idx, sh); | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1841 | r5l_recovery_read_page(log, ctx, sh->dev[dd_idx].page, log_offset); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1842 | sh->dev[dd_idx].log_checksum = | 
|  | 1843 | le32_to_cpu(payload->checksum[0]); | 
|  | 1844 | ctx->meta_total_blocks += BLOCK_SECTORS; | 
|  | 1845 |  | 
|  | 1846 | set_bit(R5_Wantwrite, &sh->dev[dd_idx].flags); | 
|  | 1847 | set_bit(STRIPE_R5C_CACHING, &sh->state); | 
|  | 1848 | } | 
|  | 1849 |  | 
|  | 1850 | static void r5l_recovery_load_parity(struct r5l_log *log, | 
|  | 1851 | struct stripe_head *sh, | 
|  | 1852 | struct r5l_recovery_ctx *ctx, | 
|  | 1853 | struct r5l_payload_data_parity *payload, | 
|  | 1854 | sector_t log_offset) | 
|  | 1855 | { | 
|  | 1856 | struct mddev *mddev = log->rdev->mddev; | 
|  | 1857 | struct r5conf *conf = mddev->private; | 
|  | 1858 |  | 
|  | 1859 | ctx->meta_total_blocks += BLOCK_SECTORS * conf->max_degraded; | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1860 | r5l_recovery_read_page(log, ctx, sh->dev[sh->pd_idx].page, log_offset); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1861 | sh->dev[sh->pd_idx].log_checksum = | 
|  | 1862 | le32_to_cpu(payload->checksum[0]); | 
|  | 1863 | set_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags); | 
|  | 1864 |  | 
|  | 1865 | if (sh->qd_idx >= 0) { | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1866 | r5l_recovery_read_page( | 
|  | 1867 | log, ctx, sh->dev[sh->qd_idx].page, | 
|  | 1868 | r5l_ring_add(log, log_offset, BLOCK_SECTORS)); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1869 | sh->dev[sh->qd_idx].log_checksum = | 
|  | 1870 | le32_to_cpu(payload->checksum[1]); | 
|  | 1871 | set_bit(R5_Wantwrite, &sh->dev[sh->qd_idx].flags); | 
|  | 1872 | } | 
|  | 1873 | clear_bit(STRIPE_R5C_CACHING, &sh->state); | 
|  | 1874 | } | 
|  | 1875 |  | 
|  | 1876 | static void r5l_recovery_reset_stripe(struct stripe_head *sh) | 
|  | 1877 | { | 
|  | 1878 | int i; | 
|  | 1879 |  | 
|  | 1880 | sh->state = 0; | 
|  | 1881 | sh->log_start = MaxSector; | 
|  | 1882 | for (i = sh->disks; i--; ) | 
|  | 1883 | sh->dev[i].flags = 0; | 
|  | 1884 | } | 
|  | 1885 |  | 
|  | 1886 | static void | 
|  | 1887 | r5l_recovery_replay_one_stripe(struct r5conf *conf, | 
|  | 1888 | struct stripe_head *sh, | 
|  | 1889 | struct r5l_recovery_ctx *ctx) | 
|  | 1890 | { | 
|  | 1891 | struct md_rdev *rdev, *rrdev; | 
|  | 1892 | int disk_index; | 
|  | 1893 | int data_count = 0; | 
|  | 1894 |  | 
|  | 1895 | for (disk_index = 0; disk_index < sh->disks; disk_index++) { | 
|  | 1896 | if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) | 
|  | 1897 | continue; | 
|  | 1898 | if (disk_index == sh->qd_idx || disk_index == sh->pd_idx) | 
|  | 1899 | continue; | 
|  | 1900 | data_count++; | 
|  | 1901 | } | 
|  | 1902 |  | 
|  | 1903 | /* | 
|  | 1904 | * stripes that only have parity must have been flushed | 
|  | 1905 | * before the crash that we are now recovering from, so | 
|  | 1906 | * there is nothing more to recover. | 
|  | 1907 | */ | 
|  | 1908 | if (data_count == 0) | 
|  | 1909 | goto out; | 
|  | 1910 |  | 
|  | 1911 | for (disk_index = 0; disk_index < sh->disks; disk_index++) { | 
|  | 1912 | if (!test_bit(R5_Wantwrite, &sh->dev[disk_index].flags)) | 
|  | 1913 | continue; | 
|  | 1914 |  | 
|  | 1915 | /* in case the device is broken: take a reference (nr_pending) under RCU so the rdev cannot disappear while we write */ | 
|  | 1916 | rcu_read_lock(); | 
|  | 1917 | rdev = rcu_dereference(conf->disks[disk_index].rdev); | 
|  | 1918 | if (rdev) { | 
|  | 1919 | atomic_inc(&rdev->nr_pending); | 
|  | 1920 | rcu_read_unlock(); | 
|  | 1921 | sync_page_io(rdev, sh->sector, PAGE_SIZE, | 
|  | 1922 | sh->dev[disk_index].page, REQ_OP_WRITE, 0, | 
|  | 1923 | false); | 
|  | 1924 | rdev_dec_pending(rdev, rdev->mddev); | 
|  | 1925 | rcu_read_lock(); | 
|  | 1926 | } | 
|  | 1927 | rrdev = rcu_dereference(conf->disks[disk_index].replacement); | 
|  | 1928 | if (rrdev) { | 
|  | 1929 | atomic_inc(&rrdev->nr_pending); | 
|  | 1930 | rcu_read_unlock(); | 
|  | 1931 | sync_page_io(rrdev, sh->sector, PAGE_SIZE, | 
|  | 1932 | sh->dev[disk_index].page, REQ_OP_WRITE, 0, | 
|  | 1933 | false); | 
|  | 1934 | rdev_dec_pending(rrdev, rrdev->mddev); | 
|  | 1935 | rcu_read_lock(); | 
|  | 1936 | } | 
|  | 1937 | rcu_read_unlock(); | 
|  | 1938 | } | 
|  | 1939 | ctx->data_parity_stripes++; | 
|  | 1940 | out: | 
|  | 1941 | r5l_recovery_reset_stripe(sh); | 
|  | 1942 | } | 
|  | 1943 |  | 
|  | 1944 | static struct stripe_head * | 
|  | 1945 | r5c_recovery_alloc_stripe(struct r5conf *conf, | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 1946 | sector_t stripe_sect) | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1947 | { | 
|  | 1948 | struct stripe_head *sh; | 
|  | 1949 |  | 
|  | 1950 | sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0); | 
|  | 1951 | if (!sh) | 
|  | 1952 | return NULL;  /* no more stripe available */ | 
|  | 1953 |  | 
|  | 1954 | r5l_recovery_reset_stripe(sh); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 1955 |  | 
|  | 1956 | return sh; | 
|  | 1957 | } | 
|  | 1958 |  | 
|  | 1959 | static struct stripe_head * | 
|  | 1960 | r5c_recovery_lookup_stripe(struct list_head *list, sector_t sect) | 
|  | 1961 | { | 
|  | 1962 | struct stripe_head *sh; | 
|  | 1963 |  | 
|  | 1964 | list_for_each_entry(sh, list, lru) | 
|  | 1965 | if (sh->sector == sect) | 
|  | 1966 | return sh; | 
|  | 1967 | return NULL; | 
|  | 1968 | } | 
|  | 1969 |  | 
|  | 1970 | static void | 
|  | 1971 | r5c_recovery_drop_stripes(struct list_head *cached_stripe_list, | 
|  | 1972 | struct r5l_recovery_ctx *ctx) | 
|  | 1973 | { | 
|  | 1974 | struct stripe_head *sh, *next; | 
|  | 1975 |  | 
|  | 1976 | list_for_each_entry_safe(sh, next, cached_stripe_list, lru) { | 
|  | 1977 | r5l_recovery_reset_stripe(sh); | 
|  | 1978 | list_del_init(&sh->lru); | 
|  | 1979 | raid5_release_stripe(sh); | 
|  | 1980 | } | 
|  | 1981 | } | 
|  | 1982 |  | 
|  | 1983 | static void | 
|  | 1984 | r5c_recovery_replay_stripes(struct list_head *cached_stripe_list, | 
|  | 1985 | struct r5l_recovery_ctx *ctx) | 
|  | 1986 | { | 
|  | 1987 | struct stripe_head *sh, *next; | 
|  | 1988 |  | 
|  | 1989 | list_for_each_entry_safe(sh, next, cached_stripe_list, lru) | 
|  | 1990 | if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { | 
|  | 1991 | r5l_recovery_replay_one_stripe(sh->raid_conf, sh, ctx); | 
|  | 1992 | list_del_init(&sh->lru); | 
|  | 1993 | raid5_release_stripe(sh); | 
|  | 1994 | } | 
|  | 1995 | } | 
|  | 1996 |  | 
|  | 1997 | /* if matches return 0; otherwise return -EINVAL */ | 
|  | 1998 | static int | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 1999 | r5l_recovery_verify_data_checksum(struct r5l_log *log, | 
|  | 2000 | struct r5l_recovery_ctx *ctx, | 
|  | 2001 | struct page *page, | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2002 | sector_t log_offset, __le32 log_checksum) | 
|  | 2003 | { | 
|  | 2004 | void *addr; | 
|  | 2005 | u32 checksum; | 
|  | 2006 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2007 | r5l_recovery_read_page(log, ctx, page, log_offset); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2008 | addr = kmap_atomic(page); | 
|  | 2009 | checksum = crc32c_le(log->uuid_checksum, addr, PAGE_SIZE); | 
|  | 2010 | kunmap_atomic(addr); | 
|  | 2011 | return (le32_to_cpu(log_checksum) == checksum) ? 0 : -EINVAL; | 
|  | 2012 | } | 
|  | 2013 |  | 
|  | 2014 | /* | 
|  | 2015 | * before loading data into the stripe cache, we need to verify the checksum | 
|  | 2016 | * of all data; if any data page mismatches, we drop all data in the meta block | 
|  | 2017 | */ | 
|  | 2018 | static int | 
|  | 2019 | r5l_recovery_verify_data_checksum_for_mb(struct r5l_log *log, | 
|  | 2020 | struct r5l_recovery_ctx *ctx) | 
|  | 2021 | { | 
|  | 2022 | struct mddev *mddev = log->rdev->mddev; | 
|  | 2023 | struct r5conf *conf = mddev->private; | 
|  | 2024 | struct r5l_meta_block *mb = page_address(ctx->meta_page); | 
|  | 2025 | sector_t mb_offset = sizeof(struct r5l_meta_block); | 
|  | 2026 | sector_t log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); | 
|  | 2027 | struct page *page; | 
|  | 2028 | struct r5l_payload_data_parity *payload; | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2029 | struct r5l_payload_flush *payload_flush; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2030 |  | 
|  | 2031 | page = alloc_page(GFP_KERNEL); | 
|  | 2032 | if (!page) | 
|  | 2033 | return -ENOMEM; | 
|  | 2034 |  | 
|  | 2035 | while (mb_offset < le32_to_cpu(mb->meta_size)) { | 
|  | 2036 | payload = (void *)mb + mb_offset; | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2037 | payload_flush = (void *)mb + mb_offset; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2038 |  | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2039 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2040 | if (r5l_recovery_verify_data_checksum( | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2041 | log, ctx, page, log_offset, | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2042 | payload->checksum[0]) < 0) | 
|  | 2043 | goto mismatch; | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2044 | } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) { | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2045 | if (r5l_recovery_verify_data_checksum( | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2046 | log, ctx, page, log_offset, | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2047 | payload->checksum[0]) < 0) | 
|  | 2048 | goto mismatch; | 
|  | 2049 | if (conf->max_degraded == 2 && /* q for RAID 6 */ | 
|  | 2050 | r5l_recovery_verify_data_checksum( | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2051 | log, ctx, page, | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2052 | r5l_ring_add(log, log_offset, | 
|  | 2053 | BLOCK_SECTORS), | 
|  | 2054 | payload->checksum[1]) < 0) | 
|  | 2055 | goto mismatch; | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2056 | } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2057 | /* nothing to do for R5LOG_PAYLOAD_FLUSH here */ | 
|  | 2058 | } else /* not R5LOG_PAYLOAD_DATA/PARITY/FLUSH */ | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2059 | goto mismatch; | 
|  | 2060 |  | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2061 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2062 | mb_offset += sizeof(struct r5l_payload_flush) + | 
|  | 2063 | le32_to_cpu(payload_flush->size); | 
|  | 2064 | } else { | 
|  | 2065 | /* DATA or PARITY payload */ | 
|  | 2066 | log_offset = r5l_ring_add(log, log_offset, | 
|  | 2067 | le32_to_cpu(payload->size)); | 
|  | 2068 | mb_offset += sizeof(struct r5l_payload_data_parity) + | 
|  | 2069 | sizeof(__le32) * | 
|  | 2070 | (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); | 
|  | 2071 | } | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2072 |  | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2073 | } | 
|  | 2074 |  | 
|  | 2075 | put_page(page); | 
|  | 2076 | return 0; | 
|  | 2077 |  | 
|  | 2078 | mismatch: | 
|  | 2079 | put_page(page); | 
|  | 2080 | return -EINVAL; | 
|  | 2081 | } | 
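
The loop above steps mb_offset through the meta block with a type-dependent stride: a FLUSH payload is followed directly by its stripe list, while a DATA/PARITY payload is followed by one __le32 checksum per 4 KB page it describes (payload->size counts 512-byte sectors). A minimal sketch of the DATA/PARITY stride, where `payload_mb_bytes` is a hypothetical helper name:

```c
#include <linux/raid/md_p.h>

/*
 * Hypothetical helper: bytes a DATA/PARITY payload occupies inside the
 * meta block. payload->size is in 512B sectors, so size >> (PAGE_SHIFT - 9)
 * is the number of 4K pages, and each page contributes one __le32 checksum.
 */
static inline u32 payload_mb_bytes(struct r5l_payload_data_parity *payload)
{
	return sizeof(struct r5l_payload_data_parity) +
	       sizeof(__le32) *
	       (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9));
}
```
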
|  | 2082 |  | 
|  | 2083 | /* | 
|  | 2084 | * Analyze all data/parity pages in one meta block | 
|  | 2085 | * Returns: | 
|  | 2086 | * 0 for success | 
|  | 2087 | * -EINVAL for unknown payload type | 
|  | 2088 | * -EAGAIN for checksum mismatch of data page | 
|  | 2089 | * -ENOMEM when out of memory (alloc_page failed or no more stripes available) | 
|  | 2090 | */ | 
|  | 2091 | static int | 
|  | 2092 | r5c_recovery_analyze_meta_block(struct r5l_log *log, | 
|  | 2093 | struct r5l_recovery_ctx *ctx, | 
|  | 2094 | struct list_head *cached_stripe_list) | 
|  | 2095 | { | 
|  | 2096 | struct mddev *mddev = log->rdev->mddev; | 
|  | 2097 | struct r5conf *conf = mddev->private; | 
|  | 2098 | struct r5l_meta_block *mb; | 
|  | 2099 | struct r5l_payload_data_parity *payload; | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2100 | struct r5l_payload_flush *payload_flush; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2101 | int mb_offset; | 
|  | 2102 | sector_t log_offset; | 
|  | 2103 | sector_t stripe_sect; | 
|  | 2104 | struct stripe_head *sh; | 
|  | 2105 | int ret; | 
|  | 2106 |  | 
|  | 2107 | /* | 
|  | 2108 | * on a checksum mismatch in the data blocks, we drop all data in this mb, | 
|  | 2109 | * but we still read the next mb for other data with the FLUSH flag, as | 
|  | 2110 | * io_units could finish out of order. | 
|  | 2111 | */ | 
|  | 2112 | ret = r5l_recovery_verify_data_checksum_for_mb(log, ctx); | 
|  | 2113 | if (ret == -EINVAL) | 
|  | 2114 | return -EAGAIN; | 
|  | 2115 | else if (ret) | 
|  | 2116 | return ret;   /* -ENOMEM due to alloc_page() failure */ | 
|  | 2117 |  | 
|  | 2118 | mb = page_address(ctx->meta_page); | 
|  | 2119 | mb_offset = sizeof(struct r5l_meta_block); | 
|  | 2120 | log_offset = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); | 
|  | 2121 |  | 
|  | 2122 | while (mb_offset < le32_to_cpu(mb->meta_size)) { | 
|  | 2123 | int dd; | 
|  | 2124 |  | 
|  | 2125 | payload = (void *)mb + mb_offset; | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2126 | payload_flush = (void *)mb + mb_offset; | 
|  | 2127 |  | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2128 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_FLUSH) { | 
| Song Liu | 2d4f468 | 2017-03-07 17:44:21 -0800 | [diff] [blame] | 2129 | int i, count; | 
|  | 2130 |  | 
|  | 2131 | count = le32_to_cpu(payload_flush->size) / sizeof(__le64); | 
|  | 2132 | for (i = 0; i < count; ++i) { | 
|  | 2133 | stripe_sect = le64_to_cpu(payload_flush->flush_stripes[i]); | 
|  | 2134 | sh = r5c_recovery_lookup_stripe(cached_stripe_list, | 
|  | 2135 | stripe_sect); | 
|  | 2136 | if (sh) { | 
|  | 2137 | WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); | 
|  | 2138 | r5l_recovery_reset_stripe(sh); | 
|  | 2139 | list_del_init(&sh->lru); | 
|  | 2140 | raid5_release_stripe(sh); | 
|  | 2141 | } | 
|  | 2142 | } | 
|  | 2143 |  | 
|  | 2144 | mb_offset += sizeof(struct r5l_payload_flush) + | 
|  | 2145 | le32_to_cpu(payload_flush->size); | 
|  | 2146 | continue; | 
|  | 2147 | } | 
|  | 2148 |  | 
|  | 2149 | /* DATA or PARITY payload */ | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2150 | stripe_sect = (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) ? | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2151 | raid5_compute_sector( | 
|  | 2152 | conf, le64_to_cpu(payload->location), 0, &dd, | 
|  | 2153 | NULL) | 
|  | 2154 | : le64_to_cpu(payload->location); | 
|  | 2155 |  | 
|  | 2156 | sh = r5c_recovery_lookup_stripe(cached_stripe_list, | 
|  | 2157 | stripe_sect); | 
|  | 2158 |  | 
|  | 2159 | if (!sh) { | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2160 | sh = r5c_recovery_alloc_stripe(conf, stripe_sect); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2161 | /* | 
|  | 2162 | * cannot get a stripe from raid5_get_active_stripe; | 
|  | 2163 | * try replaying some cached stripes | 
|  | 2164 | */ | 
|  | 2165 | if (!sh) { | 
|  | 2166 | r5c_recovery_replay_stripes( | 
|  | 2167 | cached_stripe_list, ctx); | 
|  | 2168 | sh = r5c_recovery_alloc_stripe( | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2169 | conf, stripe_sect); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2170 | } | 
|  | 2171 | if (!sh) { | 
|  | 2172 | pr_debug("md/raid:%s: Increasing stripe cache size to %d to recover data from journal.\n", | 
|  | 2173 | mdname(mddev), | 
|  | 2174 | conf->min_nr_stripes * 2); | 
|  | 2175 | raid5_set_cache_size(mddev, | 
|  | 2176 | conf->min_nr_stripes * 2); | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2177 | sh = r5c_recovery_alloc_stripe(conf, | 
|  | 2178 | stripe_sect); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2179 | } | 
|  | 2180 | if (!sh) { | 
|  | 2181 | pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n", | 
|  | 2182 | mdname(mddev)); | 
|  | 2183 | return -ENOMEM; | 
|  | 2184 | } | 
|  | 2185 | list_add_tail(&sh->lru, cached_stripe_list); | 
|  | 2186 | } | 
|  | 2187 |  | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2188 | if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_DATA) { | 
| Zhengyuan Liu | f7b7bee | 2016-11-26 10:57:13 +0800 | [diff] [blame] | 2189 | if (!test_bit(STRIPE_R5C_CACHING, &sh->state) && | 
|  | 2190 | test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) { | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2191 | r5l_recovery_replay_one_stripe(conf, sh, ctx); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2192 | list_move_tail(&sh->lru, cached_stripe_list); | 
|  | 2193 | } | 
|  | 2194 | r5l_recovery_load_data(log, sh, ctx, payload, | 
|  | 2195 | log_offset); | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2196 | } else if (le16_to_cpu(payload->header.type) == R5LOG_PAYLOAD_PARITY) | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2197 | r5l_recovery_load_parity(log, sh, ctx, payload, | 
|  | 2198 | log_offset); | 
|  | 2199 | else | 
|  | 2200 | return -EINVAL; | 
|  | 2201 |  | 
|  | 2202 | log_offset = r5l_ring_add(log, log_offset, | 
|  | 2203 | le32_to_cpu(payload->size)); | 
|  | 2204 |  | 
|  | 2205 | mb_offset += sizeof(struct r5l_payload_data_parity) + | 
|  | 2206 | sizeof(__le32) * | 
|  | 2207 | (le32_to_cpu(payload->size) >> (PAGE_SHIFT - 9)); | 
|  | 2208 | } | 
|  | 2209 |  | 
|  | 2210 | return 0; | 
|  | 2211 | } | 
|  | 2212 |  | 
|  | 2213 | /* | 
|  | 2214 | * Load the stripe into cache. The stripe will be written out later by | 
|  | 2215 | * the stripe cache state machine. | 
|  | 2216 | */ | 
|  | 2217 | static void r5c_recovery_load_one_stripe(struct r5l_log *log, | 
|  | 2218 | struct stripe_head *sh) | 
|  | 2219 | { | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2220 | struct r5dev *dev; | 
|  | 2221 | int i; | 
|  | 2222 |  | 
|  | 2223 | for (i = sh->disks; i--; ) { | 
|  | 2224 | dev = sh->dev + i; | 
|  | 2225 | if (test_and_clear_bit(R5_Wantwrite, &dev->flags)) { | 
|  | 2226 | set_bit(R5_InJournal, &dev->flags); | 
|  | 2227 | set_bit(R5_UPTODATE, &dev->flags); | 
|  | 2228 | } | 
|  | 2229 | } | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2230 | } | 
|  | 2231 |  | 
|  | 2232 | /* | 
|  | 2233 | * Scan through the log for all to-be-flushed data | 
|  | 2234 | * | 
|  | 2235 | * For stripes with data and parity, namely Data-Parity stripe | 
|  | 2236 | * (STRIPE_R5C_CACHING == 0), we simply replay all the writes. | 
|  | 2237 | * | 
|  | 2238 | * For stripes with only data, namely Data-Only stripe | 
|  | 2239 | * (STRIPE_R5C_CACHING == 1), we load them to stripe cache state machine. | 
|  | 2240 | * | 
|  | 2241 | * For a stripe, if we see data after parity, we should discard all previous | 
|  | 2242 | * data and parity for this stripe, as that data has already been flushed to | 
|  | 2243 | * the array. | 
|  | 2244 | * | 
|  | 2245 | * At the end of the scan, we return the new journal_tail, which points to the | 
|  | 2246 | * first data-only stripe on the journal device, or to the next invalid meta block. | 
|  | 2247 | */ | 
|  | 2248 | static int r5c_recovery_flush_log(struct r5l_log *log, | 
|  | 2249 | struct r5l_recovery_ctx *ctx) | 
|  | 2250 | { | 
| JackieLiu | bc8f167 | 2016-11-28 16:19:20 +0800 | [diff] [blame] | 2251 | struct stripe_head *sh; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2252 | int ret = 0; | 
|  | 2253 |  | 
|  | 2254 | /* scan through the log */ | 
|  | 2255 | while (1) { | 
|  | 2256 | if (r5l_recovery_read_meta_block(log, ctx)) | 
|  | 2257 | break; | 
|  | 2258 |  | 
|  | 2259 | ret = r5c_recovery_analyze_meta_block(log, ctx, | 
|  | 2260 | &ctx->cached_list); | 
|  | 2261 | /* | 
|  | 2262 | * -EAGAIN means a mismatch in a data block; in this case, we still | 
|  | 2263 | * try to scan the next meta block | 
|  | 2264 | */ | 
|  | 2265 | if (ret && ret != -EAGAIN) | 
|  | 2266 | break;   /* ret == -EINVAL or -ENOMEM */ | 
|  | 2267 | ctx->seq++; | 
|  | 2268 | ctx->pos = r5l_ring_add(log, ctx->pos, ctx->meta_total_blocks); | 
|  | 2269 | } | 
|  | 2270 |  | 
|  | 2271 | if (ret == -ENOMEM) { | 
|  | 2272 | r5c_recovery_drop_stripes(&ctx->cached_list, ctx); | 
|  | 2273 | return ret; | 
|  | 2274 | } | 
|  | 2275 |  | 
|  | 2276 | /* replay data-parity stripes */ | 
|  | 2277 | r5c_recovery_replay_stripes(&ctx->cached_list, ctx); | 
|  | 2278 |  | 
|  | 2279 | /* load data-only stripes to stripe cache */ | 
| JackieLiu | bc8f167 | 2016-11-28 16:19:20 +0800 | [diff] [blame] | 2280 | list_for_each_entry(sh, &ctx->cached_list, lru) { | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2281 | WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); | 
|  | 2282 | r5c_recovery_load_one_stripe(log, sh); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2283 | ctx->data_only_stripes++; | 
|  | 2284 | } | 
|  | 2285 |  | 
|  | 2286 | return 0; | 
|  | 2287 | } | 
|  | 2288 |  | 
|  | 2289 | /* | 
|  | 2290 | * We did a recovery. Now ctx.pos points to an invalid meta block. The new | 
|  | 2291 | * log will start here, but we can't let the superblock point to the last valid | 
|  | 2292 | * meta block. The log might look like: | 
|  | 2293 | * | meta 1| meta 2| meta 3| | 
|  | 2294 | * meta 1 is valid, meta 2 is invalid, and meta 3 could be valid. If the | 
|  | 2295 | * superblock points to meta 1, we write a new valid meta 2n. If a crash | 
|  | 2296 | * happens again, the next recovery will start from meta 1. Since meta 2n is | 
|  | 2297 | * valid now, recovery will think meta 3 is valid, which is wrong. | 
|  | 2298 | * The solution is to create a new meta in meta 2 with its seq == meta | 
| Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2299 | * 1's seq + 10000 and let the superblock point to meta 2. The same recovery | 
|  | 2300 | * will then not treat meta 3 as a valid meta, because its seq doesn't match. | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2301 | */ | 
|  | 2302 |  | 
|  | 2303 | /* | 
|  | 2304 | * Before recovery, the log looks like the following | 
|  | 2305 | * | 
|  | 2306 | *   --------------------------------------------- | 
|  | 2307 | *   |           valid log        | invalid log  | | 
|  | 2308 | *   --------------------------------------------- | 
|  | 2309 | *   ^ | 
|  | 2310 | *   |- log->last_checkpoint | 
|  | 2311 | *   |- log->last_cp_seq | 
|  | 2312 | * | 
|  | 2313 | * Now we scan through the log until we see invalid entry | 
|  | 2314 | * | 
|  | 2315 | *   --------------------------------------------- | 
|  | 2316 | *   |           valid log        | invalid log  | | 
|  | 2317 | *   --------------------------------------------- | 
|  | 2318 | *   ^                            ^ | 
|  | 2319 | *   |- log->last_checkpoint      |- ctx->pos | 
|  | 2320 | *   |- log->last_cp_seq          |- ctx->seq | 
|  | 2321 | * | 
|  | 2322 | * From this point, we need to increase the seq number by 10000 to avoid | 
|  | 2323 | * confusing the next recovery. | 
|  | 2324 | * | 
|  | 2325 | *   --------------------------------------------- | 
|  | 2326 | *   |           valid log        | invalid log  | | 
|  | 2327 | *   --------------------------------------------- | 
|  | 2328 | *   ^                              ^ | 
|  | 2329 | *   |- log->last_checkpoint        |- ctx->pos+1 | 
| Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2330 | *   |- log->last_cp_seq            |- ctx->seq+10001 | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2331 | * | 
|  | 2332 | * However, it is not safe to start the state machine yet, because the data of | 
|  | 2333 | * data-only stripes is not yet secured in RAID. To save this data, we | 
|  | 2334 | * rewrite it to the journal from seq+10001. | 
|  | 2335 | * | 
|  | 2336 | *   ----------------------------------------------------------------- | 
|  | 2337 | *   |           valid log        | data only stripes | invalid log  | | 
|  | 2338 | *   ----------------------------------------------------------------- | 
|  | 2339 | *   ^                                                ^ | 
|  | 2340 | *   |- log->last_checkpoint                          |- ctx->pos+n | 
| Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2341 | *   |- log->last_cp_seq                              |- ctx->seq+10000+n | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2342 | * | 
|  | 2343 | * If failure happens again during this process, the recovery can safely start | 
|  | 2344 | * again from log->last_checkpoint. | 
|  | 2345 | * | 
|  | 2346 | * Once data only stripes are rewritten to journal, we move log_tail | 
|  | 2347 | * | 
|  | 2348 | *   ----------------------------------------------------------------- | 
|  | 2349 | *   |     old log        |    data only stripes    | invalid log  | | 
|  | 2350 | *   ----------------------------------------------------------------- | 
|  | 2351 | *                        ^                         ^ | 
|  | 2352 | *                        |- log->last_checkpoint   |- ctx->pos+n | 
| Song Liu | 3c6edc6 | 2016-12-07 09:42:06 -0800 | [diff] [blame] | 2353 | *                        |- log->last_cp_seq       |- ctx->seq+10000+n | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2354 | * | 
|  | 2355 | * Then we can safely start the state machine. If a failure happens from this | 
|  | 2356 | * point on, the recovery will start from the new log->last_checkpoint. | 
|  | 2357 | */ | 
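
The sequence bump described above only works because the scanner accepts a meta block when both its checksum and its expected sequence number match. Below is a minimal sketch of that validity test; `r5l_meta_block_valid` is a hypothetical helper name, and the real checks live in r5l_recovery_read_meta_block() and r5l_load_log():

```c
/*
 * Sketch of the validity test that makes the seq+10000 trick work
 * (hypothetical helper; not the in-tree function). A stale-but-intact
 * meta 3 fails here because its seq is ~10000 behind the expected one.
 */
static bool r5l_meta_block_valid(struct r5l_log *log,
				 struct r5l_meta_block *mb,
				 u64 expected_seq, sector_t expected_pos)
{
	u32 crc, stored_crc = le32_to_cpu(mb->checksum);

	mb->checksum = 0;
	crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE);
	return le32_to_cpu(mb->magic) == R5LOG_MAGIC &&
	       stored_crc == crc &&
	       le64_to_cpu(mb->seq) == expected_seq &&
	       le64_to_cpu(mb->position) == expected_pos;
}
```
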
|  | 2358 | static int | 
|  | 2359 | r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log, | 
|  | 2360 | struct r5l_recovery_ctx *ctx) | 
|  | 2361 | { | 
| Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2362 | struct stripe_head *sh; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2363 | struct mddev *mddev = log->rdev->mddev; | 
|  | 2364 | struct page *page; | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2365 | sector_t next_checkpoint = MaxSector; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2366 |  | 
|  | 2367 | page = alloc_page(GFP_KERNEL); | 
|  | 2368 | if (!page) { | 
|  | 2369 | pr_err("md/raid:%s: cannot allocate memory to rewrite data only stripes\n", | 
|  | 2370 | mdname(mddev)); | 
|  | 2371 | return -ENOMEM; | 
|  | 2372 | } | 
|  | 2373 |  | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2374 | WARN_ON(list_empty(&ctx->cached_list)); | 
|  | 2375 |  | 
| Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2376 | list_for_each_entry(sh, &ctx->cached_list, lru) { | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2377 | struct r5l_meta_block *mb; | 
|  | 2378 | int i; | 
|  | 2379 | int offset; | 
|  | 2380 | sector_t write_pos; | 
|  | 2381 |  | 
|  | 2382 | WARN_ON(!test_bit(STRIPE_R5C_CACHING, &sh->state)); | 
|  | 2383 | r5l_recovery_create_empty_meta_block(log, page, | 
|  | 2384 | ctx->pos, ctx->seq); | 
|  | 2385 | mb = page_address(page); | 
|  | 2386 | offset = le32_to_cpu(mb->meta_size); | 
| JackieLiu | fc833c2 | 2016-11-28 16:19:19 +0800 | [diff] [blame] | 2387 | write_pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2388 |  | 
|  | 2389 | for (i = sh->disks; i--; ) { | 
|  | 2390 | struct r5dev *dev = &sh->dev[i]; | 
|  | 2391 | struct r5l_payload_data_parity *payload; | 
|  | 2392 | void *addr; | 
|  | 2393 |  | 
|  | 2394 | if (test_bit(R5_InJournal, &dev->flags)) { | 
|  | 2395 | payload = (void *)mb + offset; | 
|  | 2396 | payload->header.type = cpu_to_le16( | 
|  | 2397 | R5LOG_PAYLOAD_DATA); | 
| Jason Yan | 1ad45a9 | 2017-03-25 09:44:39 +0800 | [diff] [blame] | 2398 | payload->size = cpu_to_le32(BLOCK_SECTORS); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2399 | payload->location = cpu_to_le64( | 
|  | 2400 | raid5_compute_blocknr(sh, i, 0)); | 
|  | 2401 | addr = kmap_atomic(dev->page); | 
|  | 2402 | payload->checksum[0] = cpu_to_le32( | 
|  | 2403 | crc32c_le(log->uuid_checksum, addr, | 
|  | 2404 | PAGE_SIZE)); | 
|  | 2405 | kunmap_atomic(addr); | 
|  | 2406 | sync_page_io(log->rdev, write_pos, PAGE_SIZE, | 
|  | 2407 | dev->page, REQ_OP_WRITE, 0, false); | 
|  | 2408 | write_pos = r5l_ring_add(log, write_pos, | 
|  | 2409 | BLOCK_SECTORS); | 
|  | 2410 | offset += sizeof(__le32) + | 
|  | 2411 | sizeof(struct r5l_payload_data_parity); | 
|  | 2412 |  | 
|  | 2413 | } | 
|  | 2414 | } | 
|  | 2415 | mb->meta_size = cpu_to_le32(offset); | 
| Song Liu | 5c88f40 | 2016-12-07 09:42:05 -0800 | [diff] [blame] | 2416 | mb->checksum = cpu_to_le32(crc32c_le(log->uuid_checksum, | 
|  | 2417 | mb, PAGE_SIZE)); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2418 | sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page, | 
| Jan Kara | 5a8948f | 2017-05-31 09:44:33 +0200 | [diff] [blame] | 2419 | REQ_OP_WRITE, REQ_SYNC | REQ_FUA, false); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2420 | sh->log_start = ctx->pos; | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2421 | list_add_tail(&sh->r5c, &log->stripe_in_journal_list); | 
|  | 2422 | atomic_inc(&log->stripe_in_journal_count); | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2423 | ctx->pos = write_pos; | 
|  | 2424 | ctx->seq += 1; | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2425 | next_checkpoint = sh->log_start; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2426 | } | 
| Song Liu | 3c66abb | 2016-12-14 15:38:01 -0800 | [diff] [blame] | 2427 | log->next_checkpoint = next_checkpoint; | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2428 | __free_page(page); | 
|  | 2429 | return 0; | 
|  | 2430 | } | 
|  | 2431 |  | 
| Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2432 | static void r5c_recovery_flush_data_only_stripes(struct r5l_log *log, | 
|  | 2433 | struct r5l_recovery_ctx *ctx) | 
|  | 2434 | { | 
|  | 2435 | struct mddev *mddev = log->rdev->mddev; | 
|  | 2436 | struct r5conf *conf = mddev->private; | 
|  | 2437 | struct stripe_head *sh, *next; | 
|  | 2438 |  | 
|  | 2439 | if (ctx->data_only_stripes == 0) | 
|  | 2440 | return; | 
|  | 2441 |  | 
|  | 2442 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_BACK; | 
|  | 2443 |  | 
|  | 2444 | list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) { | 
|  | 2445 | r5c_make_stripe_write_out(sh); | 
|  | 2446 | set_bit(STRIPE_HANDLE, &sh->state); | 
|  | 2447 | list_del_init(&sh->lru); | 
|  | 2448 | raid5_release_stripe(sh); | 
|  | 2449 | } | 
|  | 2450 |  | 
|  | 2451 | md_wakeup_thread(conf->mddev->thread); | 
|  | 2452 | /* reuse conf->wait_for_quiescent in recovery */ | 
|  | 2453 | wait_event(conf->wait_for_quiescent, | 
|  | 2454 | atomic_read(&conf->active_stripes) == 0); | 
|  | 2455 |  | 
|  | 2456 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | 
|  | 2457 | } | 
|  | 2458 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2459 | static int r5l_recovery_log(struct r5l_log *log) | 
|  | 2460 | { | 
| Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2461 | struct mddev *mddev = log->rdev->mddev; | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2462 | struct r5l_recovery_ctx *ctx; | 
| Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2463 | int ret; | 
| JackieLiu | 43b9674 | 2016-12-05 11:58:53 +0800 | [diff] [blame] | 2464 | sector_t pos; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2465 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2466 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); | 
|  | 2467 | if (!ctx) | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2468 | return -ENOMEM; | 
|  | 2469 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2470 | ctx->pos = log->last_checkpoint; | 
|  | 2471 | ctx->seq = log->last_cp_seq; | 
|  | 2472 | INIT_LIST_HEAD(&ctx->cached_list); | 
|  | 2473 | ctx->meta_page = alloc_page(GFP_KERNEL); | 
|  | 2474 |  | 
|  | 2475 | if (!ctx->meta_page) { | 
|  | 2476 | ret = -ENOMEM; | 
|  | 2477 | goto meta_page; | 
|  | 2478 | } | 
|  | 2479 |  | 
|  | 2480 | if (r5l_recovery_allocate_ra_pool(log, ctx) != 0) { | 
|  | 2481 | ret = -ENOMEM; | 
|  | 2482 | goto ra_pool; | 
|  | 2483 | } | 
|  | 2484 |  | 
|  | 2485 | ret = r5c_recovery_flush_log(log, ctx); | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2486 |  | 
| Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2487 | if (ret) | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2488 | goto error; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2489 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2490 | pos = ctx->pos; | 
|  | 2491 | ctx->seq += 10000; | 
| JackieLiu | 43b9674 | 2016-12-05 11:58:53 +0800 | [diff] [blame] | 2492 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2493 | if ((ctx->data_only_stripes == 0) && (ctx->data_parity_stripes == 0)) | 
| Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2494 | pr_debug("md/raid:%s: starting from clean shutdown\n", | 
|  | 2495 | mdname(mddev)); | 
| Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2496 | else | 
| Colin Ian King | 99f1789 | 2016-12-23 00:52:30 +0000 | [diff] [blame] | 2497 | pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n", | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2498 | mdname(mddev), ctx->data_only_stripes, | 
|  | 2499 | ctx->data_parity_stripes); | 
| Song Liu | 5aabf7c | 2016-11-17 15:24:44 -0800 | [diff] [blame] | 2500 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2501 | if (ctx->data_only_stripes == 0) { | 
|  | 2502 | log->next_checkpoint = ctx->pos; | 
|  | 2503 | r5l_log_write_empty_meta_block(log, ctx->pos, ctx->seq++); | 
|  | 2504 | ctx->pos = r5l_ring_add(log, ctx->pos, BLOCK_SECTORS); | 
|  | 2505 | } else if (r5c_recovery_rewrite_data_only_stripes(log, ctx)) { | 
| Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2506 | pr_err("md/raid:%s: failed to rewrite stripes to journal\n", | 
|  | 2507 | mdname(mddev)); | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2508 | ret = -EIO; | 
|  | 2509 | goto error; | 
| Shaohua Li | 355810d | 2015-08-13 14:32:01 -0700 | [diff] [blame] | 2510 | } | 
| Song Liu | b4c625c | 2016-11-17 15:24:43 -0800 | [diff] [blame] | 2511 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2512 | log->log_start = ctx->pos; | 
|  | 2513 | log->seq = ctx->seq; | 
| JackieLiu | 43b9674 | 2016-12-05 11:58:53 +0800 | [diff] [blame] | 2514 | log->last_checkpoint = pos; | 
|  | 2515 | r5l_write_super(log, pos); | 
| Song Liu | a85dd7b | 2017-01-23 17:12:57 -0800 | [diff] [blame] | 2516 |  | 
| Song Liu | effe6ee | 2017-03-07 16:49:17 -0800 | [diff] [blame] | 2517 | r5c_recovery_flush_data_only_stripes(log, ctx); | 
|  | 2518 | ret = 0; | 
|  | 2519 | error: | 
|  | 2520 | r5l_recovery_free_ra_pool(log, ctx); | 
|  | 2521 | ra_pool: | 
|  | 2522 | __free_page(ctx->meta_page); | 
|  | 2523 | meta_page: | 
|  | 2524 | kfree(ctx); | 
|  | 2525 | return ret; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2526 | } | 
|  | 2527 |  | 
|  | 2528 | static void r5l_write_super(struct r5l_log *log, sector_t cp) | 
|  | 2529 | { | 
|  | 2530 | struct mddev *mddev = log->rdev->mddev; | 
|  | 2531 |  | 
|  | 2532 | log->rdev->journal_tail = cp; | 
| Shaohua Li | 2953079 | 2016-12-08 15:48:19 -0800 | [diff] [blame] | 2533 | set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2534 | } | 
|  | 2535 |  | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2536 | static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) | 
|  | 2537 | { | 
| Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2538 | struct r5conf *conf; | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2539 | int ret; | 
|  | 2540 |  | 
| Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2541 | ret = mddev_lock(mddev); | 
|  | 2542 | if (ret) | 
|  | 2543 | return ret; | 
|  | 2544 |  | 
|  | 2545 | conf = mddev->private; | 
|  | 2546 | if (!conf || !conf->log) { | 
|  | 2547 | mddev_unlock(mddev); | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2548 | return 0; | 
| Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2549 | } | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2550 |  | 
|  | 2551 | switch (conf->log->r5c_journal_mode) { | 
|  | 2552 | case R5C_JOURNAL_MODE_WRITE_THROUGH: | 
|  | 2553 | ret = snprintf( | 
|  | 2554 | page, PAGE_SIZE, "[%s] %s\n", | 
|  | 2555 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], | 
|  | 2556 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); | 
|  | 2557 | break; | 
|  | 2558 | case R5C_JOURNAL_MODE_WRITE_BACK: | 
|  | 2559 | ret = snprintf( | 
|  | 2560 | page, PAGE_SIZE, "%s [%s]\n", | 
|  | 2561 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_THROUGH], | 
|  | 2562 | r5c_journal_mode_str[R5C_JOURNAL_MODE_WRITE_BACK]); | 
|  | 2563 | break; | 
|  | 2564 | default: | 
|  | 2565 | ret = 0; | 
|  | 2566 | } | 
| Song Liu | a72cbf8 | 2017-08-08 22:56:52 -0700 | [diff] [blame] | 2567 | mddev_unlock(mddev); | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2568 | return ret; | 
|  | 2569 | } | 
|  | 2570 |  | 
| Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2571 | /* | 
|  | 2572 | * Set journal cache mode on @mddev (external API initially needed by dm-raid). | 
|  | 2573 | * | 
|  | 2574 | * @mode as defined in 'enum r5c_journal_mode'. | 
|  | 2575 | * | 
|  | 2576 | */ | 
|  | 2577 | int r5c_journal_mode_set(struct mddev *mddev, int mode) | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2578 | { | 
| Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2579 | struct r5conf *conf; | 
|  | 2580 | int err; | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2581 |  | 
| Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2582 | if (mode < R5C_JOURNAL_MODE_WRITE_THROUGH || | 
|  | 2583 | mode > R5C_JOURNAL_MODE_WRITE_BACK) | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2584 | return -EINVAL; | 
|  | 2585 |  | 
| Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2586 | err = mddev_lock(mddev); | 
|  | 2587 | if (err) | 
|  | 2588 | return err; | 
|  | 2589 | conf = mddev->private; | 
|  | 2590 | if (!conf || !conf->log) { | 
|  | 2591 | mddev_unlock(mddev); | 
|  | 2592 | return -ENODEV; | 
|  | 2593 | } | 
|  | 2594 |  | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2595 | if (raid5_calc_degraded(conf) > 0 && | 
| Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2596 | mode == R5C_JOURNAL_MODE_WRITE_BACK) { | 
|  | 2597 | mddev_unlock(mddev); | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2598 | return -EINVAL; | 
| Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2599 | } | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2600 |  | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2601 | mddev_suspend(mddev); | 
| Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2602 | conf->log->r5c_journal_mode = mode; | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2603 | mddev_resume(mddev); | 
| Song Liu | b44886c | 2017-07-31 14:52:26 -0700 | [diff] [blame] | 2604 | mddev_unlock(mddev); | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2605 |  | 
|  | 2606 | pr_debug("md/raid:%s: setting r5c cache mode to %d: %s\n", | 
| Heinz Mauelshagen | 78e470c | 2017-03-22 17:44:37 +0100 | [diff] [blame] | 2607 | mdname(mddev), mode, r5c_journal_mode_str[mode]); | 
|  | 2608 | return 0; | 
|  | 2609 | } | 
|  | 2610 | EXPORT_SYMBOL(r5c_journal_mode_set); | 
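
r5c_journal_mode_set() is the in-kernel entry point for switching the cache mode; the sysfs store handler below funnels through it, and dm-raid is the external caller it was exported for. A hedged usage sketch, where `enable_writeback_cache` is a hypothetical wrapper:

```c
/*
 * Sketch: how an in-kernel caller (e.g. dm-raid) might switch the cache
 * to write-back mode. The callee takes/releases the mddev lock itself and
 * returns -EINVAL for a degraded array or an out-of-range mode.
 */
int enable_writeback_cache(struct mddev *mddev)	/* hypothetical wrapper */
{
	return r5c_journal_mode_set(mddev, R5C_JOURNAL_MODE_WRITE_BACK);
}
```
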
|  | 2611 |  | 
|  | 2612 | static ssize_t r5c_journal_mode_store(struct mddev *mddev, | 
|  | 2613 | const char *page, size_t length) | 
|  | 2614 | { | 
|  | 2615 | int mode = ARRAY_SIZE(r5c_journal_mode_str); | 
|  | 2616 | size_t len = length; | 
|  | 2617 |  | 
|  | 2618 | if (len < 2) | 
|  | 2619 | return -EINVAL; | 
|  | 2620 |  | 
|  | 2621 | if (page[len - 1] == '\n') | 
|  | 2622 | len--; | 
|  | 2623 |  | 
|  | 2624 | while (mode--) | 
|  | 2625 | if (strlen(r5c_journal_mode_str[mode]) == len && | 
|  | 2626 | !strncmp(page, r5c_journal_mode_str[mode], len)) | 
|  | 2627 | break; | 
|  | 2628 |  | 
|  | 2629 | return r5c_journal_mode_set(mddev, mode) ?: length; | 
| Song Liu | 2c7da14 | 2016-11-17 15:24:41 -0800 | [diff] [blame] | 2630 | } | 
|  | 2631 |  | 
|  | 2632 | struct md_sysfs_entry | 
|  | 2633 | r5c_journal_mode = __ATTR(journal_mode, 0644, | 
|  | 2634 | r5c_journal_mode_show, r5c_journal_mode_store); | 
|  | 2635 |  | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2636 | /* | 
|  | 2637 | * Try to handle the write operation in the caching phase. This function | 
|  | 2638 | * should only be called in write-back mode. | 
|  | 2639 | * | 
|  | 2640 | * If all outstanding writes can be handled in the caching phase, returns 0. | 
|  | 2641 | * If the writes require the write-out phase, calls r5c_make_stripe_write_out() | 
|  | 2642 | * and returns -EAGAIN. | 
|  | 2643 | */ | 
|  | 2644 | int r5c_try_caching_write(struct r5conf *conf, | 
|  | 2645 | struct stripe_head *sh, | 
|  | 2646 | struct stripe_head_state *s, | 
|  | 2647 | int disks) | 
|  | 2648 | { | 
|  | 2649 | struct r5l_log *log = conf->log; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2650 | int i; | 
|  | 2651 | struct r5dev *dev; | 
|  | 2652 | int to_cache = 0; | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2653 | void **pslot; | 
|  | 2654 | sector_t tree_index; | 
|  | 2655 | int ret; | 
|  | 2656 | uintptr_t refcount; | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2657 |  | 
|  | 2658 | BUG_ON(!r5c_is_writeback(log)); | 
|  | 2659 |  | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2660 | if (!test_bit(STRIPE_R5C_CACHING, &sh->state)) { | 
|  | 2661 | /* | 
|  | 2662 | * There are two different scenarios here: | 
|  | 2663 | *  1. The stripe has some data cached, and it is sent to | 
|  | 2664 | *     write-out phase for reclaim | 
|  | 2665 | *  2. The stripe is clean, and this is the first write | 
|  | 2666 | * | 
|  | 2667 | * For 1, return -EAGAIN, so we continue with | 
|  | 2668 | * handle_stripe_dirtying(). | 
|  | 2669 | * | 
|  | 2670 | * For 2, set STRIPE_R5C_CACHING and continue with caching | 
|  | 2671 | * write. | 
|  | 2672 | */ | 
|  | 2673 |  | 
|  | 2674 | /* case 1: anything in journal or anything written */ | 
|  | 2675 | if (s->injournal > 0 || s->written > 0) | 
|  | 2676 | return -EAGAIN; | 
|  | 2677 | /* case 2 */ | 
|  | 2678 | set_bit(STRIPE_R5C_CACHING, &sh->state); | 
|  | 2679 | } | 
|  | 2680 |  | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2681 | /* | 
|  | 2682 | * When run in degraded mode, the array is set to write-through mode. | 
|  | 2683 | * This check helps drain pending writes safely in the transition to | 
|  | 2684 | * write-through mode. | 
| Song Liu | 5ddf044 | 2017-05-11 17:03:44 -0700 | [diff] [blame] | 2685 | * | 
|  | 2686 | * When a stripe is syncing, the write is also handled in write-through | 
|  | 2687 | * mode. | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2688 | */ | 
| Song Liu | 5ddf044 | 2017-05-11 17:03:44 -0700 | [diff] [blame] | 2689 | if (s->failed || test_bit(STRIPE_SYNCING, &sh->state)) { | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 2690 | r5c_make_stripe_write_out(sh); | 
|  | 2691 | return -EAGAIN; | 
|  | 2692 | } | 
|  | 2693 |  | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2694 | for (i = disks; i--; ) { | 
|  | 2695 | dev = &sh->dev[i]; | 
|  | 2696 | /* if non-overwrite, use writing-out phase */ | 
|  | 2697 | if (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags) && | 
|  | 2698 | !test_bit(R5_InJournal, &dev->flags)) { | 
|  | 2699 | r5c_make_stripe_write_out(sh); | 
|  | 2700 | return -EAGAIN; | 
|  | 2701 | } | 
|  | 2702 | } | 
|  | 2703 |  | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2704 | /* if the stripe is not counted in big_stripe_tree, add it now */ | 
|  | 2705 | if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && | 
|  | 2706 | !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { | 
|  | 2707 | tree_index = r5c_tree_index(conf, sh->sector); | 
|  | 2708 | spin_lock(&log->tree_lock); | 
|  | 2709 | pslot = radix_tree_lookup_slot(&log->big_stripe_tree, | 
|  | 2710 | tree_index); | 
|  | 2711 | if (pslot) { | 
|  | 2712 | refcount = (uintptr_t)radix_tree_deref_slot_protected( | 
|  | 2713 | pslot, &log->tree_lock) >> | 
|  | 2714 | R5C_RADIX_COUNT_SHIFT; | 
|  | 2715 | radix_tree_replace_slot( | 
|  | 2716 | &log->big_stripe_tree, pslot, | 
|  | 2717 | (void *)((refcount + 1) << R5C_RADIX_COUNT_SHIFT)); | 
|  | 2718 | } else { | 
|  | 2719 | /* | 
|  | 2720 | * this radix_tree_insert can fail safely, so no | 
|  | 2721 | * need to call radix_tree_preload() | 
|  | 2722 | */ | 
|  | 2723 | ret = radix_tree_insert( | 
|  | 2724 | &log->big_stripe_tree, tree_index, | 
|  | 2725 | (void *)(1 << R5C_RADIX_COUNT_SHIFT)); | 
|  | 2726 | if (ret) { | 
|  | 2727 | spin_unlock(&log->tree_lock); | 
|  | 2728 | r5c_make_stripe_write_out(sh); | 
|  | 2729 | return -EAGAIN; | 
|  | 2730 | } | 
|  | 2731 | } | 
|  | 2732 | spin_unlock(&log->tree_lock); | 
|  | 2733 |  | 
|  | 2734 | /* | 
|  | 2735 | * set STRIPE_R5C_PARTIAL_STRIPE; this shows the stripe is | 
|  | 2736 | * counted in the radix tree | 
|  | 2737 | */ | 
|  | 2738 | set_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state); | 
|  | 2739 | atomic_inc(&conf->r5c_cached_partial_stripes); | 
|  | 2740 | } | 
|  | 2741 |  | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2742 | for (i = disks; i--; ) { | 
|  | 2743 | dev = &sh->dev[i]; | 
|  | 2744 | if (dev->towrite) { | 
|  | 2745 | set_bit(R5_Wantwrite, &dev->flags); | 
|  | 2746 | set_bit(R5_Wantdrain, &dev->flags); | 
|  | 2747 | set_bit(R5_LOCKED, &dev->flags); | 
|  | 2748 | to_cache++; | 
|  | 2749 | } | 
|  | 2750 | } | 
|  | 2751 |  | 
|  | 2752 | if (to_cache) { | 
|  | 2753 | set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); | 
|  | 2754 | /* | 
|  | 2755 | * set STRIPE_LOG_TRAPPED, which triggers r5c_cache_data() | 
|  | 2756 | * in ops_run_io(). STRIPE_LOG_TRAPPED will be cleared in | 
|  | 2757 | * r5c_handle_data_cached() | 
|  | 2758 | */ | 
|  | 2759 | set_bit(STRIPE_LOG_TRAPPED, &sh->state); | 
|  | 2760 | } | 
|  | 2761 |  | 
|  | 2762 | return 0; | 
|  | 2763 | } | 
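
The radix tree manipulation above never allocates a per-entry object: big_stripe_tree stores the reference count in the slot value itself, shifted left by R5C_RADIX_COUNT_SHIFT so the low bits stay clear for the tree's internal use. A minimal sketch of that packing, using hypothetical helper names:

```c
/*
 * Hypothetical helpers showing how big_stripe_tree packs a reference
 * count into the slot value (no allocation per entry). The low
 * R5C_RADIX_COUNT_SHIFT bits are left clear for the radix tree itself.
 */
static inline uintptr_t r5c_slot_refcount(void *slot)
{
	return (uintptr_t)slot >> R5C_RADIX_COUNT_SHIFT;
}

static inline void *r5c_refcount_slot(uintptr_t refcount)
{
	return (void *)(refcount << R5C_RADIX_COUNT_SHIFT);
}
```
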
|  | 2764 |  | 
|  | 2765 | /* | 
|  | 2766 | * free extra pages (orig_page) we allocated for prexor | 
|  | 2767 | */ | 
|  | 2768 | void r5c_release_extra_page(struct stripe_head *sh) | 
|  | 2769 | { | 
| Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2770 | struct r5conf *conf = sh->raid_conf; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2771 | int i; | 
| Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2772 | bool using_disk_info_extra_page; | 
|  | 2773 |  | 
|  | 2774 | using_disk_info_extra_page = | 
|  | 2775 | sh->dev[0].orig_page == conf->disks[0].extra_page; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2776 |  | 
|  | 2777 | for (i = sh->disks; i--; ) | 
|  | 2778 | if (sh->dev[i].page != sh->dev[i].orig_page) { | 
|  | 2779 | struct page *p = sh->dev[i].orig_page; | 
|  | 2780 |  | 
|  | 2781 | sh->dev[i].orig_page = sh->dev[i].page; | 
| Song Liu | 86aa1397 | 2017-01-12 17:22:41 -0800 | [diff] [blame] | 2782 | clear_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); | 
|  | 2783 |  | 
| Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2784 | if (!using_disk_info_extra_page) | 
|  | 2785 | put_page(p); | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2786 | } | 
| Song Liu | d7bd398 | 2016-11-23 22:50:39 -0800 | [diff] [blame] | 2787 |  | 
|  | 2788 | if (using_disk_info_extra_page) { | 
|  | 2789 | clear_bit(R5C_EXTRA_PAGE_IN_USE, &conf->cache_state); | 
|  | 2790 | md_wakeup_thread(conf->mddev->thread); | 
|  | 2791 | } | 
|  | 2792 | } | 
|  | 2793 |  | 
|  | 2794 | void r5c_use_extra_page(struct stripe_head *sh) | 
|  | 2795 | { | 
|  | 2796 | struct r5conf *conf = sh->raid_conf; | 
|  | 2797 | int i; | 
|  | 2798 | struct r5dev *dev; | 
|  | 2799 |  | 
|  | 2800 | for (i = sh->disks; i--; ) { | 
|  | 2801 | dev = &sh->dev[i]; | 
|  | 2802 | if (dev->orig_page != dev->page) | 
|  | 2803 | put_page(dev->orig_page); | 
|  | 2804 | dev->orig_page = conf->disks[i].extra_page; | 
|  | 2805 | } | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2806 | } | 
|  | 2807 |  | 
|  | 2808 | /* | 
|  | 2809 | * clean up the stripe (clear R5_InJournal for dev[pd_idx] etc.) after the | 
|  | 2810 | * stripe is committed to RAID disks. | 
|  | 2811 | */ | 
|  | 2812 | void r5c_finish_stripe_write_out(struct r5conf *conf, | 
|  | 2813 | struct stripe_head *sh, | 
|  | 2814 | struct stripe_head_state *s) | 
|  | 2815 | { | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2816 | struct r5l_log *log = conf->log; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2817 | int i; | 
|  | 2818 | int do_wakeup = 0; | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2819 | sector_t tree_index; | 
|  | 2820 | void **pslot; | 
|  | 2821 | uintptr_t refcount; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2822 |  | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2823 | if (!log || !test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)) | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2824 | return; | 
|  | 2825 |  | 
|  | 2826 | WARN_ON(test_bit(STRIPE_R5C_CACHING, &sh->state)); | 
|  | 2827 | clear_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags); | 
|  | 2828 |  | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2829 | if (log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH) | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 2830 | return; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2831 |  | 
|  | 2832 | for (i = sh->disks; i--; ) { | 
|  | 2833 | clear_bit(R5_InJournal, &sh->dev[i].flags); | 
|  | 2834 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 
|  | 2835 | do_wakeup = 1; | 
|  | 2836 | } | 
|  | 2837 |  | 
|  | 2838 | /* | 
|  | 2839 | * analyse_stripe() runs before r5c_finish_stripe_write_out(). | 
|  | 2840 | * We updated R5_InJournal, so we also update s->injournal. | 
|  | 2841 | */ | 
|  | 2842 | s->injournal = 0; | 
|  | 2843 |  | 
|  | 2844 | if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) | 
|  | 2845 | if (atomic_dec_and_test(&conf->pending_full_writes)) | 
|  | 2846 | md_wakeup_thread(conf->mddev->thread); | 
|  | 2847 |  | 
|  | 2848 | if (do_wakeup) | 
|  | 2849 | wake_up(&conf->wait_for_overlap); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2850 |  | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2851 | spin_lock_irq(&log->stripe_in_journal_lock); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2852 | list_del_init(&sh->r5c); | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2853 | spin_unlock_irq(&log->stripe_in_journal_lock); | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2854 | sh->log_start = MaxSector; | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2855 |  | 
|  | 2856 | atomic_dec(&log->stripe_in_journal_count); | 
|  | 2857 | r5c_update_log_state(log); | 
|  | 2858 |  | 
|  | 2859 | /* stop counting this stripe in big_stripe_tree */ | 
|  | 2860 | if (test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) || | 
|  | 2861 | test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { | 
|  | 2862 | tree_index = r5c_tree_index(conf, sh->sector); | 
|  | 2863 | spin_lock(&log->tree_lock); | 
|  | 2864 | pslot = radix_tree_lookup_slot(&log->big_stripe_tree, | 
|  | 2865 | tree_index); | 
|  | 2866 | BUG_ON(pslot == NULL); | 
|  | 2867 | refcount = (uintptr_t)radix_tree_deref_slot_protected( | 
|  | 2868 | pslot, &log->tree_lock) >> | 
|  | 2869 | R5C_RADIX_COUNT_SHIFT; | 
|  | 2870 | if (refcount == 1) | 
|  | 2871 | radix_tree_delete(&log->big_stripe_tree, tree_index); | 
|  | 2872 | else | 
|  | 2873 | radix_tree_replace_slot( | 
|  | 2874 | &log->big_stripe_tree, pslot, | 
|  | 2875 | (void *)((refcount - 1) << R5C_RADIX_COUNT_SHIFT)); | 
|  | 2876 | spin_unlock(&log->tree_lock); | 
|  | 2877 | } | 
|  | 2878 |  | 
|  | 2879 | if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) { | 
|  | 2880 | BUG_ON(atomic_read(&conf->r5c_cached_partial_stripes) == 0); | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 2881 | atomic_dec(&conf->r5c_flushing_partial_stripes); | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2882 | atomic_dec(&conf->r5c_cached_partial_stripes); | 
|  | 2883 | } | 
|  | 2884 |  | 
|  | 2885 | if (test_and_clear_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) { | 
|  | 2886 | BUG_ON(atomic_read(&conf->r5c_cached_full_stripes) == 0); | 
| Shaohua Li | e33fbb9 | 2017-02-10 16:18:09 -0800 | [diff] [blame] | 2887 | atomic_dec(&conf->r5c_flushing_full_stripes); | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2888 | atomic_dec(&conf->r5c_cached_full_stripes); | 
|  | 2889 | } | 
| Song Liu | ea17481 | 2017-03-09 21:23:39 -0800 | [diff] [blame] | 2890 |  | 
|  | 2891 | r5l_append_flush_payload(log, sh->sector); | 
| Song Liu | 5ddf044 | 2017-05-11 17:03:44 -0700 | [diff] [blame] | 2892 | /* stripe is flushed to raid disks; we can do resync now */ | 
|  | 2893 | if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) | 
|  | 2894 | set_bit(STRIPE_HANDLE, &sh->state); | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2895 | } | 
|  | 2896 |  | 
| Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 2897 | int r5c_cache_data(struct r5l_log *log, struct stripe_head *sh) | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2898 | { | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2899 | struct r5conf *conf = sh->raid_conf; | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2900 | int pages = 0; | 
|  | 2901 | int reserve; | 
|  | 2902 | int i; | 
|  | 2903 | int ret = 0; | 
|  | 2904 |  | 
|  | 2905 | BUG_ON(!log); | 
|  | 2906 |  | 
|  | 2907 | for (i = 0; i < sh->disks; i++) { | 
|  | 2908 | void *addr; | 
|  | 2909 |  | 
|  | 2910 | if (!test_bit(R5_Wantwrite, &sh->dev[i].flags)) | 
|  | 2911 | continue; | 
|  | 2912 | addr = kmap_atomic(sh->dev[i].page); | 
|  | 2913 | sh->dev[i].log_checksum = crc32c_le(log->uuid_checksum, | 
|  | 2914 | addr, PAGE_SIZE); | 
|  | 2915 | kunmap_atomic(addr); | 
|  | 2916 | pages++; | 
|  | 2917 | } | 
|  | 2918 | WARN_ON(pages == 0); | 
|  | 2919 |  | 
|  | 2920 | /* | 
|  | 2921 | * The stripe must enter state machine again to call endio, so | 
|  | 2922 | * don't delay. | 
|  | 2923 | */ | 
|  | 2924 | clear_bit(STRIPE_DELAYED, &sh->state); | 
|  | 2925 | atomic_inc(&sh->count); | 
|  | 2926 |  | 
|  | 2927 | mutex_lock(&log->io_mutex); | 
|  | 2928 | /* meta + data */ | 
|  | 2929 | reserve = (1 + pages) << (PAGE_SHIFT - 9); | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2930 |  | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 2931 | if (test_bit(R5C_LOG_CRITICAL, &conf->cache_state) && | 
|  | 2932 | sh->log_start == MaxSector) | 
|  | 2933 | r5l_add_no_space_stripe(log, sh); | 
|  | 2934 | else if (!r5l_has_free_space(log, reserve)) { | 
|  | 2935 | if (sh->log_start == log->last_checkpoint) | 
|  | 2936 | BUG(); | 
|  | 2937 | else | 
|  | 2938 | r5l_add_no_space_stripe(log, sh); | 
| Song Liu | 1e6d690 | 2016-11-17 15:24:39 -0800 | [diff] [blame] | 2939 | } else { | 
|  | 2940 | ret = r5l_log_stripe(log, sh, pages, 0); | 
|  | 2941 | if (ret) { | 
|  | 2942 | spin_lock_irq(&log->io_list_lock); | 
|  | 2943 | list_add_tail(&sh->log_list, &log->no_mem_stripes); | 
|  | 2944 | spin_unlock_irq(&log->io_list_lock); | 
|  | 2945 | } | 
|  | 2946 | } | 
|  | 2947 |  | 
|  | 2948 | mutex_unlock(&log->io_mutex); | 
|  | 2949 | return 0; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2950 | } | 
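
The reserve computation above converts pages to 512-byte sectors: one meta page plus `pages` data pages, each 4 KB page being 8 sectors. A small sketch with a hypothetical helper name and a worked example in the comment:

```c
/* Hypothetical helper: log sectors needed for one meta page plus n data pages. */
static inline unsigned int r5l_stripe_reserve_sectors(int pages)
{
	/* e.g. pages = 3 -> (1 + 3) << 3 = 32 sectors with 4K pages */
	return (1 + pages) << (PAGE_SHIFT - 9);
}
```
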
|  | 2951 |  | 
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 2952 | /* check whether this big stripe is in the write-back cache. */ | 
|  | 2953 | bool r5c_big_stripe_cached(struct r5conf *conf, sector_t sect) | 
|  | 2954 | { | 
|  | 2955 | struct r5l_log *log = conf->log; | 
|  | 2956 | sector_t tree_index; | 
|  | 2957 | void *slot; | 
|  | 2958 |  | 
|  | 2959 | if (!log) | 
|  | 2960 | return false; | 
|  | 2961 |  | 
|  | 2962 | WARN_ON_ONCE(!rcu_read_lock_held()); | 
|  | 2963 | tree_index = r5c_tree_index(conf, sect); | 
|  | 2964 | slot = radix_tree_lookup(&log->big_stripe_tree, tree_index); | 
|  | 2965 | return slot != NULL; | 
|  | 2966 | } | 
|  | 2967 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2968 | static int r5l_load_log(struct r5l_log *log) | 
|  | 2969 | { | 
|  | 2970 | struct md_rdev *rdev = log->rdev; | 
|  | 2971 | struct page *page; | 
|  | 2972 | struct r5l_meta_block *mb; | 
|  | 2973 | sector_t cp = log->rdev->journal_tail; | 
|  | 2974 | u32 stored_crc, expected_crc; | 
|  | 2975 | bool create_super = false; | 
| JackieLiu | d30dfeb | 2016-12-08 08:47:39 +0800 | [diff] [blame] | 2976 | int ret = 0; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2977 |  | 
|  | 2978 | /* Make sure it's valid */ | 
|  | 2979 | if (cp >= rdev->sectors || round_down(cp, BLOCK_SECTORS) != cp) | 
|  | 2980 | cp = 0; | 
|  | 2981 | page = alloc_page(GFP_KERNEL); | 
|  | 2982 | if (!page) | 
|  | 2983 | return -ENOMEM; | 
|  | 2984 |  | 
| Mike Christie | 796a5cf | 2016-06-05 14:32:07 -0500 | [diff] [blame] | 2985 | if (!sync_page_io(rdev, cp, PAGE_SIZE, page, REQ_OP_READ, 0, false)) { | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2986 | ret = -EIO; | 
|  | 2987 | goto ioerr; | 
|  | 2988 | } | 
|  | 2989 | mb = page_address(page); | 
|  | 2990 |  | 
|  | 2991 | if (le32_to_cpu(mb->magic) != R5LOG_MAGIC || | 
|  | 2992 | mb->version != R5LOG_VERSION) { | 
|  | 2993 | create_super = true; | 
|  | 2994 | goto create; | 
|  | 2995 | } | 
|  | 2996 | stored_crc = le32_to_cpu(mb->checksum); | 
|  | 2997 | mb->checksum = 0; | 
| Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 2998 | expected_crc = crc32c_le(log->uuid_checksum, mb, PAGE_SIZE); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 2999 | if (stored_crc != expected_crc) { | 
|  | 3000 | create_super = true; | 
|  | 3001 | goto create; | 
|  | 3002 | } | 
|  | 3003 | if (le64_to_cpu(mb->position) != cp) { | 
|  | 3004 | create_super = true; | 
|  | 3005 | goto create; | 
|  | 3006 | } | 
|  | 3007 | create: | 
|  | 3008 | if (create_super) { | 
|  | 3009 | log->last_cp_seq = prandom_u32(); | 
|  | 3010 | cp = 0; | 
| Zhengyuan Liu | 56056c2 | 2016-10-24 16:15:59 +0800 | [diff] [blame] | 3011 | r5l_log_write_empty_meta_block(log, cp, log->last_cp_seq); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3012 | /* | 
|  | 3013 | * Make sure the super block points to the correct address. The log might | 
|  | 3014 | * have data very soon. If the super block doesn't have the correct log | 
|  | 3015 | * tail address, recovery can't find the log. | 
|  | 3016 | */ | 
|  | 3017 | r5l_write_super(log, cp); | 
|  | 3018 | } else | 
|  | 3019 | log->last_cp_seq = le64_to_cpu(mb->seq); | 
|  | 3020 |  | 
|  | 3021 | log->device_size = round_down(rdev->sectors, BLOCK_SECTORS); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3022 | log->max_free_space = log->device_size >> RECLAIM_MAX_FREE_SPACE_SHIFT; | 
|  | 3023 | if (log->max_free_space > RECLAIM_MAX_FREE_SPACE) | 
|  | 3024 | log->max_free_space = RECLAIM_MAX_FREE_SPACE; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3025 | log->last_checkpoint = cp; | 
|  | 3026 |  | 
|  | 3027 | __free_page(page); | 
|  | 3028 |  | 
| JackieLiu | d30dfeb | 2016-12-08 08:47:39 +0800 | [diff] [blame] | 3029 | if (create_super) { | 
|  | 3030 | log->log_start = r5l_ring_add(log, cp, BLOCK_SECTORS); | 
|  | 3031 | log->seq = log->last_cp_seq + 1; | 
|  | 3032 | log->next_checkpoint = cp; | 
|  | 3033 | } else | 
|  | 3034 | ret = r5l_recovery_log(log); | 
|  | 3035 |  | 
| Zhengyuan Liu | 3d7e7e1 | 2016-12-04 16:49:44 +0800 | [diff] [blame] | 3036 | r5c_update_log_state(log); | 
|  | 3037 | return ret; | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3038 | ioerr: | 
|  | 3039 | __free_page(page); | 
|  | 3040 | return ret; | 
|  | 3041 | } | 
|  | 3042 |  | 
| Song Liu | 70d466f | 2017-05-11 15:28:28 -0700 | [diff] [blame] | 3043 | void r5c_update_on_rdev_error(struct mddev *mddev, struct md_rdev *rdev) | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3044 | { | 
|  | 3045 | struct r5conf *conf = mddev->private; | 
|  | 3046 | struct r5l_log *log = conf->log; | 
|  | 3047 |  | 
|  | 3048 | if (!log) | 
|  | 3049 | return; | 
|  | 3050 |  | 
| Song Liu | 70d466f | 2017-05-11 15:28:28 -0700 | [diff] [blame] | 3051 | if ((raid5_calc_degraded(conf) > 0 || | 
|  | 3052 | test_bit(Journal, &rdev->flags)) && | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3053 | conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_BACK) | 
|  | 3054 | schedule_work(&log->disable_writeback_work); | 
|  | 3055 | } | 
|  | 3056 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3057 | int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev) | 
|  | 3058 | { | 
| Jens Axboe | c888a8f | 2016-04-13 13:33:19 -0600 | [diff] [blame] | 3059 | struct request_queue *q = bdev_get_queue(rdev->bdev); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3060 | struct r5l_log *log; | 
| Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3061 | char b[BDEVNAME_SIZE]; | 
|  | 3062 |  | 
|  | 3063 | pr_debug("md/raid:%s: using device %s as journal\n", | 
|  | 3064 | mdname(conf->mddev), bdevname(rdev->bdev, b)); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3065 |  | 
|  | 3066 | if (PAGE_SIZE != 4096) | 
|  | 3067 | return -EINVAL; | 
| Song Liu | c757ec9 | 2016-11-17 15:24:36 -0800 | [diff] [blame] | 3068 |  | 
|  | 3069 | /* | 
|  | 3070 | * The PAGE_SIZE must be big enough to hold 1 r5l_meta_block and | 
|  | 3071 | * raid_disks r5l_payload_data_parity entries. | 
|  | 3072 | * | 
|  | 3073 | * The write journal and cache do not work for very big arrays | 
|  | 3074 | * (raid_disks > 203). | 
|  | 3075 | */ | 
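|  |  | /* | 
|  |  | * Rough arithmetic, assuming the current struct layouts: the meta | 
|  |  | * block header takes 32 bytes, and each disk needs a 16-byte | 
|  |  | * payload descriptor plus a 4-byte checksum, so one 4096-byte | 
|  |  | * page holds (4096 - 32) / 20 = 203 disks. | 
|  |  | */ | 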
|  | 3076 | if (sizeof(struct r5l_meta_block) + | 
|  | 3077 | ((sizeof(struct r5l_payload_data_parity) + sizeof(__le32)) * | 
|  | 3078 | conf->raid_disks) > PAGE_SIZE) { | 
|  | 3079 | pr_err("md/raid:%s: write journal/cache doesn't work for array with %d disks\n", | 
|  | 3080 | mdname(conf->mddev), conf->raid_disks); | 
|  | 3081 | return -EINVAL; | 
|  | 3082 | } | 
|  | 3083 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3084 | log = kzalloc(sizeof(*log), GFP_KERNEL); | 
|  | 3085 | if (!log) | 
|  | 3086 | return -ENOMEM; | 
|  | 3087 | log->rdev = rdev; | 
|  | 3088 |  | 
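|  |  | /* | 
|  |  | * A journal device with a volatile write cache (QUEUE_FLAG_WC) | 
|  |  | * requires explicit cache flushes before log data and metadata | 
|  |  | * can be trusted to have reached stable storage. | 
|  |  | */ | 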
| Jens Axboe | c888a8f | 2016-04-13 13:33:19 -0600 | [diff] [blame] | 3089 | log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0; | 
| Christoph Hellwig | 56fef7c | 2015-10-05 09:31:09 +0200 | [diff] [blame] | 3090 |  | 
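|  |  | /* | 
|  |  | * Every on-disk checksum is seeded with a crc32c of the array | 
|  |  | * uuid, so meta blocks left over from a different array never | 
|  |  | * pass validation during recovery. | 
|  |  | */ | 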
| Shaohua Li | 5cb2fbd | 2015-10-28 08:41:25 -0700 | [diff] [blame] | 3091 | log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid, | 
|  | 3092 | sizeof(rdev->mddev->uuid)); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3093 |  | 
|  | 3094 | mutex_init(&log->io_mutex); | 
|  | 3095 |  | 
|  | 3096 | spin_lock_init(&log->io_list_lock); | 
|  | 3097 | INIT_LIST_HEAD(&log->running_ios); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3098 | INIT_LIST_HEAD(&log->io_end_ios); | 
| Shaohua Li | a8c34f9 | 2015-09-02 13:49:46 -0700 | [diff] [blame] | 3099 | INIT_LIST_HEAD(&log->flushing_ios); | 
| Christoph Hellwig | 04732f7 | 2015-10-05 09:31:07 +0200 | [diff] [blame] | 3100 | INIT_LIST_HEAD(&log->finished_ios); | 
| Ming Lei | 3a83f46 | 2016-11-22 08:57:21 -0700 | [diff] [blame] | 3101 | bio_init(&log->flush_bio, NULL, 0); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3102 |  | 
|  | 3103 | log->io_kc = KMEM_CACHE(r5l_io_unit, 0); | 
|  | 3104 | if (!log->io_kc) | 
|  | 3105 | goto io_kc; | 
|  | 3106 |  | 
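|  |  | /* | 
|  |  | * Pre-sized mempools and a bioset let log I/O make forward | 
|  |  | * progress under memory pressure, where writeback through the | 
|  |  | * journal may itself be what frees memory. | 
|  |  | */ | 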
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3107 | log->io_pool = mempool_create_slab_pool(R5L_POOL_SIZE, log->io_kc); | 
|  | 3108 | if (!log->io_pool) | 
|  | 3109 | goto io_pool; | 
|  | 3110 |  | 
| NeilBrown | 011067b | 2017-06-18 14:38:57 +1000 | [diff] [blame] | 3111 | log->bs = bioset_create(R5L_POOL_SIZE, 0, BIOSET_NEED_BVECS); | 
| Christoph Hellwig | c38d29b | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3112 | if (!log->bs) | 
|  | 3113 | goto io_bs; | 
|  | 3114 |  | 
| Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3115 | log->meta_pool = mempool_create_page_pool(R5L_POOL_SIZE, 0); | 
|  | 3116 | if (!log->meta_pool) | 
|  | 3117 | goto out_mempool; | 
|  | 3118 |  | 
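|  |  | /* | 
|  |  | * The radix tree tracks which "big stripes" currently hold data | 
|  |  | * in the write-back cache, so chunk-aligned reads can tell | 
|  |  | * whether they may bypass the stripe cache. | 
|  |  | */ | 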
| Song Liu | 03b047f | 2017-01-11 13:39:14 -0800 | [diff] [blame] | 3119 | spin_lock_init(&log->tree_lock); | 
|  | 3120 | INIT_RADIX_TREE(&log->big_stripe_tree, GFP_NOWAIT | __GFP_NOWARN); | 
|  | 3121 |  | 
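|  |  | /* | 
|  |  | * The reclaim thread frees log space by flushing stripes to the | 
|  |  | * RAID disks; the timeout below also wakes it periodically so | 
|  |  | * space is reclaimed even on an idle array. | 
|  |  | */ | 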
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3122 | log->reclaim_thread = md_register_thread(r5l_reclaim_thread, | 
|  | 3123 | log->rdev->mddev, "reclaim"); | 
|  | 3124 | if (!log->reclaim_thread) | 
|  | 3125 | goto reclaim_thread; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 3126 | log->reclaim_thread->timeout = R5C_RECLAIM_WAKEUP_INTERVAL; | 
|  | 3127 |  | 
| Shaohua Li | 0fd22b4 | 2015-09-02 13:49:47 -0700 | [diff] [blame] | 3128 | init_waitqueue_head(&log->iounit_wait); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3129 |  | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3130 | INIT_LIST_HEAD(&log->no_mem_stripes); | 
|  | 3131 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3132 | INIT_LIST_HEAD(&log->no_space_stripes); | 
|  | 3133 | spin_lock_init(&log->no_space_stripes_lock); | 
|  | 3134 |  | 
| Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 3135 | INIT_WORK(&log->deferred_io_work, r5l_submit_io_async); | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3136 | INIT_WORK(&log->disable_writeback_work, r5c_disable_writeback_async); | 
| Song Liu | 3bddb7f | 2016-11-18 16:46:50 -0800 | [diff] [blame] | 3137 |  | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 3138 | log->r5c_journal_mode = R5C_JOURNAL_MODE_WRITE_THROUGH; | 
| Song Liu | a39f7af | 2016-11-17 15:24:40 -0800 | [diff] [blame] | 3139 | INIT_LIST_HEAD(&log->stripe_in_journal_list); | 
|  | 3140 | spin_lock_init(&log->stripe_in_journal_lock); | 
|  | 3141 | atomic_set(&log->stripe_in_journal_count, 0); | 
| Song Liu | 2ded370 | 2016-11-17 15:24:38 -0800 | [diff] [blame] | 3142 |  | 
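|  |  | /* | 
|  |  | * Publish the log before loading it: r5l_load_log() may run | 
|  |  | * recovery, which dereferences conf->log. The error path below | 
|  |  | * clears the pointer again before freeing anything. | 
|  |  | */ | 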
| Song Liu | d2250f1 | 2016-12-14 15:38:02 -0800 | [diff] [blame] | 3143 | rcu_assign_pointer(conf->log, log); | 
|  | 3144 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3145 | if (r5l_load_log(log)) | 
|  | 3146 | goto error; | 
|  | 3147 |  | 
| Shaohua Li | a62ab49 | 2016-01-06 14:37:13 -0800 | [diff] [blame] | 3148 | set_bit(MD_HAS_JOURNAL, &conf->mddev->flags); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3149 | return 0; | 
| Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3150 |  | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3151 | error: | 
| Song Liu | d2250f1 | 2016-12-14 15:38:02 -0800 | [diff] [blame] | 3152 | rcu_assign_pointer(conf->log, NULL); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3153 | md_unregister_thread(&log->reclaim_thread); | 
|  | 3154 | reclaim_thread: | 
| Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3155 | mempool_destroy(log->meta_pool); | 
|  | 3156 | out_mempool: | 
| Christoph Hellwig | c38d29b | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3157 | bioset_free(log->bs); | 
|  | 3158 | io_bs: | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3159 | mempool_destroy(log->io_pool); | 
|  | 3160 | io_pool: | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3161 | kmem_cache_destroy(log->io_kc); | 
|  | 3162 | io_kc: | 
|  | 3163 | kfree(log); | 
|  | 3164 | return -EINVAL; | 
|  | 3165 | } | 
|  | 3166 |  | 
| Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3167 | void r5l_exit_log(struct r5conf *conf) | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3168 | { | 
| Artur Paszkiewicz | ff87573 | 2017-03-09 09:59:58 +0100 | [diff] [blame] | 3169 | struct r5l_log *log = conf->log; | 
|  | 3170 |  | 
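|  |  | /* | 
|  |  | * Clear the published pointer and wait for all RCU readers of | 
|  |  | * conf->log to finish before tearing the log down. | 
|  |  | */ | 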
|  | 3171 | conf->log = NULL; | 
|  | 3172 | synchronize_rcu(); | 
|  | 3173 |  | 
| NeilBrown | 4d5324f | 2017-10-19 12:17:16 +1100 | [diff] [blame] | 3174 | /* Ensure disable_writeback_work wakes up and exits */ | 
|  | 3175 | wake_up(&conf->mddev->sb_wait); | 
| Song Liu | 2e38a37 | 2017-01-24 10:45:30 -0800 | [diff] [blame] | 3176 | flush_work(&log->disable_writeback_work); | 
| Shaohua Li | 0576b1c | 2015-08-13 14:32:00 -0700 | [diff] [blame] | 3177 | md_unregister_thread(&log->reclaim_thread); | 
| Christoph Hellwig | e8deb63 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3178 | mempool_destroy(log->meta_pool); | 
| Christoph Hellwig | c38d29b | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3179 | bioset_free(log->bs); | 
| Christoph Hellwig | 5036c390 | 2015-12-21 10:51:02 +1100 | [diff] [blame] | 3180 | mempool_destroy(log->io_pool); | 
| Shaohua Li | f6bed0e | 2015-08-13 14:31:59 -0700 | [diff] [blame] | 3181 | kmem_cache_destroy(log->io_kc); | 
|  | 3182 | kfree(log); | 
|  | 3183 | } |